# File: webots_ros2_core/webots_ros2_core/devices/gps_device.py (repo: TaoYibo1866/webots_ros2)
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webots GPS device wrapper for ROS2."""
from rclpy.qos import QoSReliabilityPolicy, qos_profile_sensor_data
from std_msgs.msg import Float32
from sensor_msgs.msg import NavSatFix, NavSatStatus
from geometry_msgs.msg import PointStamped
from .sensor_device import SensorDevice
from controller import GPS
class GpsDevice(SensorDevice):
"""
ROS2 wrapper for Webots GPS node.
Creates a suitable ROS2 interface based on a Webots
[GPS](https://cyberbotics.com/doc/reference/gps) node instance.
It provides the following functionality:
- Publishes position measurements of type `sensor_msgs::NavSatFix` if WGS84
- Publishes position measurements of type `geometry_msgs::PointStamped` if LOCAL
Args:
----
node (WebotsNode): The ROS2 node.
device_key (str): Unique identifier of the device used for configuration.
wb_device (Gps): Webots node of type GPS.
Kwargs:
params (dict): Inherited from `SensorDevice` + the following::
dict: {
'timestep': int, # Publish period in ms (default 128ms)
}
"""
def __init__(self, node, device_key, wb_device, params=None):
super().__init__(node, device_key, wb_device, params)
self.__speed_publisher = None
self.__gps_publisher = None
self.__coordinate_system = self._wb_device.getCoordinateSystem()
# Exit if disabled
if self._disable:
return
# Change default timestep
self._timestep = 128
qos_sensor_reliable = qos_profile_sensor_data
qos_sensor_reliable.reliability = QoSReliabilityPolicy.RELIABLE
# Create topics
self.__speed_publisher = node.create_publisher(
Float32, self._topic_name + '/speed', qos_sensor_reliable)
if self.__coordinate_system == GPS.WGS84:
self.__gps_publisher = node.create_publisher(
NavSatFix, self._topic_name + '/gps', qos_sensor_reliable)
else:
self.__gps_publisher = node.create_publisher(
PointStamped, self._topic_name + '/gps', qos_sensor_reliable)
def step(self):
stamp = super().step()
if not stamp:
return
if self.__gps_publisher.get_subscription_count() > 0 or \
self.__speed_publisher.get_subscription_count() > 0 or \
self._always_publish:
self._wb_device.enable(self._timestep)
msg = Float32()
msg.data = self._wb_device.getSpeed()
self.__speed_publisher.publish(msg)
if self.__coordinate_system == GPS.WGS84:
msg = NavSatFix()
msg.header.stamp = stamp
msg.header.frame_id = self._frame_id
msg.latitude = self._wb_device.getValues()[0]
msg.longitude = self._wb_device.getValues()[1]
msg.altitude = self._wb_device.getValues()[2]
msg.position_covariance_type = NavSatFix.COVARIANCE_TYPE_UNKNOWN
msg.status.service = NavSatStatus.SERVICE_GPS
self.__gps_publisher.publish(msg)
else:
msg = PointStamped()
msg.header.stamp = stamp
msg.header.frame_id = self._frame_id
msg.point.x = self._wb_device.getValues()[0]
msg.point.y = self._wb_device.getValues()[1]
msg.point.z = self._wb_device.getValues()[2]
self.__gps_publisher.publish(msg)
else:
self._wb_device.disable()
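# --- Illustrative sketch (not part of the original file) ---------------------
# In __init__ above, qos_sensor_reliable is an alias of rclpy's shared
# qos_profile_sensor_data object, so changing its reliability also changes that
# shared profile. A minimal alternative, assuming plain rclpy, is to copy the
# profile before adjusting it:
#
#     import copy
#     from rclpy.qos import QoSReliabilityPolicy, qos_profile_sensor_data
#
#     qos_sensor_reliable = copy.copy(qos_profile_sensor_data)
#     qos_sensor_reliable.reliability = QoSReliabilityPolicy.RELIABLE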
# File: players/jeff.py (repo: jtreim/cant-stop)
from .player import Player
class JeffPlayer(Player):
"""
JeffPlayer focuses on the odds of successfully continuing a turn.
To pick a move, it calculates a move value based on the odds of continuing
the turn, preferring to advance less likely columns when possible and to
win contested columns ahead of opponents.
"""
ODDS = 'odds'
ROLLS = 'rolls'
ONE_COLUMN_ODDS = {
'2': { ODDS: .13, ROLLS: 0 },
'3': { ODDS: .23, ROLLS: 0 },
'4': { ODDS: .36, ROLLS: 0 },
'5': { ODDS: .45, ROLLS: 1 },
'6': { ODDS: .56, ROLLS: 1 },
'7': { ODDS: .64, ROLLS: 2 },
'8': { ODDS: .56, ROLLS: 1 },
'9': { ODDS: .45, ROLLS: 1 },
'10': { ODDS: .36, ROLLS: 0 },
'11': { ODDS: .23, ROLLS: 0 },
'12': { ODDS: .13, ROLLS: 0 },
}
TWO_COLUMN_ODDS = {
'2': {
'3': { ODDS: .32, ROLLS: 0 },
'4': { ODDS: .44, ROLLS: 1 },
'5': { ODDS: .53, ROLLS: 1 },
'6': { ODDS: .63, ROLLS: 2 },
'7': { ODDS: .71, ROLLS: 2 },
'8': { ODDS: .67, ROLLS: 2 },
'9': { ODDS: .56, ROLLS: 1 },
'10': { ODDS: .47, ROLLS: 1 },
'11': { ODDS: .36, ROLLS: 1 },
'12': { ODDS: .26, ROLLS: 0 },
},
'3': {
'4': { ODDS: .47, ROLLS: 1 },
'5': { ODDS: .53, ROLLS: 1 },
'6': { ODDS: .64, ROLLS: 2 },
'7': { ODDS: .71, ROLLS: 2 },
'8': { ODDS: .68, ROLLS: 2 },
'9': { ODDS: .64, ROLLS: 2 },
'10': { ODDS: .56, ROLLS: 1 },
'11': { ODDS: .45, ROLLS: 1 },
'12': { ODDS: .36, ROLLS: 1 },
},
'4': {
'5': { ODDS: .61, ROLLS: 2 },
'6': { ODDS: .72, ROLLS: 3 },
'7': { ODDS: .77, ROLLS: 3 },
'8': { ODDS: .75, ROLLS: 3 },
'9': { ODDS: .68, ROLLS: 3 },
'10': { ODDS: .67, ROLLS: 2 },
'11': { ODDS: .56, ROLLS: 1 },
'12': { ODDS: .47, ROLLS: 1 },
},
'5': {
'6': { ODDS: .73, ROLLS: 3 },
'7': { ODDS: .78, ROLLS: 4 },
'8': { ODDS: .77, ROLLS: 3 },
'9': { ODDS: .75, ROLLS: 2 },
'10': { ODDS: .69, ROLLS: 2 },
'11': { ODDS: .68, ROLLS: 2 },
'12': { ODDS: .64, ROLLS: 1 },
},
'6': {
'7': { ODDS: .84, ROLLS: 5 },
'8': { ODDS: .82, ROLLS: 5 },
'9': { ODDS: .77, ROLLS: 3 },
'10': { ODDS: .75, ROLLS: 3 },
'11': { ODDS: .68, ROLLS: 2 },
'12': { ODDS: .67, ROLLS: 2 },
},
'7': {
'8': { ODDS: .84, ROLLS: 5 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .77, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .71, ROLLS: 2 },
},
'8': {
'9': { ODDS: .73, ROLLS: 3 },
'10': { ODDS: .72, ROLLS: 3 },
'11': { ODDS: .64, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'9': {
'10': { ODDS: .61, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .53, ROLLS: 1 },
},
'10': {
'11': { ODDS: .47, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'11': {
'12': { ODDS: .32, ROLLS: 0 }
},
}
THREE_COLUMN_ODDS = {
'2': {
'3': {
'4': { ODDS: .52, ROLLS: 1 },
'5': { ODDS: .58, ROLLS: 1 },
'6': { ODDS: .68, ROLLS: 2 },
'7': { ODDS: .75, ROLLS: 3 },
'8': { ODDS: .76, ROLLS: 3 },
'9': { ODDS: .71, ROLLS: 2 },
'10': { ODDS: .63, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'4': {
'5': { ODDS: .66, ROLLS: 2 },
'6': { ODDS: .76, ROLLS: 3 },
'7': { ODDS: .81, ROLLS: 4 },
'8': { ODDS: .82, ROLLS: 5 },
'9': { ODDS: .76, ROLLS: 3 },
'10': { ODDS: .74, ROLLS: 3 },
'11': { ODDS: .63, ROLLS: 2 },
'12': { ODDS: .55, ROLLS: 1 },
},
'5': {
'6': { ODDS: .77, ROLLS: 3 },
'7': { ODDS: .81, ROLLS: 4 },
'8': { ODDS: .83, ROLLS: 5 },
'9': { ODDS: .76, ROLLS: 3 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'6': {
'7': { ODDS: .86, ROLLS: 6 },
'8': { ODDS: .88, ROLLS: 7 },
'9': { ODDS: .83, ROLLS: 5 },
'10': { ODDS: .81, ROLLS: 4 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'7': {
'8': { ODDS: .89, ROLLS: 8 },
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .83, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .78, ROLLS: 4 },
},
'8': {
'9': { ODDS: .71, ROLLS: 2 },
'10': { ODDS: .63, ROLLS: 2 },
'11': { ODDS: .53, ROLLS: 1 },
'12': { ODDS: .44, ROLLS: 1 },
},
'9': {
'10': { ODDS: .71, ROLLS: 2 },
'11': { ODDS: .64, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'10': {
'11': { ODDS: .58, ROLLS: 1 },
'12': { ODDS: .55, ROLLS: 1 },
},
'11': {
'12': { ODDS: .44, ROLLS: 1 },
},
},
'3': {
'4': {
'5': { ODDS: .67, ROLLS: 2 },
'6': { ODDS: .74, ROLLS: 3 },
'7': { ODDS: .79, ROLLS: 4 },
'8': { ODDS: .80, ROLLS: 4 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .66, ROLLS: 2 },
'12': { ODDS: .58, ROLLS: 1 },
},
'5': {
'6': { ODDS: .77, ROLLS: 3 },
'7': { ODDS: .79, ROLLS: 4 },
'8': { ODDS: .81, ROLLS: 4 },
'9': { ODDS: .78, ROLLS: 4 },
'10': { ODDS: .76, ROLLS: 3 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .64, ROLLS: 2 },
},
'6': {
'7': { ODDS: .86, ROLLS: 6 },
'8': { ODDS: .85, ROLLS: 6 },
'9': { ODDS: .83, ROLLS: 5 },
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'7': {
'8': { ODDS: .89, ROLLS: 8 },
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .84, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .78, ROLLS: 4 },
},
'8': {
'9': { ODDS: .84, ROLLS: 5 },
'10': { ODDS: .83, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'9': {
'10': { ODDS: .78, ROLLS: 4 },
'11': { ODDS: .71, ROLLS: 2 },
'12': { ODDS: .71, ROLLS: 2 },
},
'10': {
'11': { ODDS: .66, ROLLS: 2 },
'12': { ODDS: .63, ROLLS: 2 },
},
'11': {
'12': { ODDS: .53, ROLLS: 1 },
},
},
'4': {
'5': {
'6': { ODDS: .80, ROLLS: 4 },
'7': { ODDS: .85, ROLLS: 6 },
'8': { ODDS: .85, ROLLS: 6 },
'9': { ODDS: .80, ROLLS: 4 },
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .71, ROLLS: 2 },
},
'6': {
'7': { ODDS: .89, ROLLS: 8 },
'8': { ODDS: .91, ROLLS: 10 },
'9': { ODDS: .86, ROLLS: 6 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .83, ROLLS: 5 },
'12': { ODDS: .82, ROLLS: 5 },
},
'7': {
'8': { ODDS: .90, ROLLS: 9 },
'9': { ODDS: .89, ROLLS: 8 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .83, ROLLS: 5 },
},
'8': {
'9': { ODDS: .86, ROLLS: 6 },
'10': { ODDS: .88, ROLLS: 7 },
'11': { ODDS: .82, ROLLS: 5 },
'12': { ODDS: .81, ROLLS: 4 },
},
'9': {
'10': { ODDS: .82, ROLLS: 5 },
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'10': {
'11': { ODDS: .76, ROLLS: 3 },
'12': { ODDS: .74, ROLLS: 3 },
},
'11': {
'12': { ODDS: .63, ROLLS: 2 },
},
},
'5': {
'6': {
'7': { ODDS: .89, ROLLS: 8 },
'8': { ODDS: .90, ROLLS: 9 },
'9': { ODDS: .87, ROLLS: 7 },
'10': { ODDS: .86, ROLLS: 6 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .82, ROLLS: 5 },
},
'7': {
'8': { ODDS: .91, ROLLS: 10 },
'9': { ODDS: .85, ROLLS: 6 },
'10': { ODDS: .89, ROLLS: 8 },
'11': { ODDS: .84, ROLLS: 5 },
'12': { ODDS: .84, ROLLS: 5 },
},
'8': {
'9': { ODDS: .87, ROLLS: 7 },
'10': { ODDS: .86, ROLLS: 6 },
'11': { ODDS: .83, ROLLS: 5 },
'12': { ODDS: .83, ROLLS: 5 },
},
'9': {
'10': { ODDS: .80, ROLLS: 4 },
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .76, ROLLS: 3 },
},
'10': {
'11': { ODDS: .78, ROLLS: 4 },
'12': { ODDS: .76, ROLLS: 3 },
},
'11': {
'12': { ODDS: .71, ROLLS: 2 },
},
},
'6': {
'7': {
'8': { ODDS: .92, ROLLS: 12 },
'9': { ODDS: .91, ROLLS: 10 },
'10': { ODDS: .90, ROLLS: 9 },
'11': { ODDS: .89, ROLLS: 8 },
'12': { ODDS: .89, ROLLS: 8 },
},
'8': {
'9': { ODDS: .90, ROLLS: 9 },
'10': { ODDS: .91, ROLLS: 10 },
'11': { ODDS: .85, ROLLS: 6 },
'12': { ODDS: .88, ROLLS: 7 },
},
'9': {
'10': { ODDS: .85, ROLLS: 6 },
'11': { ODDS: .81, ROLLS: 4 },
'12': { ODDS: .83, ROLLS: 5 },
},
'10': {
'11': { ODDS: .80, ROLLS: 4 },
'12': { ODDS: .82, ROLLS: 5 },
},
'11': {
'12': { ODDS: .76, ROLLS: 3 },
},
},
'7': {
'8': {
'9': { ODDS: .89, ROLLS: 8 },
'10': { ODDS: .89, ROLLS: 8 },
'11': { ODDS: .86, ROLLS: 6 },
'12': { ODDS: .86, ROLLS: 6 },
},
'9': {
'10': { ODDS: .85, ROLLS: 6 },
'11': { ODDS: .79, ROLLS: 4 },
'12': { ODDS: .81, ROLLS: 4 },
},
'10': {
'11': { ODDS: .79, ROLLS: 4 },
'12': { ODDS: .81, ROLLS: 4 },
},
'11': {
'12': { ODDS: .75, ROLLS: 3 },
},
},
'8': {
'9': {
'10': { ODDS: .80, ROLLS: 4 },
'11': { ODDS: .77, ROLLS: 3 },
'12': { ODDS: .77, ROLLS: 3 },
},
'10': {
'11': { ODDS: .74, ROLLS: 3 },
'12': { ODDS: .76, ROLLS: 3 },
},
'11': {
'12': { ODDS: .68, ROLLS: 2 },
},
},
'9': {
'10': {
'11': { ODDS: .67, ROLLS: 2 },
'12': { ODDS: .66, ROLLS: 2 },
},
'11': {
'12': { ODDS: .58, ROLLS: 1 },
},
},
'10': {
'11': {
'12': { ODDS: .52, ROLLS: 1 },
},
},
}
NEW_COLUMN_PENALTY = 1
FINISH_COLUMN_REWARD = 1
FAVORITE_COLUMN_THRESHOLD = 2/3
CONTESTED_COLUMN = 1
MY_PROGRESS_MODIFIER = .5
OPPONENT_PROGRESS_MODIFIER = .5
STEP_DIVISOR = .08
ROUGH_ODDS_THRESHOLD = .2
DESPERATION_TURNS = 2
def get_progress(self, board, changes):
"""
Return per-column progress for the leading opponent and for this player.
The leader is the opponent farthest along in each column.
"""
leader_progress = {}
my_progress = {}
for key in board.keys():
leader_progress[key] = {}
leader = board[key]['players'][0][0]
lead = board[key]['players'][0][1] / board[key]['steps']
if leader == self.name:
leader = board[key]['players'][1][0]
lead = board[key]['players'][1][1] / board[key]['steps']
for player in board[key]['players']:
progress = player[1] / board[key]['steps']
if lead < progress and player[0] != self.name:
leader = player[0]
lead = progress
if player[0] == self.name:
my_progress[key] = player[1] + changes[key]
my_progress[key] /= board[key]['steps']
leader_progress[key]['leader'] = leader
leader_progress[key]['progress'] = lead
return leader_progress, my_progress
def get_started_columns(self, changes):
"""
Return list of columns that I've started according to changes
"""
started = []
for col in changes.keys():
if col == 'turn':
continue
if changes[col] > 0:
started.append(col)
return sorted(started, key=lambda column: int(column))
def get_finished_columns(self, board, my_progress):
"""
Return a list of all columns finished, including those finished with
my current progress.
"""
finished = []
for key in board.keys():
for player in board[key]['players']:
if player[1] == board[key]['steps']:
finished.append(key)
if key not in finished and my_progress[key] == 1:
finished.append(key)
return sorted(finished, key=lambda column: int(column))
def continue_based_on_odds(self, started, turns):
"""
Determine whether to continue simply based on optimal number of
turns to take.
"""
if len(started) == 3:
col1, col2, col3 = started[0], started[1], started[2]
return self.THREE_COLUMN_ODDS[col1][col2][col3][self.ROLLS] > turns
if len(started) == 2:
col1, col2 = started[0], started[1]
return self.TWO_COLUMN_ODDS[col1][col2][self.ROLLS] > turns
return self.ONE_COLUMN_ODDS[started[0]][self.ROLLS] > turns
def continue_based_on_new_column(self, board, started, finished, turns):
"""
Continue based on chances of getting a new valid column.
Rough estimation for converting 2 column odds to 3 columns.
"""
base_odds = self.TWO_COLUMN_ODDS[started[0]][started[1]][self.ODDS]
base_rolls = self.TWO_COLUMN_ODDS[started[0]][started[1]][self.ROLLS]
available = [col for col in board.keys() if col not in started and col not in finished]
odds = 0
for col in available:
odds += (base_odds * self.ONE_COLUMN_ODDS[col][self.ODDS])
# Quick and dirty estimation
new_rolls = (odds - self.ROUGH_ODDS_THRESHOLD) / self.STEP_DIVISOR
return base_rolls + new_rolls > turns
def continue_based_on_new_columns(self, board, started, finished, turns):
"""
Continue based on chances of getting 2 new valid columns.
Rough estimation for converting 1 column odds to 3 columns.
"""
base_odds = self.ONE_COLUMN_ODDS[started[0]][self.ODDS]
base_rolls = self.ONE_COLUMN_ODDS[started[0]][self.ROLLS]
available = [col for col in board.keys() if col not in started and col not in finished]
odds = 0
for i in range(len(available)):
for j in range(i+1, len(available)):
col1, col2 = available[i], available[j]
odds += (base_odds * self.TWO_COLUMN_ODDS[col1][col2][self.ODDS])
# Quick and dirty estimation
new_rolls = (odds - self.ROUGH_ODDS_THRESHOLD) / self.STEP_DIVISOR
return base_rolls + new_rolls > turns
def opponent_might_win(self, leader_progress):
"""
Check to see if opponent might win in the next turn.
"""
opponents = {}
for col in leader_progress.keys():
leader = leader_progress[col]['leader']
if leader == self.name:
continue
if leader not in opponents.keys():
opponents[leader] = 0
if leader_progress[col]['progress'] == 1.0:
opponents[leader] += 1
if opponents[leader] >= 2:
return True
return False
def started_columns_are_contested(
self, board, changes, my_progress, started):
"""
Check to see if any of my columns I've started are currently contested.
"""
for col in started:
players = board[col]['players']
step_size = 1 / board[col]['steps']
for player in players:
if player[0] == self.name:
continue
# Opponent is within 1/3 of my progress, and it's not finished
if abs(my_progress[col] - player[1] * step_size) <= 1/3 and \
my_progress[col] != 1:
return True
def did_finish_column(self, started, my_progress):
"""
Did I finish a column this turn?
"""
for col in started:
if my_progress[col] == 1.0:
return True
def is_continuing_turn(self, board, changes):
"""
Decide to continue rolling. Based on if I just won the game,
optimal rolling turns, I finished a column, and
number of columns already finished in the game.
"""
leader_progress, my_progress = self.get_progress(board, changes)
started_columns = self.get_started_columns(changes)
finished_columns = self.get_finished_columns(board, my_progress)
# No reason to stop before starting 3 columns and none are finished.
if len(started_columns) < 3 and len(finished_columns) == 0:
return True
# Stop if I won
if len(self.get_my_finished(my_progress)) >= 3:
return False
# If I finished a column, let's just end there.
if self.did_finish_column(started_columns, my_progress):
return False
# If I started 3 columns, and I'm not finishing a column,
# just roll optimal number of times.
if len(started_columns) == 3:
return self.continue_based_on_odds(
started_columns, changes['turn'])
# Columns are finished, but fewer than 3 columns started
if len(started_columns) == 2:
return self.continue_based_on_new_column(
board, started_columns, finished_columns, changes['turn'])
elif len(started_columns) == 1:
return self.continue_based_on_new_columns(
board, started_columns, finished_columns, changes['turn'])
# Shouldn't ever get here...continuing without starting a column...
return True
def determine_move_value(self, move, leader_progress, my_progress, board, started):
"""
Assign a move value primarily based on odds of continuing turns, with
bias towards not starting new columns and finishing columns.
"""
value = 0
if len(move) == 2 and move[0] != move[1]:
col1, col2 = str(move[0]), str(move[1])
value = self.TWO_COLUMN_ODDS[col1][col2][self.ODDS]
elif len(move) == 2:
col = str(move[0])
value = 2 * (self.ONE_COLUMN_ODDS[col][self.ODDS])
else:
col = str(move[0])
value = self.ONE_COLUMN_ODDS[col][self.ODDS]
unique_columns = set(move)
for c in unique_columns:
col = str(c)
step_size = 1 / board[col]['steps']
# Reward for finishing a column
if my_progress[col] + step_size == 1:
value += self.FINISH_COLUMN_REWARD
# Penalize for starting new columns
if str(c) not in started:
value -= self.NEW_COLUMN_PENALTY
# Less likely columns are desirable when 3 columns have started
if len(started) == 3:
value += (1 - self.ONE_COLUMN_ODDS[col][self.ODDS])
return value
def get_my_finished(self, my_progress):
finished_columns = []
for col in my_progress.keys():
if my_progress[col] == 1:
finished_columns.append(col)
return finished_columns
def look_for_the_win(self, board, my_progress, moves):
winning_move = None
finished = self.get_my_finished(my_progress)
for move in moves:
columns_finished = 0
# Consider moving twice on same column
if len(move) == 2 and move[0] == move[1]:
col = str(move[0])
step_size = 2 / board[col]['steps']
if step_size + my_progress[col] == 1:
columns_finished += 1
else:
# Otherwise, maybe I can finish two at a time
for m in move:
col = str(m)
step_size = 1 / board[col]['steps']
if step_size + my_progress[col] == 1:
columns_finished += 1
# If finishing these columns wins me the game, let's do it
if len(finished) + columns_finished >= 3:
winning_move = move
break
return winning_move
def compare_with_leader(self, leader_progress, my_progress, board, col):
step_size = 1 / board[col]['steps']
return (my_progress[col] - leader_progress[col]['progress']) / step_size
def choose_move(self, moves, board, changes, invalid_move=False):
leader_progress, my_progress = self.get_progress(board, changes)
started = self.get_started_columns(changes)
# Look for moves that let me win
best_move = self.look_for_the_win(board, my_progress, moves)
if best_move is not None:
return best_move
# Choose move based on best move value
best_move = moves[0]
best_move_value = self.determine_move_value(
best_move, leader_progress, my_progress, board, started)
for i in range(1, len(moves)):
move = moves[i]
move_value = self.determine_move_value(
move, leader_progress, my_progress, board, started)
if move_value > best_move_value:
best_move = move
best_move_value = move_value
return best_move
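# --- Illustrative sketch (not part of the original file) ---------------------
# How the ROLLS entries drive continue_based_on_odds(): the player keeps rolling
# while the table's optimal roll count for the started columns exceeds the rolls
# already taken this turn (changes['turn']). The numbers below are just one
# example lookup.
if __name__ == '__main__':
    started = ['6', '7', '8']        # columns advanced so far this turn
    rolls_taken = 5                  # rolls already made, i.e. changes['turn']
    optimal = JeffPlayer.THREE_COLUMN_ODDS['6']['7']['8'][JeffPlayer.ROLLS]
    print('keep rolling' if optimal > rolls_taken else 'stop')  # 12 > 5 -> keep rolling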
# File: Python2/src/main.py (repo: nataddrho/digicueblue)
#!/usr/bin/env python
# <NAME> 10/13/2017
import serial
import serialport
import bgapi
import gui
import digicueblue
import traceback
import time
import threading
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
class App(threading.Thread): # thread the GUI so that BGAPI can run in the background
def __init__(self, dcb):
self.dcb = dcb
threading.Thread.__init__(self)
self.start()
def callback(self):
self.root.quit()
def run(self):
self.root = Tk.Tk()
self.gui = gui.GUI(self.root, self.dcb)
self.root.mainloop()
def main():
try:
f = open("comport.cfg", "r")
comport = f.readline().strip(' ')
f.close()
except BaseException:
# open comport selection gui
serialport.launch_selection()
return
try:
# open serial port and launch application
print "Opening %s" % comport
ser = serial.Serial(comport, 115200, timeout=1, writeTimeout=1)
dcb = digicueblue.DigicueBlue(filename="data.csv", debugprint=False)
app = App(dcb)
bg = bgapi.Bluegiga(dcb, ser, debugprint=True)
except BaseException:
print traceback.format_exc()
try:
ser.close()
except BaseException:
pass
text = """Please make sure the BLED112 dongle is plugged into the COM port
specified in comport.cfg, and that no other programs are using the port.
Use the serialport GUI to help select the correct port."""
text = text.replace('\n', ' ')
text = text.replace('\t', '')
print text
serialport.launch_selection()
if __name__ == '__main__':
main()
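# --- Illustrative sketch (not part of the original file) ---------------------
# The same "read comport.cfg or fall back to the selection GUI" idea from main(),
# written with a context manager so the file handle is always closed; the helper
# name is hypothetical. Using strip() instead of strip(' ') also removes the
# trailing newline from the configured port name.
def _read_configured_port(path="comport.cfg"):
    try:
        with open(path) as cfg:
            return cfg.readline().strip()
    except (IOError, OSError):
        return None  # caller would then run serialport.launch_selection()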
# File: messager.py (repo: plasticruler/newshound)
import requests
#newspi key <KEY>
class Article:
link:str
headline:str
summary:str
body:str
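# --- Illustrative sketch (not part of the original file) ---------------------
# Article above only declares field annotations, so it has no constructor.
# A dataclass variant (hypothetical name ArticleRecord) provides __init__ and a
# readable repr for free:
from dataclasses import dataclass

@dataclass
class ArticleRecord:
    link: str
    headline: str
    summary: str
    body: str

# Example: ArticleRecord(link="https://example.org", headline="...", summary="...", body="...")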
# File: PyMaSC/handler/mappability.py (repo: ronin-gw/PyMaSC)
import logging
import os
import json
from multiprocessing import Process, Queue, Lock
import numpy as np
from PyMaSC.core.mappability import MappableLengthCalculator
from PyMaSC.utils.progress import ProgressHook, MultiLineProgressManager
from PyMaSC.utils.compatible import tostr, xrange
from PyMaSC.utils.output import prepare_outdir
from PyMaSC.utils.calc import exec_worker_pool
logger = logging.getLogger(__name__)
class BWIOError(IOError):
pass
class JSONIOError(IOError):
pass
class NeedUpdate(Exception):
pass
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.long, np.float, np.float_)):
return float(obj)
elif isinstance(obj, (np.uint, np.int32, np.int64)):
return int(obj)
else:
return super(NumpyEncoder, self).default(obj)
class MappabilityHandler(MappableLengthCalculator):
@staticmethod
def calc_mappable_len_required_shift_size(readlen, max_shift):
return max_shift - readlen + 1 if max_shift > 2*readlen - 1 else readlen
def __init__(self, path, max_shift=0, readlen=0, map_path=None, nworker=1):
max_shift = self.calc_mappable_len_required_shift_size(readlen, max_shift)
self.nworker = nworker
if not os.access(path, os.R_OK):
reason = "file is unreadable." if os.path.isfile(path) else "no such file."
logger.critical("Failed to open '{}': {}".format(path, reason))
raise BWIOError
super(MappabilityHandler, self).__init__(path, max_shift)
self.close()
self._progress.disable_bar()
self.need_save_stats = True
if map_path:
self.map_path = map_path
else:
self.map_path = os.path.splitext(path)[0] + "_mappability.json"
if not os.path.exists(self.map_path):
self._check_saving_directory_is_writable()
logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
elif not os.path.isfile(self.map_path):
logger.critical("Specified path is not file: '{}'".format(self.map_path))
raise JSONIOError
elif not os.access(self.map_path, os.R_OK):
logger.error("Failed to read '{}'".format(self.map_path))
else:
self._try_load_mappability_stats()
if self.need_save_stats:
self._check_stats_is_overwritable()
logger.info("Calcurate mappable length with max shift size {}.".format(max_shift))
else:
logger.info("Use mappability stats read from '{}'".format(self.map_path))
def _check_saving_directory_is_writable(self):
dirname = os.path.dirname(self.map_path)
dirname = dirname if dirname else '.'
if not prepare_outdir(dirname, logger):
raise JSONIOError
def _try_load_mappability_stats(self):
try:
stats = self._read_mappability_stats()
except IOError as e:
logger.error("Failed to read '{}'".format(self.map_path))
logger.error("[Errno {}] {}".format(e.errno, e.message))
except (TypeError, OverflowError, ValueError, KeyError, IndexError) as e:
logger.error("Failed to load json file: '{}'".format(self.map_path))
except NeedUpdate:
pass
else:
self._load_mappability_stats(stats)
def _read_mappability_stats(self):
with open(self.map_path) as f:
stats = json.load(f)
for k in ("max_shift", "__whole__", "references"):
if k not in stats:
logger.error("Mandatory key '{}' not found.".format(k))
raise KeyError(k)
if stats["max_shift"] < self.max_shift:
logger.info("Specified shift length longer than former analysis. The stats will be updated.")
raise NeedUpdate
if stats["max_shift"] != len(stats["__whole__"]) - 1:
logger.error("Max shift length for whole genome unmatched.")
raise IndexError
for ref in self.chromsizes:
if ref not in stats["references"]:
logger.error("Reference '{}' not found.".format(ref))
raise KeyError(ref)
if stats["max_shift"] != len(stats["references"][ref]) - 1:
logger.error("Max shift length for 'ref' unmatched.".format(ref))
raise IndexError
return stats
def _load_mappability_stats(self, stats):
self.mappable_len = stats["__whole__"][:self.max_shift + 1]
self.chrom2mappable_len = {ref: b[:self.max_shift + 1] for ref, b in stats["references"].items()}
self.chrom2is_called = {ref: True for ref in self.chromsizes}
self.is_called = True
self.need_save_stats = False
def _check_stats_is_overwritable(self):
if not os.access(self.map_path, os.W_OK):
logger.critical("Failed to overwrite '{}'".format(self.map_path))
raise JSONIOError
else:
logger.warning("Existing file '{}' will be overwritten.".format(self.map_path))
def save_mappability_stats(self):
if not self.need_save_stats:
return logger.info("Mappability stats updating is not required.")
logger.info("Save mappable length to '{}'".format(self.map_path))
try:
with open(self.map_path, 'w') as f:
json.dump({
"max_shift": self.max_shift,
"__whole__": self.mappable_len,
"references": self.chrom2mappable_len
}, f, indent=4, sort_keys=True, cls=NumpyEncoder)
except IOError as e:
logger.error("Faild to output: {}\n[Errno {}] {}".format(
e.filename, e.errno, e.message))
self.need_save_stats = False
def calc_mappability(self):
target_chroms = [tostr(c) for c, b in self.chrom2is_called.items() if b is False]
if not target_chroms:
return self._sumup_mappability()
order_queue = Queue()
report_queue = Queue()
logger_lock = Lock()
progress = MultiLineProgressManager()
workers = [MappabilityCalcWorker(self.path, self.max_shift, order_queue, report_queue, logger_lock)
for _ in range(min(self.nworker, len(target_chroms)))]
with exec_worker_pool(workers, target_chroms, order_queue):
while not self.is_called:
chrom, obj = report_queue.get()
if chrom is None: # update progress
chrom, body = obj
with logger_lock:
progress.update(chrom, body)
else:
length = obj
self.chrom2mappable_len[chrom] = tuple(length)
self.chrom2is_called[chrom] = True
if all(self.chrom2is_called.values()):
self.is_called = True
with logger_lock:
progress.erase(chrom)
progress.clean()
self._sumup_mappability()
def _sumup_mappability(self):
for length in self.chrom2mappable_len.values():
for i in xrange(self.max_shift + 1):
self.mappable_len[i] += length[i]
class MappabilityCalcWorker(Process):
def __init__(self, path, max_shift, order_queue, report_queue, logger_lock):
super(MappabilityCalcWorker, self).__init__()
self.calculator = MappableLengthCalculator(path, max_shift, logger_lock)
self.calculator._progress.disable_bar()
self.order_queue = order_queue
self.report_queue = report_queue
self.logger_lock = logger_lock
self.calculator._progress = ProgressHook(report_queue)
def run(self):
with self.logger_lock:
logger.debug("{}: Hello. My pid is {}.".format(self.name, os.getpid()))
while True:
chrom = self.order_queue.get()
if chrom is None:
break
with self.logger_lock:
logger.debug("{}: Process {}...".format(self.name, chrom))
self.calculator.calc_mappability(chrom)
self.report_queue.put((chrom, self.calculator.chrom2mappable_len[chrom]))
with self.logger_lock:
logger.debug("{}: Goodbye.".format(self.name))
self.calculator.close()
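# --- Illustrative sketch (not part of the original module) -------------------
# Worked example of calc_mappable_len_required_shift_size(): mappable lengths
# are only needed up to max(readlen, max_shift - readlen + 1).
#   readlen=36, max_shift=500 -> 500 > 2*36 - 1, so 500 - 36 + 1 = 465
#   readlen=36, max_shift=50  -> 50 <= 71, so the read length itself (36) is used
if __name__ == '__main__':
    assert MappabilityHandler.calc_mappable_len_required_shift_size(36, 500) == 465
    assert MappabilityHandler.calc_mappable_len_required_shift_size(36, 50) == 36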
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tmpl/views.py | Nahid-Hassan/fullstack-software-development | 297 | 9505 | <filename>courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tmpl/views.py
from django.shortcuts import render
from django.views import View
# Create your views here.
def simple(request):
return render(request, 'tmpl/simple.html')
def guess(request) :
context = {'zap' : '42' }
return render(request, 'tmpl/guess.html', context)
def special(request) :
context = {'txt' : '<b>bold</b>',
'zap' : '42' }
return render(request, 'tmpl/special.html', context)
def loop(request) :
f = ['Apple', 'Orange', 'Banana', 'Lychee']
n = ['peanut', 'cashew']
x = {'fruits' : f, 'nuts' : n, 'zap' : '42' }
return render(request, 'tmpl/loop.html', x)
def cond(request) :
x = {'guess' : '42' }
return render(request, 'tmpl/cond.html', x)
def nested(request) :
x = {'outer' : { 'inner' : '42' } }
return render(request, 'tmpl/nested.html', x)
# Call this with a parameter number
class GameView(View) :
def get(self, request, guess) :
x = {'guess' : int(guess) }
return render(request, 'tmpl/cond.html', x)
# Using inheritance (extend)
class Game2View(View) :
def get(self, request, guess) :
x = {'guess' : int(guess) }
return render(request, 'tmpl/cond2.html', x)
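# Illustrative sketch (hypothetical, not part of dj4e): one way the parameterized
# class-based views above could be wired up in a separate urls.py using Django's
# int path converter. The route names below are assumptions; the actual dj4e URL
# configuration may differ, so this is left as a commented sketch.
# from django.urls import path
# from . import views
# urlpatterns = [
#     path('simple', views.simple, name='simple'),
#     path('game/<int:guess>', views.GameView.as_view(), name='game'),
#     path('game2/<int:guess>', views.Game2View.as_view(), name='game2'),
# ]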
webapp/ex.py | jykim-rust/python | 0 | 9506 | <reponame>jykim-rust/python<gh_stars>0
from flask import escape
'''with open('ex') as full:
for line in full:
print(line,end='**')
'''
'''
a=[]
with open('ex') as full:
for line in full:
a.append(line.split('|'))
print(a)
'''
'''
with open('ex') as full:
for line in full.readline():
print(line)
'''
contents=[]
with open('ex') as log:
for line in log:
#contents.append([])
for item in line.split('|'):
contents.append(item)
print(contents)
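# Illustrative variant (sketch): the commented-out contents.append([]) above hints
# at grouping fields per line instead of building one flat list. One way to do
# that with the same 'ex' file, keeping a list of rows:
def _grouped_lines(path='ex'):
    rows = []
    with open(path) as log:
        for line in log:
            rows.append([item.strip() for item in line.split('|')])
    return rows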
lib/twitter_utils.py | Vman45/ask-alexa-twitter | 310 | 9507 | <gh_stars>100-1000
import requests
import jsonpickle
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs, urlencode
import cherrypy
from collections import defaultdict
import json
import os
import re
from collections import defaultdict
# For readable serializations
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
class LocalCache(object):
""" Generic class for encapsulating twitter credential caching """
server_data_template = "{}.server"
user_data_template = "{0}.user.{1}"
def __init__(self, backup = "tmp/twitter.cache"):
self.backup = backup #Unique identifier for the backup of this cache
self.memcache = {
"users" : defaultdict(lambda : {}),
"server": defaultdict(lambda : {})
}
self.deserialize()
def users(self):
return self.memcache['users']
def set_user_state(self, user_id, state):
self.memcache['users'][user_id] = state
def update_user_state(self, user_id, state = {}):
self.memcache['users'][user_id].update(state)
def get_user_state(self, user_id):
return self.memcache['users'][user_id]
def clear_user_state(self, user_id):
return self.memcache['users'][user_id].clear()
def update_server_state(self, state_dict):
self.memcache['server'].update(state_dict)
def get_server_state(self):
return self.memcache['server']
def clear_server_state(self):
return self.memcache['server'].clear()
def initialize_user_queue(self, user_id, queue):
self.memcache['users'][user_id]['user_queue'] = ReadableQueue(queue)
def user_queue(self, user_id):
if 'user_queue' in self.memcache['users'][user_id]:
return self.memcache['users'][user_id]['user_queue']
def server_fname(self):
return self.server_data_template.format(self.backup)
def user_fname(self, user):
return self.user_data_template.format(self.backup, user)
def deserialize(self):
cache_loaded = False
if os.path.exists(self.server_fname()) and not os.path.isdir(self.backup):
try:
self.memcache = { "server" : {},
"users" : {} }
with open(self.server_fname()) as backupfile:
print ("Attempting to reload cache")
self.memcache['server'] = jsonpickle.decode(backupfile.read())
print ("Server cache loaded", json.dumps(self.memcache, indent=4))
for user in self.memcache['server']['user_list']:
# Try to load as much user data as possible
if os.path.exists(self.user_fname(user)):
print ("found path for user", user)
with open(self.user_fname(user)) as userfile:
user_data = jsonpickle.decode(userfile.read())
self.memcache['users'][user] = user_data
cache_loaded = True
except Exception as e:
print ("Cache file corrupted...")
raise e
if not cache_loaded:
print ("Cache could not be loaded")
pass
else:
print ("CACHE LOADED SUCCESSFULLY!")
def serialize(self):
json_to_serialize = self.memcache['server']
user_list = list(self.users().keys())
json_to_serialize.update({"user_list" : user_list})
with open(self.server_fname(), 'w') as backup_server:
# Serialize Server:
json_encoded = jsonpickle.encode(json_to_serialize)
backup_server.write(json_encoded)
for user in user_list:
user_data = self.get_user_state(user)
json_encoded = jsonpickle.encode(user_data)
with open(self.user_fname(user), 'w') as userfile:
userfile.write(json_encoded)
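# Illustrative sketch: how the credential cache above is typically exercised. The
# backup path and token values are made up; like the default backup location, it
# assumes a tmp/ directory already exists. serialize() writes "<backup>.server"
# plus one "<backup>.user.<id>" file per cached user.
def _local_cache_sketch():
    cache = LocalCache(backup="tmp/example.cache")
    cache.update_server_state({"twitter_keys": ("consumer-key", "consumer-secret")})
    cache.set_user_state("user-123", {"access_token": "tok", "access_secret": "sec"})
    token = cache.get_user_state("user-123")["access_token"]
    cache.serialize()  # persist server state and each user's state to disk
    return token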
class ReadableQueue(object):
def __init__(self, queue=[], pos=0):
self.hashmap = { "queue" : [(i, e) for i,e in enumerate(queue)],
"pos" : pos }
return
def queue(self):
return self.hashmap['queue']
def is_empty(self):
return len(self.queue()) == 0
def is_finished(self):
return self.pos() == len(self.queue())
def pos(self):
return self.hashmap['pos']
def set_pos(self, val):
self.hashmap['pos'] = val
def get_next(self, offset=1):
if self.pos() < len(self.queue()):
temp_queue = self.queue()[self.pos(): self.pos() + offset]
self.set_pos(self.pos() + offset)
if self.pos() > len(self.queue()): self.set_pos(len(self.queue()))
return temp_queue
def read_out_next(self, offset=1):
return " ".join([readable.read_out(index) for index,readable in self.get_next(offset)])
def has_prev(self):
return self.pos() > 0
def get_prev(self, offset=1):
if self.pos() > 0:
self.set_pos(self.pos() - offset)
if self.pos() < 0:
offset = offset + self.pos()
                # [1, current(2), 3] get_prev(offset=3)
# pos :=> -2, offset :=> 3-2 = 1, pos :=> 0, then read 0 to 1
self.set_pos(0)
return self.queue()[self.pos() : offset]
return None
def read_out_prev(self, offset=1):
return " ".join([readable.read_out() for readable in self.get_prev(offset)])
#Local cache caches tokens for different users
local_cache = LocalCache()
def strip_html(text):
""" Get rid of ugly twitter html """
def reply_to(text):
replying_to = []
split_text = text.split()
for index, token in enumerate(split_text):
if token.startswith('@'): replying_to.append(token[1:])
else:
message = split_text[index:]
break
rply_msg = ""
if len(replying_to) > 0:
rply_msg = "Replying to "
for token in replying_to[:-1]: rply_msg += token+","
if len(replying_to)>1: rply_msg += 'and '
rply_msg += replying_to[-1]+". "
return rply_msg + " ".join(message)
text = reply_to(text)
text = text.replace('@', ' ')
return " ".join([token for token in text.split()
if ('http:' not in token) and ('https:' not in token)])
class Tweet(object):
def __init__(self, json_obj):
self.tweet = json_obj
def get_id(self):
return self.tweet['id']
def get_raw_text(self):
return self.tweet['text']
def _process_text(self):
text = strip_html(self.tweet['text'])
user_mentions = self.tweet['entities']['user_mentions']
text = text.replace('@', 'at ')
for user in user_mentions:
text = text.replace(user['screen_name'], user['name'])
return text
def get_screen_name(self):
return self.tweet['user']['screen_name']
def get_user_name(self):
return self.tweet['user']['name']
def read_out(self, index):
text = self._process_text()
return "tweet number {num} by {user} : {text} ,".format(num=index+1,
user=self.get_user_name(),
text = text)
def detailed_description(self):
response_builder = ["This tweet was posted by {user_name} whose twitter handle is {screen_name} the account description reads: {description}."
.format(screen_name=self.tweet['user']['screen_name'],
user_name=self.tweet['user']['name'],
description=self.tweet['user']['description'])]
if self.tweet['retweeted']:
response_builder += ["It's been retweeted {} times.".format(self.tweet['retweet_count'])]
if self.tweet['favorited']:
response_builder += ["{} people have favorited it.".format(self.tweet['favorites_count'])]
if self.tweet["in_reply_to_screen_name"]:
response_builder += ["it was posted in response to user {}.".format(self.tweet['in_reply_to_screen_name'])]
response_builder += ["the text of the tweet is, {}.".format(self._process_text())]
return " ".join(response_builder)
def user_mentions(self):
return self.tweet['user_mentions']
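# Illustrative sketch: the minimal subset of Twitter's status JSON that
# Tweet.read_out() touches. A real API response carries many more fields; this
# hand-built dict exists only for demonstration.
def _tweet_read_out_sketch():
    status = {"id": 1,
              "text": "hello world http://example.com",
              "entities": {"user_mentions": []},
              "user": {"screen_name": "example_user", "name": "Example User"}}
    tweet = Tweet(status)
    return tweet.read_out(0)  # -> "tweet number 1 by Example User : hello world ,"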
def get_cached_access_pair(uid):
if uid in local_cache.users():
access_token = local_cache.get_user_state(uid)['access_token']
access_secret = local_cache.get_user_state(uid)['access_secret']
return access_token, access_secret
else:
raise ValueError
def get_request_token(callback_url=None):
url = "https://api.twitter.com/oauth/request_token"
consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
auth = OAuth1(consumer_key, consumer_secret)
params = { "oauth_callback" : callback_url }
r = requests.post(url, auth=auth, params=params)
response_obj = parse_qs(r.text)
local_cache.update_server_state({ "request_token" : response_obj['oauth_token'][0],
"request_secret": response_obj['oauth_token_secret'][0] })
return response_obj['oauth_token_secret'], response_obj['oauth_token']
def authenticate_user_page(callback_url="", metadata=None):
url = "https://api.twitter.com/oauth/authenticate"
oauth_secret, oauth_token = get_request_token(callback_url)
local_cache.update_server_state({'metadata' : metadata })
params = { "force_login" : True,
"oauth_token": oauth_token }
r = requests.get(url, params=params)
return r.text
def post_tweet(user_id, message, additional_params={}):
"""
Helper function to post a tweet
"""
url = "https://api.twitter.com/1.1/statuses/update.json"
params = { "status" : message }
params.update(additional_params)
r = make_twitter_request(url, user_id, params, request_type='POST')
print (r.text)
return "Successfully posted a tweet {}".format(message)
def get_access_token(oauth_token, oauth_verifier):
url = "https://api.twitter.com/oauth/access_token"
params = {"oauth_verifier" : oauth_verifier}
server_state = local_cache.get_server_state()
request_token = server_state['request_token']
request_secret = server_state['request_secret']
consumer_key, consumer_secret = server_state['twitter_keys']
auth = OAuth1(consumer_key, consumer_secret, request_token, request_secret)
r = requests.post(url, params = params, auth=auth)
response_obj = parse_qs(r.text)
uid = response_obj['oauth_token'][0]
print ("Access token", uid)
local_cache.set_user_state(user_id = uid,
state = { "access_token" : response_obj['oauth_token'][0],
"access_secret" : response_obj['oauth_token_secret'][0],
'twitter_user_id': response_obj['user_id'][0],
'screen_name' : response_obj ['screen_name'][0]
})
local_cache.serialize()
fragments = {
"state" : local_cache.get_server_state()['metadata']['state'],
"access_token" : uid,
"token_type" : "Bearer"
}
return urlencode(fragments)
def get_twitter_auth(user_id):
consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
access_token, access_secret = get_cached_access_pair(user_id)
return OAuth1(consumer_key, consumer_secret, access_token, access_secret)
def process_tweets(tweet_list):
""" Clean tweets and enumerate, preserving only things that we are interested in """
return [Tweet(tweet) for tweet in tweet_list]
def make_twitter_request(url, user_id, params={}, request_type='GET'):
""" Generically make a request to twitter API using a particular user's authorization """
if request_type == "GET":
return requests.get(url, auth=get_twitter_auth(user_id), params=params)
elif request_type == "POST":
return requests.post(url, auth=get_twitter_auth(user_id), params=params)
def get_user_twitter_details(user_id, params={}):
url = "https://api.twitter.com/1.1/users/lookup.json"
user_cache = local_cache.get_user_state(user_id)
params.update({"user_id": user_cache['twitter_user_id'] })
response = make_twitter_request(url, user_id, params)
return response.json()
def geo_search(user_id, search_location):
"""
Search for a location - free form
"""
url = "https://api.twitter.com/1.1/geo/search.json"
params = {"query" : search_location }
response = make_twitter_request(url, user_id, params).json()
return response
def closest_trend_search(user_id, params={}):
#url = "https://api.twitter.com/1.1/trends/place.json"
url = "https://api.twitter.com/1.1/trends/closest.json"
response = make_twitter_request(url, user_id, params).json()
return response
def list_trends(user_id, woe_id):
url = "https://api.twitter.com/1.1/trends/place.json"
params = { "id" : woe_id }
response = make_twitter_request(url, user_id, params).json()
return response
def read_out_tweets(processed_tweets, speech_convertor=None):
"""
Input - list of processed 'Tweets'
    Output - list of spoken responses
"""
return ["tweet number {num} by {user}. {text}.".format(num=index+1, user=user, text=text)
for index, (user, text) in enumerate(processed_tweets)]
def request_tweet_list(url, user_id, params={}):
return process_tweets(make_twitter_request(url, user_id).json())
def get_home_tweets(user_id, input_params={}):
url = "https://api.twitter.com/1.1/statuses/home_timeline.json"
print ("Trying to get home tweets")
response = request_tweet_list(url, user_id)
return response
def get_retweets_of_me(user_id, input_params={}):
""" returns recently retweeted tweets """
url = "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
print ("trying to get retweets")
return request_tweet_list(url, user_id)
def get_my_favourite_tweets(user_id, input_params = {}):
""" Returns a user's favourite tweets """
url = "https://api.twitter.com/1.1/favorites/list.json"
return request_tweet_list(url, user_id)
def get_user_latest_tweets(user_id, params={}):
url = "https://api.twitter.com/1.1/statuses/user_timeline.json?"
return request_tweet_list(url, user_id, params)
def get_latest_twitter_mentions(user_id):
url = "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
return request_tweet_list(url, user_id)
def search_for_tweets_about(user_id, params):
""" Search twitter API """
url = "https://api.twitter.com/1.1/search/tweets.json"
response = make_twitter_request(url, user_id, params)
return process_tweets(response.json()["statuses"])
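# Illustrative sketch (requires real, previously cached credentials, so it is not
# exercised here): once a user has completed the OAuth handshake above, their uid
# keys the cached access pair and the helpers can be chained. The uid and query
# are placeholders.
def _search_and_speak_sketch(uid="cached-user-uid"):
    tweets = search_for_tweets_about(uid, {"q": "python", "count": 5})
    local_cache.initialize_user_queue(uid, tweets)
    return local_cache.user_queue(uid).read_out_next(2)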
sundry/serializable.py | jamesabel/sundry | 2 | 9508 | <reponame>jamesabel/sundry
import json
from enum import Enum
from decimal import Decimal
def convert_serializable_special_cases(o):
"""
Convert an object to a type that is fairly generally serializable (e.g. json serializable).
This only handles the cases that need converting. The json module handles all the rest.
    For JSON, use json.dump or json.dumps with the argument
    default=convert_serializable_special_cases.
    Example:
    json.dumps(my_animal, indent=4, default=convert_serializable_special_cases)
:param o: object to be converted to a type that is serializable
:return: a serializable representation
"""
if isinstance(o, Enum):
serializable_representation = o.value
elif isinstance(o, Decimal):
# decimal.Decimal (e.g. in AWS DynamoDB), both integer and floating point
if o % 1 == 0:
# if representable with an integer, use an integer
serializable_representation = int(o)
else:
# not representable with an integer so use a float
serializable_representation = float(o)
else:
raise NotImplementedError(f"can not serialize {o} since type={type(o)}")
return serializable_representation
def make_serializable(o):
# Convert an object to a type that is fairly generally serializable (e.g. json serializable).
return json.loads(json.dumps(o, default=convert_serializable_special_cases, sort_keys=True))
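# Illustrative usage sketch: round-tripping an Enum member and DynamoDB-style
# Decimal values through make_serializable(). Color and the field values are
# made up for the example.
def _make_serializable_sketch():
    class Color(Enum):
        RED = 1
    record = {"color": Color.RED, "count": Decimal("3"), "ratio": Decimal("0.5")}
    return make_serializable(record)  # -> {'color': 1, 'count': 3, 'ratio': 0.5}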
legacy/neural_qa/train.py | FrancisLiang/models-1 | 4 | 9509 | import sys
import os
import argparse
import numpy as np
import paddle.v2 as paddle
import reader
import utils
import network
import config
from utils import logger
def save_model(trainer, model_save_dir, parameters, pass_id):
f = os.path.join(model_save_dir, "params_pass_%05d.tar.gz" % pass_id)
logger.info("model saved to %s" % f)
with utils.open_file(f, "w") as f:
trainer.save_parameter_to_tar(f)
def show_parameter_init_info(parameters):
"""
Print the information of initialization mean and standard deviation of
parameters
:param parameters: the parameters created in a model
"""
logger.info("Parameter init info:")
for p in parameters:
p_val = parameters.get(p)
logger.info(("%-25s : initial_mean=%-7.4f initial_std=%-7.4f "
"actual_mean=%-7.4f actual_std=%-7.4f dims=%s") %
(p, parameters.__param_conf__[p].initial_mean,
parameters.__param_conf__[p].initial_std, p_val.mean(),
p_val.std(), parameters.__param_conf__[p].dims))
logger.info("\n")
def show_parameter_status(parameters):
"""
Print some statistical information of parameters in a network
:param parameters: the parameters created in a model
"""
for p in parameters:
abs_val = np.abs(parameters.get(p))
abs_grad = np.abs(parameters.get_grad(p))
logger.info(
("%-25s avg_abs_val=%-10.6f max_val=%-10.6f avg_abs_grad=%-10.6f "
"max_grad=%-10.6f min_val=%-10.6f min_grad=%-10.6f") %
(p, abs_val.mean(), abs_val.max(), abs_grad.mean(), abs_grad.max(),
abs_val.min(), abs_grad.min()))
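# Illustrative sketch (no Paddle required): the quantities logged above are plain
# absolute-value summaries of a parameter array and its gradient. The shapes and
# random values below are arbitrary stand-ins.
def _parameter_stats_sketch():
    value = np.random.uniform(-0.1, 0.1, size=(4, 8))
    grad = np.random.uniform(-0.01, 0.01, size=(4, 8))
    abs_val, abs_grad = np.abs(value), np.abs(grad)
    return {"avg_abs_val": abs_val.mean(), "max_val": abs_val.max(),
            "avg_abs_grad": abs_grad.mean(), "max_grad": abs_grad.max(),
            "min_val": abs_val.min(), "min_grad": abs_grad.min()}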
def train(conf):
if not os.path.exists(conf.model_save_dir):
os.makedirs(conf.model_save_dir, mode=0755)
settings = reader.Settings(
vocab=conf.vocab,
is_training=True,
label_schema=conf.label_schema,
negative_sample_ratio=conf.negative_sample_ratio,
hit_ans_negative_sample_ratio=conf.hit_ans_negative_sample_ratio,
keep_first_b=conf.keep_first_b,
seed=conf.seed)
samples_per_pass = conf.batch_size * conf.batches_per_pass
train_reader = paddle.batch(
paddle.reader.buffered(
reader.create_reader(conf.train_data_path, settings,
samples_per_pass),
size=samples_per_pass),
batch_size=conf.batch_size)
# TODO(lipeng17) v2 API does not support parallel_nn yet. Therefore, we can
# only use CPU currently
paddle.init(
use_gpu=conf.use_gpu,
trainer_count=conf.trainer_count,
seed=conf.paddle_seed)
# network config
cost = network.training_net(conf)
# create parameters
    # NOTE: parameter values are not initialized here, therefore, we need to
# print parameter initialization info in the beginning of the first batch
parameters = paddle.parameters.create(cost)
# create optimizer
rmsprop_optimizer = paddle.optimizer.RMSProp(
learning_rate=conf.learning_rate,
rho=conf.rho,
epsilon=conf.epsilon,
model_average=paddle.optimizer.ModelAverage(
average_window=conf.average_window,
max_average_window=conf.max_average_window))
# create trainer
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=rmsprop_optimizer)
# begin training network
def _event_handler(event):
"""
Define end batch and end pass event handler
"""
if isinstance(event, paddle.event.EndIteration):
sys.stderr.write(".")
batch_num = event.batch_id + 1
total_batch = conf.batches_per_pass * event.pass_id + batch_num
if batch_num % conf.log_period == 0:
sys.stderr.write("\n")
logger.info("Total batch=%d Batch=%d CurrentCost=%f Eval: %s" \
% (total_batch, batch_num, event.cost, event.metrics))
if batch_num % conf.show_parameter_status_period == 0:
show_parameter_status(parameters)
elif isinstance(event, paddle.event.EndPass):
save_model(trainer, conf.model_save_dir, parameters, event.pass_id)
elif isinstance(event, paddle.event.BeginIteration):
if event.batch_id == 0 and event.pass_id == 0:
show_parameter_init_info(parameters)
## for debugging purpose
#with utils.open_file("config", "w") as config:
# print >> config, paddle.layer.parse_network(cost)
trainer.train(
reader=train_reader,
event_handler=_event_handler,
feeding=network.feeding,
num_passes=conf.num_passes)
logger.info("Training has finished.")
def main():
conf = config.TrainingConfig()
logger.info("loading word embeddings...")
conf.vocab, conf.wordvecs = utils.load_wordvecs(conf.word_dict_path,
conf.wordvecs_path)
logger.info("loaded")
logger.info("length of word dictionary is : %d." % len(conf.vocab))
train(conf)
if __name__ == "__main__":
main()
yggdrasil/drivers/MatlabModelDriver.py | astro-friedel/yggdrasil | 0 | 9510 | <reponame>astro-friedel/yggdrasil<gh_stars>0
import subprocess
import uuid as uuid_gen
import logging
from datetime import datetime
import os
import psutil
import warnings
import weakref
from yggdrasil import backwards, tools, platform, serialize
from yggdrasil.languages import get_language_dir
from yggdrasil.config import ygg_cfg
from yggdrasil.drivers.InterpretedModelDriver import InterpretedModelDriver
from yggdrasil.tools import TimeOut, sleep
logger = logging.getLogger(__name__)
try: # pragma: matlab
disable_engine = ygg_cfg.get('matlab', 'disable_engine', 'False').lower()
if platform._is_win or (disable_engine == 'true'):
_matlab_engine_installed = False
if not tools.is_subprocess():
logger.debug("matlab.engine disabled")
else:
import matlab.engine
_matlab_engine_installed = True
except ImportError: # pragma: no matlab
logger.debug("Could not import matlab.engine. "
+ "Matlab support for using a sharedEngine will be disabled.")
_matlab_engine_installed = False
_top_lang_dir = get_language_dir('matlab')
_compat_map = {
'R2015b': ['2.7', '3.3', '3.4'],
'R2017a': ['2.7', '3.3', '3.4', '3.5'],
'R2017b': ['2.7', '3.3', '3.4', '3.5', '3.6'],
'R2018b': ['2.7', '3.3', '3.4', '3.5', '3.6']}
def kill_all():
r"""Kill all Matlab shared engines."""
if platform._is_win: # pragma: windows
os.system(('taskkill /F /IM matlab.engine.shareEngine /T'))
else:
os.system(('pkill -f matlab.engine.shareEngine'))
def locate_matlab_engine_processes(): # pragma: matlab
r"""Get all of the active matlab sharedEngine processes.
Returns:
list: Active matlab sharedEngine processes.
"""
out = []
for p in psutil.process_iter():
p.info = p.as_dict(attrs=['name', 'pid', 'cmdline'])
if (((p.info['name'] == 'MATLAB')
and ('matlab.engine.shareEngine' in p.info['cmdline']))):
out.append(p) # p.info['pid'])
return out
def is_matlab_running():
r"""Determine if there is a Matlab engine running.
Returns:
bool: True if there is a Matlab engine running, False otherwise.
"""
if not _matlab_engine_installed: # pragma: no matlab
out = False
else: # pragma: matlab
out = (len(matlab.engine.find_matlab()) != 0)
return out
def locate_matlabroot(): # pragma: matlab
r"""Find directory that servers as matlab root.
Returns:
str: Full path to matlabroot directory.
"""
return MatlabModelDriver.get_matlab_info()[0]
def install_matlab_engine(): # pragma: matlab
r"""Install the MATLAB engine API for Python."""
if not _matlab_engine_installed:
mtl_root = locate_matlabroot()
mtl_setup = os.path.join(mtl_root, 'extern', 'engines', 'python')
cmd = 'python setup.py install'
result = subprocess.check_output(cmd, cwd=mtl_setup)
print(result)
def start_matlab_engine(skip_connect=False, timeout=None): # pragma: matlab
r"""Start a Matlab shared engine session inside a detached screen
session.
Args:
skip_connect (bool, optional): If True, the engine is not connected.
Defaults to False.
timeout (int, optional): Time (in seconds) that should be waited for
Matlab to start up. Defaults to None and is set from the config
option ('matlab', 'startup_waittime_s').
Returns:
tuple: Information on the started session including the name of the
screen session running matlab, the created engine object, the name
of the matlab session, and the matlab engine process.
Raises:
RuntimeError: If Matlab is not installed.
"""
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if timeout is None:
timeout = float(ygg_cfg.get('matlab', 'startup_waittime_s', 10))
old_process = set(locate_matlab_engine_processes())
old_matlab = set(matlab.engine.find_matlab())
screen_session = str('ygg_matlab' + datetime.today().strftime("%Y%j%H%M%S")
+ '_%d' % len(old_matlab))
try:
args = ['screen', '-dmS', screen_session, '-c',
os.path.join(_top_lang_dir, 'matlab_screenrc'),
'matlab', '-nodisplay', '-nosplash', '-nodesktop', '-nojvm',
'-r', '"matlab.engine.shareEngine"']
subprocess.call(' '.join(args), shell=True)
T = TimeOut(timeout)
while ((len(set(matlab.engine.find_matlab()) - old_matlab) == 0)
and not T.is_out):
logger.debug('Waiting for matlab engine to start')
sleep(1) # Usually 3 seconds
except KeyboardInterrupt: # pragma: debug
args = ['screen', '-X', '-S', screen_session, 'quit']
subprocess.call(' '.join(args), shell=True)
raise
if (len(set(matlab.engine.find_matlab()) - old_matlab) == 0): # pragma: debug
raise Exception("start_matlab timed out at %f s" % T.elapsed)
new_matlab = list(set(matlab.engine.find_matlab()) - old_matlab)[0]
new_process = list(set(locate_matlab_engine_processes()) - old_process)[0]
# Connect to the engine
matlab_engine = None
if not skip_connect:
matlab_engine = connect_matlab_engine(new_matlab, first_connect=True)
return screen_session, matlab_engine, new_matlab, new_process
def connect_matlab_engine(matlab_session, first_connect=False): # pragma: matlab
r"""Connect to Matlab engine.
Args:
matlab_session (str): Name of the Matlab session that should be
connected to.
first_connect (bool, optional): If True, this is the first time
Python is connecting to the Matlab shared engine and certain
environment variables should be set. Defaults to False.
Returns:
MatlabEngine: Matlab engine that was connected.
"""
matlab_engine = matlab.engine.connect_matlab(matlab_session)
matlab_engine.eval('clear classes;', nargout=0)
err = backwards.StringIO()
try:
matlab_engine.eval("YggInterface('YGG_MSG_MAX');", nargout=0,
stderr=err)
except BaseException:
for x in MatlabModelDriver.paths_to_add:
matlab_engine.addpath(x, nargout=0)
matlab_engine.eval("os = py.importlib.import_module('os');", nargout=0)
if not first_connect:
if backwards.PY2:
matlab_engine.eval("py.reload(os);", nargout=0)
else:
# matlab_engine.eval("py.importlib.reload(os);", nargout=0)
pass
return matlab_engine
def stop_matlab_engine(screen_session, matlab_engine, matlab_session,
matlab_process, keep_engine=False): # pragma: matlab
r"""Stop a Matlab shared engine session running inside a detached screen
session.
Args:
screen_session (str): Name of the screen session that the shared
Matlab session was started in.
matlab_engine (MatlabEngine): Matlab engine that should be stopped.
matlab_session (str): Name of Matlab session that the Matlab engine is
connected to.
matlab_process (psutil.Process): Process running the Matlab shared engine.
keep_engine (bool, optional): If True, the references to the engine will be
removed so it is not deleted. Defaults to False.
Raises:
RuntimeError: If Matlab is not installed.
"""
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if keep_engine and (matlab_engine is not None):
if '_matlab' in matlab_engine.__dict__:
matlab_engine.quit()
return
# Remove weakrefs to engine to prevent stopping engine more than once
if matlab_engine is not None:
# Remove weak references so engine not deleted on exit
eng_ref = weakref.getweakrefs(matlab_engine)
for x in eng_ref:
if x in matlab.engine._engines:
matlab.engine._engines.remove(x)
# Either exit the engine or remove its reference
if matlab_session in matlab.engine.find_matlab():
try:
matlab_engine.eval('exit', nargout=0)
except BaseException:
pass
else: # pragma: no cover
matlab_engine.__dict__.pop('_matlab', None)
# Stop the screen session containing the Matlab shared session
if screen_session is not None:
if matlab_session in matlab.engine.find_matlab():
os.system(('screen -X -S %s quit') % screen_session)
T = TimeOut(5)
while ((matlab_session in matlab.engine.find_matlab())
and not T.is_out):
logger.debug("Waiting for matlab engine to exit")
sleep(1)
if (matlab_session in matlab.engine.find_matlab()): # pragma: debug
if matlab_process is not None:
matlab_process.terminate()
logger.error("stop_matlab_engine timed out at %f s. " % T.elapsed
+ "Killed Matlab sharedEngine process.")
class MatlabProcess(tools.YggClass): # pragma: matlab
r"""Add features to mimic subprocess.Popen while running Matlab function
asynchronously.
Args:
target (func): Matlab function that should be called.
args (list, tuple): Arguments that should be passed to target.
kwargs (dict, optional): Keyword arguments that should be passed to
target. Defaults to empty dict.
name (str, optional): A name for the process. Generated if not provided.
matlab_engine (MatlabEngine, optional): MatlabEngine that should be used
to get errors. Defaults to None and errors will not be recovered
unless passed through stdout and stderr before shutdown.
Attributes:
stdout (StringIO): File like string buffer that stdout from target will
be written to.
stderr (StringIO): File like string buffer that stderr from target will
be written to.
target (func): Matlab function that should be called.
args (list, tuple): Arguments that should be passed to target.
kwargs (dict): Keyword arguments that should be passed to target.
future (MatlabFutureResult): Future result from async function. This
will be None until start is called.
matlab_engine (MatlabEngine): MatlabEngine that should be used to get
errors.
Raises:
RuntimeError: If Matlab is not installed.
"""
def __init__(self, target, args, kwargs=None, name=None, matlab_engine=None):
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if kwargs is None:
kwargs = {}
self.stdout = backwards.sio.StringIO()
self.stderr = backwards.sio.StringIO()
self._stdout_line = None
self._stderr_line = None
self.target = target
self.args = args
self.kwargs = kwargs
self.kwargs.update(nargout=0, stdout=self.stdout, stderr=self.stderr)
self.kwargs['async'] = True # For python 3.7 where async is reserved
self.future = None
self.matlab_engine = matlab_engine
self._returncode = None
super(MatlabProcess, self).__init__(name)
def poll(self, *args, **kwargs):
r"""Fake poll."""
return self.returncode
@property
def stdout_line(self):
r"""str: Output to stdout from function call."""
if self._stdout_line is None:
if self.stdout is not None:
line = self.stdout.getvalue()
if line:
self._stdout_line = line
return self._stdout_line
@property
def stderr_line(self):
r"""str: Output to stderr from function call."""
if self._stderr_line is None:
if self.stderr is not None:
line = self.stderr.getvalue()
if line:
self._stderr_line = line
return self._stderr_line
def print_output(self):
r"""Print output from stdout and stderr."""
if self.stdout_line:
self.print_encoded(self.stdout_line, end="")
if self.stderr_line:
self.print_encoded(self.stderr_line, end="")
def start(self):
r"""Start asychronous call."""
self.future = self.target(*self.args, **self.kwargs)
def is_started(self):
r"""bool: Has start been called."""
return (self.future is not None)
def is_cancelled(self):
r"""bool: Was the async call cancelled or not."""
if self.is_started():
try:
return self.future.cancelled()
except matlab.engine.EngineError:
self.on_matlab_error()
return True
except BaseException:
return True
return False
def is_done(self):
r"""bool: Is the async call still running."""
if self.is_started():
try:
return self.future.done() or self.is_cancelled()
except matlab.engine.EngineError:
self.on_matlab_error()
return True
except BaseException:
return True
return False
def is_alive(self):
r"""bool: Is the async call funning."""
if self.is_started():
return (not self.is_done())
return False
@property
def returncode(self):
r"""int: Return code."""
if self.is_done():
if self.stderr_line: # or self.is_cancelled():
return -1
else:
return 0
else:
return self._returncode
def kill(self, *args, **kwargs):
r"""Cancel the async call."""
if self.is_alive():
try:
out = self.future.cancel()
self.debug("Result of cancelling Matlab call?: %s", out)
except matlab.engine.EngineError as e:
self.debug('Matlab Engine Error: %s' % e)
self.on_matlab_error()
except BaseException as e:
self.debug('Other error on kill: %s' % e)
self.print_output()
if self.is_alive():
self.info('Error killing Matlab script.')
self.matlab_engine.quit()
self.future = None
self._returncode = -1
assert(not self.is_alive())
def on_matlab_error(self):
r"""Actions performed on error in Matlab engine."""
# self.print_output()
self.debug('')
if self.matlab_engine is not None:
try:
self.matlab_engine.eval('exception = MException.last;', nargout=0)
self.matlab_engine.eval('getReport(exception)')
except matlab.engine.EngineError:
pass
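# Illustrative sketch (not part of the original module): driving MatlabProcess
# the way one would drive subprocess.Popen. 'engine' is assumed to be a
# connected MatlabEngine and 'my_func' an m-file on its path; both are
# hypothetical names used only for illustration.
def _example_matlab_process(engine):  # pragma: no cover
    proc = MatlabProcess(target=getattr(engine, 'my_func'), args=(),
                         matlab_engine=engine)
    proc.start()
    while proc.poll() is None:  # returncode stays None until the call is done
        proc.print_output()
        sleep(1)
    proc.print_output()
    return proc.returncode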
class MatlabModelDriver(InterpretedModelDriver): # pragma: matlab
r"""Base class for running Matlab models.
Args:
name (str): Driver name.
args (str or list): Argument(s) for running the model in matlab.
Generally, this should be the full path to a Matlab script.
**kwargs: Additional keyword arguments are passed to parent class's
__init__ method.
Attributes:
started_matlab (bool): True if the driver had to start a new matlab
engine. False otherwise.
screen_session (str): Screen session that Matlab was started in.
mlengine (object): Matlab engine used to run script.
mlsession (str): Name of the Matlab session that was started.
Raises:
RuntimeError: If Matlab is not installed.
.. note:: Matlab models that call exit will shut down the shared engine.
"""
_schema_subtype_description = ('Model is written in Matlab.')
language = 'matlab'
language_ext = '.m'
base_languages = ['python']
default_interpreter_flags = ['-nodisplay', '-nosplash', '-nodesktop',
'-nojvm', '-batch']
version_flags = ["fprintf('R%s', version('-release')); exit();"]
path_env_variable = 'MATLABPATH'
comm_linger = (os.environ.get('YGG_MATLAB_ENGINE', '').lower() == 'true')
send_converters = {'pandas': serialize.consolidate_array,
'table': serialize.consolidate_array}
recv_converters = {'pandas': 'array'}
type_map = {
'int': 'intX',
'float': 'single, double',
'string': 'char',
'array': 'cell',
'object': 'containers.Map',
'boolean': 'logical',
'null': 'NaN',
'uint': 'uintX',
'complex': 'complex',
'bytes': 'char (utf-8)',
'unicode': 'char',
'1darray': 'mat',
'ndarray': 'mat',
'ply': 'containers.Map',
'obj': 'containers.Map',
'schema': 'containers.Map'}
function_param = {
'input': '{channel} = YggInterface(\'YggInput\', \'{channel_name}\');',
'output': '{channel} = YggInterface(\'YggOutput\', \'{channel_name}\');',
'recv': '[{flag_var}, {recv_var}] = {channel}.recv();',
'send': '{flag_var} = {channel}.send({send_var});',
'function_call': '{output_var} = {function_name}({input_var});',
'define': '{variable} = {value};',
'comment': '%',
'true': 'true',
'not': 'not',
'indent': 2 * ' ',
'quote': '\'',
'print': 'disp(\'{message}\');',
'fprintf': 'fprintf(\'{message}\', {variables});',
'error': 'error(\'{error_msg}\');',
'block_end': 'end;',
'if_begin': 'if ({cond})',
'for_begin': 'for {iter_var} = {iter_begin}:{iter_end}',
'while_begin': 'while ({cond})',
'break': 'break;',
'try_begin': 'try',
'try_except': 'catch {error_var}',
'assign': '{name} = {value};'}
def __init__(self, name, args, **kwargs):
self.using_matlab_engine = _matlab_engine_installed
if self.using_matlab_engine:
kwargs['skip_interpreter'] = True
self.model_wrapper = None
super(MatlabModelDriver, self).__init__(name, args, **kwargs)
self.started_matlab = False
self.screen_session = None
self.mlengine = None
self.mlsession = None
self.mlprocess = None
def parse_arguments(self, args):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
"""
super(MatlabModelDriver, self).parse_arguments(args)
model_base, model_ext = os.path.splitext(os.path.basename(self.model_file))
wrap_base = 'wrapped_%s_%s' % (model_base, self.uuid.replace('-', '_'))
# Matlab has a variable name limit of 62
wrap_base = wrap_base[:min(len(wrap_base), 60)]
self.model_wrapper = os.path.join(self.model_dir, wrap_base + model_ext)
self.wrapper_products.append(self.model_wrapper)
@classmethod
def write_error_wrapper(cls, fname, try_lines, matlab_engine=None):
r"""Write a wrapper for the model that encloses it in a try except so
that the error can be propagated appropriately.
Args:
fname (str): File where the wrapper should be written.
try_lines (list): List of lines to go in the try block.
matlab_engine (MatlabEngine, optional): Matlab engine that will be
used to call the wrapper. If not provided, it is assumed the
error will be called using the Matlab interpreter on the command
line. Defaults to None.
Raises:
"""
# Create lines based on use of engine or not
if matlab_engine is not None:
catch_block = ["error(e.message);"]
else:
catch_block = ["rethrow(e);"]
# catch_block = ["fprintf('MATLAB ERROR:\\n%s\\n', e.message);",
# "disp(e.identifier);",
# "disp(e.stack);",
# "exit(0);"]
lines = cls.write_try_except(try_lines, catch_block)
if matlab_engine is None:
lines.append("exit(0);")
# Write lines
logger.debug('Wrapper:\n\t%s', '\n\t'.join(lines))
if fname is None:
return lines
else:
if os.path.isfile(fname): # pragma: debug
os.remove(fname)
with open(fname, 'w') as fd:
fd.write('\n'.join(lines))
logger.debug("Wrote wrapper to: %s" % fname)
@classmethod
def run_executable(cls, args, dont_wrap_error=False, fname_wrapper=None,
matlab_engine=None, **kwargs):
r"""Run a program using the executable for this language and the
provided arguments.
Args:
args (list): The program that should be run and any arguments
that should be provided to it.
dont_wrap_error (bool, optional): If False, the executable will be
wrapped in a try/catch block to prevent errors from stopping
Matlab shutdown. If True, the command will be executed as is
with the Matlab interpreter. Defaults to False.
fname_wrapper (str, optional): File where wrapper should be saved.
If not provided, one is created. Defaults to None.
matlab_engine (MatlabEngine, optional): Matlab engine that should be
used to run the command. If not provided, the Matlab interpreter
is used instead. Defaults to None.
**kwargs: Additional keyword arguments are passed to
cls.executable_command and tools.popen_nobuffer.
Returns:
str: Output to stdout from the run command.
Raises:
RuntimeError: If the language is not installed.
RuntimeError: If there is an error when running the command.
"""
# Strip file if first argument is a file
if os.path.isfile(args[0]):
kwargs.setdefault('working_dir', os.path.dirname(args[0]))
args = [os.path.splitext(os.path.basename(args[0]))[0]] + args[1:]
# Write wrapper
if (not dont_wrap_error) and (len(args) > 0):
if len(args) == 1:
# TODO: Will this work if there is a function defined in the
# script?
try_block = [args[0]]
if not try_block[0].endswith(';'):
try_block[0] += ';'
else:
# Put quotes around arguments since they would be strings when
# passed from the command line
func_call = "%s('%s'" % (args[0], args[1])
for a in args[2:]:
func_call += (", '%s'" % a)
func_call += ');'
try_block = [func_call]
if fname_wrapper is None:
fname_wrapper = 'wrapper_%s%s' % (str(uuid_gen.uuid4()),
cls.language_ext[0])
fname_wrapper = fname_wrapper.replace('-', '_')
working_dir = kwargs.get('working_dir', kwargs.get('cwd', None))
if working_dir is not None:
fname_wrapper = os.path.join(working_dir, fname_wrapper)
cls.write_error_wrapper(fname_wrapper, try_block,
matlab_engine=matlab_engine)
assert(os.path.isfile(fname_wrapper))
args = [os.path.splitext(os.path.basename(fname_wrapper))[0]]
# Call base, catching error to remove temp wrapper
try:
if matlab_engine is None:
kwargs['for_matlab'] = True
out = super(MatlabModelDriver, cls).run_executable(args, **kwargs)
else:
if kwargs.get('debug_flags', None): # pragma: debug
logger.warn("Debugging via valgrind, strace, etc. disabled "
"for Matlab when using a Matlab shared engine.")
assert(kwargs.get('return_process', False))
# Add environment variables
env = kwargs.get('env', {})
old_env = {}
new_env_str = ''
for k, v in env.items():
old_env[k] = matlab_engine.getenv(k)
matlab_engine.setenv(k, v, nargout=0)
new_env_str += "'%s', %s, " % (k, repr(v))
matlab_engine.eval('new_env = py.dict(pyargs(%s));'
% new_env_str[:-2], nargout=0)
matlab_engine.eval('os.environ.update(new_env);', nargout=0)
# Create matlab process using Matlab engine
out = MatlabProcess(name=args[0] + '.MatlabProcess',
target=getattr(matlab_engine, args[0]),
args=args[1:], matlab_engine=matlab_engine)
out.start()
finally:
if (((not kwargs.get('return_process', False))
and (fname_wrapper is not None))):
os.remove(fname_wrapper)
return out
@classmethod
def language_version(cls):
r"""Determine the version of this language.
Returns:
str: Version of compiler/interpreter for this language.
"""
return cls.get_matlab_info()[1]
@classmethod
def executable_command(cls, args, **kwargs):
r"""Compose a command for running a program in this language with the
provided arguments. If not already present, the interpreter command and
interpreter flags are prepended to the provided arguments.
Args:
args (list): The program that returned command should run and any
arguments that should be provided to it.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Arguments composing the command required to run the program
from the command line using the interpreter for this language.
"""
# if kwargs.get('exec_type', 'interpreter') == 'interpreter':
# args = ["\"%s\"" % (' '.join(args))]
return super(MatlabModelDriver, cls).executable_command(args, **kwargs)
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language. This includes locating
any required external libraries and setting option defaults.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = InterpretedModelDriver.configure.__func__(cls, cfg)
opts = {
'startup_waittime_s': [('The time allowed for a Matlab engine to start '
'before timing out and reporting an error.'),
'10'],
'version': ['The version (release number) of installed Matlab.', ''],
'matlabroot': ['The path to the default installation of matlab.', '']}
if cfg.get(cls.language, 'disable', 'False').lower() != 'true':
try:
opts['matlabroot'][1], opts['version'][1] = cls.get_matlab_info()
except RuntimeError: # pragma: no matlab
pass
for k in opts.keys():
if not cfg.has_option(cls.language, k):
if opts[k][1]: # pragma: matlab
cfg.set(cls.language, k, opts[k][1])
else:
out.append((cls.language, k, opts[k][0]))
return out
@classmethod
def get_matlab_info(cls): # pragma: matlab
r"""Determine the root directory where Matlab is installed and the version
that is installed (if Matlab is installed at all). This will fail if Matlab
is not installed, cannot be started, or does not operate as expected.
Returns:
tuple: Matlab root directory and Matlab version string.
Raises:
RuntimeError: If Matlab cannot be started or the root directory or
release cannot be determined.
"""
mtl_id = '=MATLABROOT='
cmd = ("fprintf('" + mtl_id + "%s" + mtl_id + "R%s" + mtl_id + "'"
+ ",matlabroot,version('-release'));")
mtl_proc = cls.run_executable([cmd])
mtl_id = backwards.match_stype(mtl_proc, mtl_id)
if mtl_id not in mtl_proc: # pragma: debug
raise RuntimeError(("Could not locate ID string (%s) in "
"output (%s).") % (mtl_id, mtl_proc))
parts = mtl_proc.split(mtl_id)
if len(parts) < 3: # pragma: debug
raise RuntimeError(("Could not get matlabroot/version from "
"output (%s).") % (mtl_proc))
matlabroot = backwards.as_str(parts[-3])
release = backwards.as_str(parts[-2])
return matlabroot, release
def start_matlab_engine(self):
r"""Start matlab session and connect to it."""
ml_attr = ['screen_session', 'mlengine', 'mlsession', 'mlprocess']
attempt_connect = (len(matlab.engine.find_matlab()) != 0)
# Connect to matlab if a session exists
if attempt_connect:
for mlsession in matlab.engine.find_matlab():
try:
self.debug("Trying to connect to session %s", mlsession)
self.mlengine = connect_matlab_engine(mlsession)
self.mlsession = mlsession
self.debug("Connected to existing shared engine: %s",
self.mlsession)
break
except matlab.engine.EngineError:
pass
# Start if not running or connect failed
if self.mlengine is None:
if attempt_connect:
self.debug("Starting a matlab shared engine (connect failed)")
else:
self.debug("Starting a matlab shared engine (none existing)")
out = start_matlab_engine()
for i, attr in enumerate(ml_attr):
setattr(self, attr, out[i])
self.started_matlab = True
# Add things to Matlab environment
self.mlengine.addpath(self.model_dir, nargout=0)
self.debug("Connected to matlab session '%s'" % self.mlsession)
def before_start(self):
r"""Actions to perform before the run loop."""
kwargs = dict(fname_wrapper=self.model_wrapper)
if self.using_matlab_engine:
self.start_matlab_engine()
kwargs.update(matlab_engine=self.mlengine,
no_queue_thread=True)
else:
kwargs.update(working_dir=self.model_dir)
with self.lock:
if self.using_matlab_engine and (self.mlengine is None): # pragma: debug
self.debug('Matlab engine not set. Stopping')
return
super(MatlabModelDriver, self).before_start(**kwargs)
def run_loop(self):
r"""Loop to check if model is still running and forward output."""
if self.using_matlab_engine:
self.model_process.print_output()
self.periodic_debug('matlab loop', period=100)('Looping')
if self.model_process.is_done():
self.model_process.print_output()
self.set_break_flag()
try:
self.model_process.future.result()
self.model_process.print_output()
except matlab.engine.EngineError:
self.model_process.print_output()
except BaseException:
self.model_process.print_output()
self.exception("Error running model.")
else:
self.sleep()
else:
super(MatlabModelDriver, self).run_loop()
def after_loop(self):
r"""Actions to perform after run_loop has finished. Mainly checking
if there was an error and then handling it."""
if self.using_matlab_engine:
if (self.model_process is not None) and self.model_process.is_alive():
self.info("Model process thread still alive")
self.kill_process()
return
super(MatlabModelDriver, self).after_loop()
if self.using_matlab_engine:
with self.lock:
self.cleanup()
def cleanup(self):
r"""Close the Matlab session and engine."""
if self.using_matlab_engine:
try:
stop_matlab_engine(self.screen_session, self.mlengine,
self.mlsession, self.mlprocess,
keep_engine=(not self.started_matlab))
except (SystemError, Exception) as e: # pragma: debug
self.error('Failed to exit matlab engine')
self.raise_error(e)
self.debug('Stopped Matlab')
self.screen_session = None
self.mlsession = None
self.started_matlab = False
self.mlengine = None
self.mlprocess = None
super(MatlabModelDriver, self).cleanup()
def check_exits(self):
r"""Check to make sure the program dosn't contain any exits as exits
will shut down the Matlab engine as well as the program.
Raises:
RuntimeError: If there are any exit calls in the file.
"""
has_exit = False
with open(self.raw_model_file, 'r') as fd:
for i, line in enumerate(fd):
if line.strip().startswith('exit'):
has_exit = True
break
if self.using_matlab_engine and has_exit:
warnings.warn(
"Line %d in '%s' contains an " % (
i, self.raw_model_file)
+ "'exit' call which will exit the MATLAB engine "
+ "such that it cannot be reused. Please replace 'exit' "
+ "with a return or error.")
def set_env(self):
r"""Get environment variables that should be set for the model process.
Returns:
dict: Environment variables for the model process.
"""
out = super(MatlabModelDriver, self).set_env()
if self.using_matlab_engine:
out['YGG_MATLAB_ENGINE'] = 'True'
# TODO: Move the following to InterpretedModelDriver once another
# language sets path_env_variable
path_list = []
prev_path = out.pop(self.path_env_variable, '')
if prev_path:
path_list.append(prev_path)
if isinstance(self.paths_to_add, list):
for x in self.paths_to_add:
if x not in prev_path:
path_list.append(x)
path_list.append(self.model_dir)
if path_list:
out[self.path_env_variable] = os.pathsep.join(path_list)
return out
@classmethod
def comm_atexit(cls, comm):
r"""Operations performed on comm at exit including draining receive.
Args:
comm (CommBase): Communication object.
"""
if comm.direction == 'recv':
while comm.recv(timeout=0)[0]:
comm.sleep()
else:
comm.send_eof()
comm.linger_close()
@classmethod
def decode_format(cls, format_str):
r"""Method for decoding format strings created in this language.
Args:
format_str (str): Encoded format string.
Returns:
str: Decoded format string.
"""
return backwards.decode_escape(format_str)
@classmethod
def prepare_output_variables(cls, vars_list):
r"""Concatenate a set of output variables such that it can be passed as
a single string to the function_call parameter.
Args:
vars_list (list): List of variable names to concatenate as output
from a function call.
Returns:
str: Concatenated variables list.
"""
out = super(MatlabModelDriver, cls).prepare_output_variables(vars_list)
if isinstance(vars_list, list) and (len(vars_list) > 1):
out = '[%s]' % out
return out
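# Illustrative sketch (not part of the original module): inspecting what the
# class-level helpers above generate. Passing fname=None to write_error_wrapper
# returns the wrapper lines instead of writing a file; 'my_model;' is a
# hypothetical script name.
def _example_generated_wrapper():  # pragma: no cover
    lines = MatlabModelDriver.write_error_wrapper(None, ['my_model;'])
    outputs = MatlabModelDriver.prepare_output_variables(['flag', 'result'])
    # outputs is wrapped in brackets (e.g. '[flag, result]'); the exact join
    # comes from the parent class implementation.
    return lines, outputs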
| import subprocess
import uuid as uuid_gen
import logging
from datetime import datetime
import os
import psutil
import warnings
import weakref
from yggdrasil import backwards, tools, platform, serialize
from yggdrasil.languages import get_language_dir
from yggdrasil.config import ygg_cfg
from yggdrasil.drivers.InterpretedModelDriver import InterpretedModelDriver
from yggdrasil.tools import TimeOut, sleep
logger = logging.getLogger(__name__)
try: # pragma: matlab
disable_engine = ygg_cfg.get('matlab', 'disable_engine', 'False').lower()
if platform._is_win or (disable_engine == 'true'):
_matlab_engine_installed = False
if not tools.is_subprocess():
logger.debug("matlab.engine disabled")
else:
import matlab.engine
_matlab_engine_installed = True
except ImportError: # pragma: no matlab
logger.debug("Could not import matlab.engine. "
+ "Matlab support for using a sharedEngine will be disabled.")
_matlab_engine_installed = False
_top_lang_dir = get_language_dir('matlab')
_compat_map = {
'R2015b': ['2.7', '3.3', '3.4'],
'R2017a': ['2.7', '3.3', '3.4', '3.5'],
'R2017b': ['2.7', '3.3', '3.4', '3.5', '3.6'],
'R2018b': ['2.7', '3.3', '3.4', '3.5', '3.6']}
def kill_all():
r"""Kill all Matlab shared engines."""
if platform._is_win: # pragma: windows
os.system(('taskkill /F /IM matlab.engine.shareEngine /T'))
else:
os.system(('pkill -f matlab.engine.shareEngine'))
def locate_matlab_engine_processes(): # pragma: matlab
r"""Get all of the active matlab sharedEngine processes.
Returns:
list: Active matlab sharedEngine processes.
"""
out = []
for p in psutil.process_iter():
p.info = p.as_dict(attrs=['name', 'pid', 'cmdline'])
if (((p.info['name'] == 'MATLAB')
and ('matlab.engine.shareEngine' in p.info['cmdline']))):
out.append(p) # p.info['pid'])
return out
def is_matlab_running():
r"""Determine if there is a Matlab engine running.
Returns:
bool: True if there is a Matlab engine running, False otherwise.
"""
if not _matlab_engine_installed: # pragma: no matlab
out = False
else: # pragma: matlab
out = (len(matlab.engine.find_matlab()) != 0)
return out
def locate_matlabroot(): # pragma: matlab
r"""Find directory that servers as matlab root.
Returns:
str: Full path to matlabroot directory.
"""
return MatlabModelDriver.get_matlab_info()[0]
def install_matlab_engine(): # pragma: matlab
r"""Install the MATLAB engine API for Python."""
if not _matlab_engine_installed:
mtl_root = locate_matlabroot()
mtl_setup = os.path.join(mtl_root, 'extern', 'engines', 'python')
cmd = 'python setup.py install'
result = subprocess.check_output(cmd, cwd=mtl_setup)
print(result)
def start_matlab_engine(skip_connect=False, timeout=None): # pragma: matlab
r"""Start a Matlab shared engine session inside a detached screen
session.
Args:
skip_connect (bool, optional): If True, the engine is not connected.
Defaults to False.
timeout (int, optional): Time (in seconds) that should be waited for
Matlab to start up. Defaults to None and is set from the config
option ('matlab', 'startup_waittime_s').
Returns:
tuple: Information on the started session including the name of the
screen session running matlab, the created engine object, the name
of the matlab session, and the matlab engine process.
Raises:
RuntimeError: If Matlab is not installed.
"""
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if timeout is None:
timeout = float(ygg_cfg.get('matlab', 'startup_waittime_s', 10))
old_process = set(locate_matlab_engine_processes())
old_matlab = set(matlab.engine.find_matlab())
screen_session = str('ygg_matlab' + datetime.today().strftime("%Y%j%H%M%S")
+ '_%d' % len(old_matlab))
try:
args = ['screen', '-dmS', screen_session, '-c',
os.path.join(_top_lang_dir, 'matlab_screenrc'),
'matlab', '-nodisplay', '-nosplash', '-nodesktop', '-nojvm',
'-r', '"matlab.engine.shareEngine"']
subprocess.call(' '.join(args), shell=True)
T = TimeOut(timeout)
while ((len(set(matlab.engine.find_matlab()) - old_matlab) == 0)
and not T.is_out):
logger.debug('Waiting for matlab engine to start')
sleep(1) # Usually 3 seconds
except KeyboardInterrupt: # pragma: debug
args = ['screen', '-X', '-S', screen_session, 'quit']
subprocess.call(' '.join(args), shell=True)
raise
if (len(set(matlab.engine.find_matlab()) - old_matlab) == 0): # pragma: debug
raise Exception("start_matlab timed out at %f s" % T.elapsed)
new_matlab = list(set(matlab.engine.find_matlab()) - old_matlab)[0]
new_process = list(set(locate_matlab_engine_processes()) - old_process)[0]
# Connect to the engine
matlab_engine = None
if not skip_connect:
matlab_engine = connect_matlab_engine(new_matlab, first_connect=True)
return screen_session, matlab_engine, new_matlab, new_process
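# Illustrative sketch (not part of the original module): starting a shared
# engine without connecting immediately, then attaching to it by name using
# connect_matlab_engine (defined just below). The timeout value is an
# arbitrary example.
def _example_start_then_connect():  # pragma: no cover
    screen, _, session, process = start_matlab_engine(skip_connect=True,
                                                      timeout=30)
    engine = connect_matlab_engine(session, first_connect=True)
    return screen, engine, session, process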
def connect_matlab_engine(matlab_session, first_connect=False): # pragma: matlab
r"""Connect to Matlab engine.
Args:
matlab_session (str): Name of the Matlab session that should be
connected to.
first_connect (bool, optional): If True, this is the first time
Python is connecting to the Matlab shared engine and certain
environment variables should be set. Defaults to False.
Returns:
MatlabEngine: Matlab engine that was connected.
"""
matlab_engine = matlab.engine.connect_matlab(matlab_session)
matlab_engine.eval('clear classes;', nargout=0)
err = backwards.StringIO()
try:
matlab_engine.eval("YggInterface('YGG_MSG_MAX');", nargout=0,
stderr=err)
except BaseException:
for x in MatlabModelDriver.paths_to_add:
matlab_engine.addpath(x, nargout=0)
matlab_engine.eval("os = py.importlib.import_module('os');", nargout=0)
if not first_connect:
if backwards.PY2:
matlab_engine.eval("py.reload(os);", nargout=0)
else:
# matlab_engine.eval("py.importlib.reload(os);", nargout=0)
pass
return matlab_engine
def stop_matlab_engine(screen_session, matlab_engine, matlab_session,
matlab_process, keep_engine=False): # pragma: matlab
r"""Stop a Matlab shared engine session running inside a detached screen
session.
Args:
screen_session (str): Name of the screen session that the shared
Matlab session was started in.
matlab_engine (MatlabEngine): Matlab engine that should be stopped.
matlab_session (str): Name of Matlab session that the Matlab engine is
connected to.
matlab_process (psutil.Process): Process running the Matlab shared engine.
keep_engine (bool, optional): If True, only disconnect from the shared engine
(dropping the local references) so the engine itself is not shut down. Defaults to False.
Raises:
RuntimeError: If Matlab is not installed.
"""
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if keep_engine and (matlab_engine is not None):
if '_matlab' in matlab_engine.__dict__:
matlab_engine.quit()
return
# Remove weakrefs to engine to prevent stopping engine more than once
if matlab_engine is not None:
# Remove weak references so engine not deleted on exit
eng_ref = weakref.getweakrefs(matlab_engine)
for x in eng_ref:
if x in matlab.engine._engines:
matlab.engine._engines.remove(x)
# Either exit the engine or remove its reference
if matlab_session in matlab.engine.find_matlab():
try:
matlab_engine.eval('exit', nargout=0)
except BaseException:
pass
else: # pragma: no cover
matlab_engine.__dict__.pop('_matlab', None)
# Stop the screen session containing the Matlab shared session
if screen_session is not None:
if matlab_session in matlab.engine.find_matlab():
os.system(('screen -X -S %s quit') % screen_session)
T = TimeOut(5)
while ((matlab_session in matlab.engine.find_matlab())
and not T.is_out):
logger.debug("Waiting for matlab engine to exit")
sleep(1)
if (matlab_session in matlab.engine.find_matlab()): # pragma: debug
if matlab_process is not None:
matlab_process.terminate()
logger.error("stop_matlab_engine timed out at %f s. " % T.elapsed
+ "Killed Matlab sharedEngine process.")
class MatlabProcess(tools.YggClass): # pragma: matlab
r"""Add features to mimic subprocess.Popen while running Matlab function
asynchronously.
Args:
target (func): Matlab function that should be called.
args (list, tuple): Arguments that should be passed to target.
kwargs (dict, optional): Keyword arguments that should be passed to
target. Defaults to empty dict.
name (str, optional): A name for the process. Generated if not provided.
matlab_engine (MatlabEngine, optional): MatlabEngine that should be used
to get errors. Defaults to None and errors will not be recovered
unless passed through stdout and stderr before shutdown.
Attributes:
stdout (StringIO): File like string buffer that stdout from target will
be written to.
stderr (StringIO): File like string buffer that stderr from target will
be written to.
target (func): Matlab function that should be called.
args (list, tuple): Arguments that should be passed to target.
kwargs (dict): Keyword arguments that should be passed to target.
future (MatlabFutureResult): Future result from async function. This
will be None until start is called.
matlab_engine (MatlabEngine): MatlabEngine that should be used to get
errors.
Raises:
RuntimeError: If Matlab is not installed.
"""
def __init__(self, target, args, kwargs=None, name=None, matlab_engine=None):
if not _matlab_engine_installed: # pragma: no matlab
raise RuntimeError("Matlab engine is not installed.")
if kwargs is None:
kwargs = {}
self.stdout = backwards.sio.StringIO()
self.stderr = backwards.sio.StringIO()
self._stdout_line = None
self._stderr_line = None
self.target = target
self.args = args
self.kwargs = kwargs
self.kwargs.update(nargout=0, stdout=self.stdout, stderr=self.stderr)
self.kwargs['async'] = True # For python 3.7 where async is reserved
self.future = None
self.matlab_engine = matlab_engine
self._returncode = None
super(MatlabProcess, self).__init__(name)
def poll(self, *args, **kwargs):
r"""Fake poll."""
return self.returncode
@property
def stdout_line(self):
r"""str: Output to stdout from function call."""
if self._stdout_line is None:
if self.stdout is not None:
line = self.stdout.getvalue()
if line:
self._stdout_line = line
return self._stdout_line
@property
def stderr_line(self):
r"""str: Output to stderr from function call."""
if self._stderr_line is None:
if self.stderr is not None:
line = self.stderr.getvalue()
if line:
self._stderr_line = line
return self._stderr_line
def print_output(self):
r"""Print output from stdout and stderr."""
if self.stdout_line:
self.print_encoded(self.stdout_line, end="")
if self.stderr_line:
self.print_encoded(self.stderr_line, end="")
def start(self):
r"""Start asychronous call."""
self.future = self.target(*self.args, **self.kwargs)
def is_started(self):
r"""bool: Has start been called."""
return (self.future is not None)
def is_cancelled(self):
r"""bool: Was the async call cancelled or not."""
if self.is_started():
try:
return self.future.cancelled()
except matlab.engine.EngineError:
self.on_matlab_error()
return True
except BaseException:
return True
return False
def is_done(self):
r"""bool: Is the async call still running."""
if self.is_started():
try:
return self.future.done() or self.is_cancelled()
except matlab.engine.EngineError:
self.on_matlab_error()
return True
except BaseException:
return True
return False
def is_alive(self):
r"""bool: Is the async call funning."""
if self.is_started():
return (not self.is_done())
return False
@property
def returncode(self):
r"""int: Return code."""
if self.is_done():
if self.stderr_line: # or self.is_cancelled():
return -1
else:
return 0
else:
return self._returncode
def kill(self, *args, **kwargs):
r"""Cancel the async call."""
if self.is_alive():
try:
out = self.future.cancel()
self.debug("Result of cancelling Matlab call?: %s", out)
except matlab.engine.EngineError as e:
self.debug('Matlab Engine Error: %s' % e)
self.on_matlab_error()
except BaseException as e:
self.debug('Other error on kill: %s' % e)
self.print_output()
if self.is_alive():
self.info('Error killing Matlab script.')
self.matlab_engine.quit()
self.future = None
self._returncode = -1
assert(not self.is_alive())
def on_matlab_error(self):
r"""Actions performed on error in Matlab engine."""
# self.print_output()
self.debug('')
if self.matlab_engine is not None:
try:
self.matlab_engine.eval('exception = MException.last;', nargout=0)
self.matlab_engine.eval('getReport(exception)')
except matlab.engine.EngineError:
pass
class MatlabModelDriver(InterpretedModelDriver): # pragma: matlab
r"""Base class for running Matlab models.
Args:
name (str): Driver name.
args (str or list): Argument(s) for running the model in matlab.
Generally, this should be the full path to a Matlab script.
**kwargs: Additional keyword arguments are passed to parent class's
__init__ method.
Attributes:
started_matlab (bool): True if the driver had to start a new matlab
engine. False otherwise.
screen_session (str): Screen session that Matlab was started in.
mlengine (object): Matlab engine used to run script.
mlsession (str): Name of the Matlab session that was started.
Raises:
RuntimeError: If Matlab is not installed.
.. note:: Matlab models that call exit will shut down the shared engine.
"""
_schema_subtype_description = ('Model is written in Matlab.')
language = 'matlab'
language_ext = '.m'
base_languages = ['python']
default_interpreter_flags = ['-nodisplay', '-nosplash', '-nodesktop',
'-nojvm', '-batch']
version_flags = ["fprintf('R%s', version('-release')); exit();"]
path_env_variable = 'MATLABPATH'
comm_linger = (os.environ.get('YGG_MATLAB_ENGINE', '').lower() == 'true')
send_converters = {'pandas': serialize.consolidate_array,
'table': serialize.consolidate_array}
recv_converters = {'pandas': 'array'}
type_map = {
'int': 'intX',
'float': 'single, double',
'string': 'char',
'array': 'cell',
'object': 'containers.Map',
'boolean': 'logical',
'null': 'NaN',
'uint': 'uintX',
'complex': 'complex',
'bytes': 'char (utf-8)',
'unicode': 'char',
'1darray': 'mat',
'ndarray': 'mat',
'ply': 'containers.Map',
'obj': 'containers.Map',
'schema': 'containers.Map'}
function_param = {
'input': '{channel} = YggInterface(\'YggInput\', \'{channel_name}\');',
'output': '{channel} = YggInterface(\'YggOutput\', \'{channel_name}\');',
'recv': '[{flag_var}, {recv_var}] = {channel}.recv();',
'send': '{flag_var} = {channel}.send({send_var});',
'function_call': '{output_var} = {function_name}({input_var});',
'define': '{variable} = {value};',
'comment': '%',
'true': 'true',
'not': 'not',
'indent': 2 * ' ',
'quote': '\'',
'print': 'disp(\'{message}\');',
'fprintf': 'fprintf(\'{message}\', {variables});',
'error': 'error(\'{error_msg}\');',
'block_end': 'end;',
'if_begin': 'if ({cond})',
'for_begin': 'for {iter_var} = {iter_begin}:{iter_end}',
'while_begin': 'while ({cond})',
'break': 'break;',
'try_begin': 'try',
'try_except': 'catch {error_var}',
'assign': '{name} = {value};'}
def __init__(self, name, args, **kwargs):
self.using_matlab_engine = _matlab_engine_installed
if self.using_matlab_engine:
kwargs['skip_interpreter'] = True
self.model_wrapper = None
super(MatlabModelDriver, self).__init__(name, args, **kwargs)
self.started_matlab = False
self.screen_session = None
self.mlengine = None
self.mlsession = None
self.mlprocess = None
def parse_arguments(self, args):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
"""
super(MatlabModelDriver, self).parse_arguments(args)
model_base, model_ext = os.path.splitext(os.path.basename(self.model_file))
wrap_base = 'wrapped_%s_%s' % (model_base, self.uuid.replace('-', '_'))
# Matlab has a variable name limit of 62
wrap_base = wrap_base[:min(len(wrap_base), 60)]
self.model_wrapper = os.path.join(self.model_dir, wrap_base + model_ext)
self.wrapper_products.append(self.model_wrapper)
@classmethod
def write_error_wrapper(cls, fname, try_lines, matlab_engine=None):
r"""Write a wrapper for the model that encloses it in a try except so
that the error can be propagated appropriately.
Args:
fname (str): File where the wrapper should be written.
try_lines (list): List of lines to go in the try block.
matlab_engine (MatlabEngine, optional): Matlab engine that will be
used to call the wrapper. If not provided, it is assumed the
error will be called using the Matlab interpreter on the command
line. Defaults to None.
Raises:
"""
# Create lines based on use of engine or not
if matlab_engine is not None:
catch_block = ["error(e.message);"]
else:
catch_block = ["rethrow(e);"]
# catch_block = ["fprintf('MATLAB ERROR:\\n%s\\n', e.message);",
# "disp(e.identifier);",
# "disp(e.stack);",
# "exit(0);"]
lines = cls.write_try_except(try_lines, catch_block)
if matlab_engine is None:
lines.append("exit(0);")
# Write lines
logger.debug('Wrapper:\n\t%s', '\n\t'.join(lines))
if fname is None:
return lines
else:
if os.path.isfile(fname): # pragma: debug
os.remove(fname)
with open(fname, 'w') as fd:
fd.write('\n'.join(lines))
logger.debug("Wrote wrapper to: %s" % fname)
@classmethod
def run_executable(cls, args, dont_wrap_error=False, fname_wrapper=None,
matlab_engine=None, **kwargs):
r"""Run a program using the executable for this language and the
provided arguments.
Args:
args (list): The program that should be run and any arguments
that should be provided to it.
dont_wrap_error (bool, optional): If False, the executable will be
wrapped in a try/catch block to prevent errors from stopping
Matlab shutdown. If True, the command will be executed as is
with the Matlab interpreter. Defaults to False.
fname_wrapper (str, optional): File where wrapper should be saved.
If not provided, one is created. Defaults to None.
matlab_engine (MatlabEngine, optional): Matlab engine that should be
used to run the command. If not provided, the Matlab interpreter
is used instead. Defaults to None.
**kwargs: Additional keyword arguments are passed to
cls.executable_command and tools.popen_nobuffer.
Returns:
str: Output to stdout from the run command.
Raises:
RuntimeError: If the language is not installed.
RuntimeError: If there is an error when running the command.
"""
# Strip file if first argument is a file
if os.path.isfile(args[0]):
kwargs.setdefault('working_dir', os.path.dirname(args[0]))
args = [os.path.splitext(os.path.basename(args[0]))[0]] + args[1:]
# Write wrapper
if (not dont_wrap_error) and (len(args) > 0):
if len(args) == 1:
# TODO: Will this work if there is a function defined in the
# script?
try_block = [args[0]]
if not try_block[0].endswith(';'):
try_block[0] += ';'
else:
# Put quotes around arguments since they would be strings when
# passed from the command line
func_call = "%s('%s'" % (args[0], args[1])
for a in args[2:]:
func_call += (", '%s'" % a)
func_call += ');'
try_block = [func_call]
if fname_wrapper is None:
fname_wrapper = 'wrapper_%s%s' % (str(uuid_gen.uuid4()),
cls.language_ext[0])
fname_wrapper = fname_wrapper.replace('-', '_')
working_dir = kwargs.get('working_dir', kwargs.get('cwd', None))
if working_dir is not None:
fname_wrapper = os.path.join(working_dir, fname_wrapper)
cls.write_error_wrapper(fname_wrapper, try_block,
matlab_engine=matlab_engine)
assert(os.path.isfile(fname_wrapper))
args = [os.path.splitext(os.path.basename(fname_wrapper))[0]]
# Call base, catching error to remove temp wrapper
try:
if matlab_engine is None:
kwargs['for_matlab'] = True
out = super(MatlabModelDriver, cls).run_executable(args, **kwargs)
else:
if kwargs.get('debug_flags', None): # pragma: debug
logger.warn("Debugging via valgrind, strace, etc. disabled "
"for Matlab when using a Matlab shared engine.")
assert(kwargs.get('return_process', False))
# Add environment variables
env = kwargs.get('env', {})
old_env = {}
new_env_str = ''
for k, v in env.items():
old_env[k] = matlab_engine.getenv(k)
matlab_engine.setenv(k, v, nargout=0)
new_env_str += "'%s', %s, " % (k, repr(v))
matlab_engine.eval('new_env = py.dict(pyargs(%s));'
% new_env_str[:-2], nargout=0)
matlab_engine.eval('os.environ.update(new_env);', nargout=0)
# Create matlab process using Matlab engine
out = MatlabProcess(name=args[0] + '.MatlabProcess',
target=getattr(matlab_engine, args[0]),
args=args[1:], matlab_engine=matlab_engine)
out.start()
finally:
if (((not kwargs.get('return_process', False))
and (fname_wrapper is not None))):
os.remove(fname_wrapper)
return out
@classmethod
def language_version(cls):
r"""Determine the version of this language.
Returns:
str: Version of compiler/interpreter for this language.
"""
return cls.get_matlab_info()[1]
@classmethod
def executable_command(cls, args, **kwargs):
r"""Compose a command for running a program in this language with the
provided arguments. If not already present, the interpreter command and
interpreter flags are prepended to the provided arguments.
Args:
args (list): The program that returned command should run and any
arguments that should be provided to it.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Arguments composing the command required to run the program
from the command line using the interpreter for this language.
"""
# if kwargs.get('exec_type', 'interpreter') == 'interpreter':
# args = ["\"%s\"" % (' '.join(args))]
return super(MatlabModelDriver, cls).executable_command(args, **kwargs)
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language. This includes locating
any required external libraries and setting option defaults.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = InterpretedModelDriver.configure.__func__(cls, cfg)
opts = {
'startup_waittime_s': [('The time allowed for a Matlab engine to start '
'before timing out and reporting an error.'),
'10'],
'version': ['The version (release number) of installed Matlab.', ''],
'matlabroot': ['The path to the default installation of matlab.', '']}
if cfg.get(cls.language, 'disable', 'False').lower() != 'true':
try:
opts['matlabroot'][1], opts['version'][1] = cls.get_matlab_info()
except RuntimeError: # pragma: no matlab
pass
for k in opts.keys():
if not cfg.has_option(cls.language, k):
if opts[k][1]: # pragma: matlab
cfg.set(cls.language, k, opts[k][1])
else:
out.append((cls.language, k, opts[k][0]))
return out
@classmethod
def get_matlab_info(cls): # pragma: matlab
r"""Determine the root directory where Matlab is installed and the version
that is installed (if Matlab is installed at all). This will fail if Matlab
is not installed, cannot be started, or does not operate as expected.
Returns:
tuple: Matlab root directory and Matlab version string.
Raises:
RuntimeError: If Matlab cannot be started or the root directory or
release cannot be determined.
"""
mtl_id = '=MATLABROOT='
cmd = ("fprintf('" + mtl_id + "%s" + mtl_id + "R%s" + mtl_id + "'"
+ ",matlabroot,version('-release'));")
mtl_proc = cls.run_executable([cmd])
mtl_id = backwards.match_stype(mtl_proc, mtl_id)
if mtl_id not in mtl_proc: # pragma: debug
raise RuntimeError(("Could not locate ID string (%s) in "
"output (%s).") % (mtl_id, mtl_proc))
parts = mtl_proc.split(mtl_id)
if len(parts) < 3: # pragma: debug
raise RuntimeError(("Could not get matlabroot/version from "
"output (%s).") % (mtl_proc))
matlabroot = backwards.as_str(parts[-3])
release = backwards.as_str(parts[-2])
return matlabroot, release
def start_matlab_engine(self):
r"""Start matlab session and connect to it."""
ml_attr = ['screen_session', 'mlengine', 'mlsession', 'mlprocess']
attempt_connect = (len(matlab.engine.find_matlab()) != 0)
# Connect to matlab if a session exists
if attempt_connect:
for mlsession in matlab.engine.find_matlab():
try:
self.debug("Trying to connect to session %s", mlsession)
self.mlengine = connect_matlab_engine(mlsession)
self.mlsession = mlsession
self.debug("Connected to existing shared engine: %s",
self.mlsession)
break
except matlab.engine.EngineError:
pass
# Start if not running or connect failed
if self.mlengine is None:
if attempt_connect:
self.debug("Starting a matlab shared engine (connect failed)")
else:
self.debug("Starting a matlab shared engine (none existing)")
out = start_matlab_engine()
for i, attr in enumerate(ml_attr):
setattr(self, attr, out[i])
self.started_matlab = True
# Add things to Matlab environment
self.mlengine.addpath(self.model_dir, nargout=0)
self.debug("Connected to matlab session '%s'" % self.mlsession)
def before_start(self):
r"""Actions to perform before the run loop."""
kwargs = dict(fname_wrapper=self.model_wrapper)
if self.using_matlab_engine:
self.start_matlab_engine()
kwargs.update(matlab_engine=self.mlengine,
no_queue_thread=True)
else:
kwargs.update(working_dir=self.model_dir)
with self.lock:
if self.using_matlab_engine and (self.mlengine is None): # pragma: debug
self.debug('Matlab engine not set. Stopping')
return
super(MatlabModelDriver, self).before_start(**kwargs)
def run_loop(self):
r"""Loop to check if model is still running and forward output."""
if self.using_matlab_engine:
self.model_process.print_output()
self.periodic_debug('matlab loop', period=100)('Looping')
if self.model_process.is_done():
self.model_process.print_output()
self.set_break_flag()
try:
self.model_process.future.result()
self.model_process.print_output()
except matlab.engine.EngineError:
self.model_process.print_output()
except BaseException:
self.model_process.print_output()
self.exception("Error running model.")
else:
self.sleep()
else:
super(MatlabModelDriver, self).run_loop()
def after_loop(self):
r"""Actions to perform after run_loop has finished. Mainly checking
if there was an error and then handling it."""
if self.using_matlab_engine:
if (self.model_process is not None) and self.model_process.is_alive():
self.info("Model process thread still alive")
self.kill_process()
return
super(MatlabModelDriver, self).after_loop()
if self.using_matlab_engine:
with self.lock:
self.cleanup()
def cleanup(self):
r"""Close the Matlab session and engine."""
if self.using_matlab_engine:
try:
stop_matlab_engine(self.screen_session, self.mlengine,
self.mlsession, self.mlprocess,
keep_engine=(not self.started_matlab))
except (SystemError, Exception) as e: # pragma: debug
self.error('Failed to exit matlab engine')
self.raise_error(e)
self.debug('Stopped Matlab')
self.screen_session = None
self.mlsession = None
self.started_matlab = False
self.mlengine = None
self.mlprocess = None
super(MatlabModelDriver, self).cleanup()
def check_exits(self):
r"""Check to make sure the program dosn't contain any exits as exits
will shut down the Matlab engine as well as the program.
Raises:
RuntimeError: If there are any exit calls in the file.
"""
has_exit = False
with open(self.raw_model_file, 'r') as fd:
for i, line in enumerate(fd):
if line.strip().startswith('exit'):
has_exit = True
break
if self.using_matlab_engine and has_exit:
warnings.warn(
"Line %d in '%s' contains an " % (
i, self.raw_model_file)
+ "'exit' call which will exit the MATLAB engine "
+ "such that it cannot be reused. Please replace 'exit' "
+ "with a return or error.")
def set_env(self):
r"""Get environment variables that should be set for the model process.
Returns:
dict: Environment variables for the model process.
"""
out = super(MatlabModelDriver, self).set_env()
if self.using_matlab_engine:
out['YGG_MATLAB_ENGINE'] = 'True'
# TODO: Move the following to InterpretedModelDriver once another
# language sets path_env_variable
path_list = []
prev_path = out.pop(self.path_env_variable, '')
if prev_path:
path_list.append(prev_path)
if isinstance(self.paths_to_add, list):
for x in self.paths_to_add:
if x not in prev_path:
path_list.append(x)
path_list.append(self.model_dir)
if path_list:
out[self.path_env_variable] = os.pathsep.join(path_list)
return out
@classmethod
def comm_atexit(cls, comm):
r"""Operations performed on comm at exit including draining receive.
Args:
comm (CommBase): Communication object.
"""
if comm.direction == 'recv':
while comm.recv(timeout=0)[0]:
comm.sleep()
else:
comm.send_eof()
comm.linger_close()
@classmethod
def decode_format(cls, format_str):
r"""Method for decoding format strings created in this language.
Args:
format_str (str): Encoded format string.
Returns:
str: Decoded format string.
"""
return backwards.decode_escape(format_str)
@classmethod
def prepare_output_variables(cls, vars_list):
r"""Concatenate a set of output variables such that it can be passed as
a single string to the function_call parameter.
Args:
vars_list (list): List of variable names to concatenate as output
from a function call.
Returns:
str: Concatenated variables list.
"""
out = super(MatlabModelDriver, cls).prepare_output_variables(vars_list)
if isinstance(vars_list, list) and (len(vars_list) > 1):
out = '[%s]' % out
return out | en | 0.792247 | # pragma: matlab # pragma: no matlab Kill all Matlab shared engines. # pragma: windows # pragma: matlab Get all of the active matlab sharedEngine processes. Returns: list: Active matlab sharedEngine processes. # p.info['pid']) Determine if there is a Matlab engine running. Returns: bool: True if there is a Matlab engine running, False otherwise. # pragma: no matlab # pragma: matlab # pragma: matlab Find directory that servers as matlab root. Returns: str: Full path to matlabroot directory. # pragma: matlab Install the MATLAB engine API for Python. # pragma: matlab Start a Matlab shared engine session inside a detached screen session. Args: skip_connect (bool, optional): If True, the engine is not connected. Defaults to False. timeout (int, optional): Time (in seconds) that should be waited for Matlab to start up. Defaults to None and is set from the config option ('matlab', 'startup_waittime_s'). Returns: tuple: Information on the started session including the name of the screen session running matlab, the created engine object, the name of the matlab session, and the matlab engine process. Raises: RuntimeError: If Matlab is not installed. # pragma: no matlab # Usually 3 seconds # pragma: debug # pragma: debug # Connect to the engine # pragma: matlab Connect to Matlab engine. Args: matlab_session (str): Name of the Matlab session that should be connected to. first_connect (bool, optional): If True, this is the first time Python is connecting to the Matlab shared engine and certain environment variables should be set. Defaults to False. Returns: MatlabEngine: Matlab engine that was connected. # matlab_engine.eval("py.importlib.reload(os);", nargout=0) # pragma: matlab Stop a Matlab shared engine session running inside a detached screen session. Args: screen_session (str): Name of the screen session that the shared Matlab session was started in. matlab_engine (MatlabEngine): Matlab engine that should be stopped. matlab_session (str): Name of Matlab session that the Matlab engine is connected to. matlab_process (psutil.Process): Process running the Matlab shared engine. keep_engine (bool, optional): If True, the references to the engine will be removed so it is not deleted. Defaults to False. Raises: RuntimeError: If Matlab is not installed. # pragma: no matlab # Remove weakrefs to engine to prevent stopping engine more than once # Remove weak references so engine not deleted on exit # Either exit the engine or remove its reference # pragma: no cover # Stop the screen session containing the Matlab shared session # pragma: debug # pragma: matlab Add features to mimic subprocess.Popen while running Matlab function asynchronously. Args: target (func): Matlab function that should be called. args (list, tuple): Arguments that should be passed to target. kwargs (dict, optional): Keyword arguments that should be passed to target. Defaults to empty dict. name (str, optional): A name for the process. Generated if not provided. matlab_engine (MatlabEngine, optional): MatlabEngine that should be used to get errors. Defaults to None and errors will not be recovered unless passed through stdout and stderr before shutdown. Attributes: stdout (StringIO): File like string buffer that stdout from target will be written to. stderr (StringIO): File like string buffer that stderr from target will be written to. target (func): Matlab function that should be called. args (list, tuple): Arguments that should be passed to target. 
kwargs (dict): Keyword arguments that should be passed to target. future (MatlabFutureResult): Future result from async function. This will be None until start is called. matlab_engine (MatlabEngine): MatlabEngine that should be used to get errors. Raises: RuntimeError: If Matlab is not installed. # pragma: no matlab # For python 3.7 where async is reserved Fake poll. str: Output to stdout from function call. str: Output to stderr from function call. Print output from stdout and stderr. Start asychronous call. bool: Has start been called. bool: Was the async call cancelled or not. bool: Is the async call still running. bool: Is the async call funning. int: Return code. # or self.is_cancelled(): Cancel the async call. Actions performed on error in Matlab engine. # self.print_output() # pragma: matlab Base class for running Matlab models. Args: name (str): Driver name. args (str or list): Argument(s) for running the model in matlab. Generally, this should be the full path to a Matlab script. **kwargs: Additional keyword arguments are passed to parent class's __init__ method. Attributes: started_matlab (bool): True if the driver had to start a new matlab engine. False otherwise. screen_session (str): Screen session that Matlab was started in. mlengine (object): Matlab engine used to run script. mlsession (str): Name of the Matlab session that was started. Raises: RuntimeError: If Matlab is not installed. .. note:: Matlab models that call exit will shut down the shared engine. Sort model arguments to determine which one is the executable and which ones are arguments. Args: args (list): List of arguments provided. # Matlab has a variable name limit of 62 Write a wrapper for the model that encloses it in a try except so that the error can be propagated appropriately. Args: fname (str): File where the wrapper should be written. try_lines (list): List of lines to go in the try block. model_file (str): Path to model that should be wrapped. matlab_engine (MatlabEngine, optional): Matlab engine that will be used to call the wrapper. If not provided, it is assumed the error will be called using the Matlab interpreter on the command line. Defautls to None. Raises: # Create lines based on use of engine or not # catch_block = ["fprintf('MATLAB ERROR:\\n%s\\n', e.message);", # "disp(e.identifier);", # "disp(e.stack);", # "exit(0);"] # Write lines # pragma: debug Run a program using the executable for this language and the provided arguments. Args: args (list): The program that should be run and any arguments that should be provided to it. dont_wrap_error (bool, optional): If False, the executable will be wrapped in a try/catch block to prevent errors from stopping Matlab shutdown. If True, the command will be executed as is with the Matlab interpreter. Defaults to False. fname_wrapper (str, optional): File where wrapper should be saved. If not provided, one is created. Defaults to None. matlab_engine (MatlabEngine, optional): Matlab engine that should be used to run the command. If not provided, the Matlab interpreter is used instead. Defaults to None. **kwargs: Additional keyword arguments are passed to cls.executable_command and tools.popen_nobuffer. Returns: str: Output to stdout from the run command. Raises: RuntimeError: If the language is not installed. RuntimeError: If there is an error when running the command. # Strip file if first argument is a file # Write wrapper # TODO: Will this work if there is a function defined in the # script? 
# Put quotes around arguments since they would be strings when # passed from the command line # Call base, catching error to remove temp wrapper # pragma: debug # Add environment variables # Create matlab process using Matlab engine Determine the version of this language. Returns: str: Version of compiler/interpreter for this language. Compose a command for running a program in this language with the provied arguments. If not already present, the interpreter command and interpreter flags are prepended to the provided arguments. Args: args (list): The program that returned command should run and any arguments that should be provided to it. **kwargs: Additional keyword arguments are ignored. Returns: list: Arguments composing the command required to run the program from the command line using the interpreter for this language. # if kwargs.get('exec_type', 'interpreter') == 'interpreter': # args = ["\"%s\"" % (' '.join(args))] Add configuration options for this language. This includes locating any required external libraries and setting option defaults. Args: cfg (YggConfigParser): Config class that options should be set for. Returns: list: Section, option, description tuples for options that could not be set. # pragma: no matlab # pragma: matlab # pragma: matlab Determine the root directory where Matlab is installed and the version that is installed (if Matlab is installed at all). This will fail if Matlab is not installed, cannot be started, or does not operate as expected. Returns: tuple: Matlab root directory and Matlab version string. Raises: RuntimeError: If Matlab cannot be started or the root directory or release cannot be determiend. # pragma: debug # pragma: debug Start matlab session and connect to it. # Connect to matlab if a session exists # Start if not running or connect failed # Add things to Matlab environment Actions to perform before the run loop. # pragma: debug Loop to check if model is still running and forward output. Actions to perform after run_loop has finished. Mainly checking if there was an error and then handling it. Close the Matlab session and engine. # pragma: debug Check to make sure the program dosn't contain any exits as exits will shut down the Matlab engine as well as the program. Raises: RuntimeError: If there are any exit calls in the file. Get environment variables that should be set for the model process. Returns: dict: Environment variables for the model process. # TODO: Move the following to InterpretedModelDriver once another # language sets path_env_variable Operations performed on comm at exit including draining receive. Args: comm (CommBase): Communication object. Method for decoding format strings created in this language. Args: format_str (str): Encoded format string. Returns: str: Decoded format string. Concatenate a set of output variables such that it can be passed as a single string to the function_call parameter. Args: vars_list (list): List of variable names to concatenate as output from a function call. Returns: str: Concatentated variables list. | 1.933453 | 2 |
analysis/migrations/0032_auto_20210409_1333.py | SACGF/variantgrid | 5 | 9511 | <gh_stars>1-10
# Generated by Django 3.1.3 on 2021-04-09 04:03
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snpdb', '0030_one_off_fix_cohort_sample_order'),
('analysis', '0031_auto_20210331_1826'),
]
operations = [
migrations.AddField(
model_name='varianttag',
name='genome_build',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild'),
),
migrations.AddField(
model_name='varianttag',
name='location',
field=models.CharField(choices=[('A', 'Analysis'), ('E', 'External Import'), ('G', 'Gene Page'), ('V', 'Variant Details')], default='A', max_length=1),
),
migrations.AlterField(
model_name='varianttag',
name='analysis',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='analysis.analysis'),
),
]
| # Generated by Django 3.1.3 on 2021-04-09 04:03
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snpdb', '0030_one_off_fix_cohort_sample_order'),
('analysis', '0031_auto_20210331_1826'),
]
operations = [
migrations.AddField(
model_name='varianttag',
name='genome_build',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='snpdb.genomebuild'),
),
migrations.AddField(
model_name='varianttag',
name='location',
field=models.CharField(choices=[('A', 'Analysis'), ('E', 'External Import'), ('G', 'Gene Page'), ('V', 'Variant Details')], default='A', max_length=1),
),
migrations.AlterField(
model_name='varianttag',
name='analysis',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='analysis.analysis'),
),
] | en | 0.778796 | # Generated by Django 3.1.3 on 2021-04-09 04:03 | 1.403652 | 1 |
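For readers who want to see what these operations translate to at the model layer, here is a hedged sketch of the VariantTag fields implied by the migration. It is inferred from the AddField/AlterField operations only; the project's actual models.py is not shown here, and the related model class names are assumptions based on the `to=` targets.

# Hypothetical reconstruction, not the project's real models.py.
from django.db import models


class VariantTag(models.Model):
    LOCATION_CHOICES = [
        ("A", "Analysis"),
        ("E", "External Import"),
        ("G", "Gene Page"),
        ("V", "Variant Details"),
    ]

    genome_build = models.ForeignKey("snpdb.GenomeBuild", null=True,
                                     on_delete=models.CASCADE)
    location = models.CharField(max_length=1, choices=LOCATION_CHOICES, default="A")
    analysis = models.ForeignKey("analysis.Analysis", null=True,
                                 on_delete=models.SET_NULL)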
ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py | meaningfy-ws/ted-sws | 1 | 9512 | <filename>ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py<gh_stars>1-10
import pathlib
from typing import Iterator
import pandas as pd
from ted_sws.resources.prefixes import PREFIXES_DEFINITIONS
import re
CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME = "Rules"
RULES_SF_FIELD_ID = 'Standard Form Field ID (M)'
RULES_SF_FIELD_NAME = 'Standard Form Field Name (M)'
RULES_E_FORM_BT_ID = 'eForm BT-ID (O)'
RULES_E_FORM_BT_NAME = 'eForm BT Name (O)'
RULES_BASE_XPATH = 'Base XPath (for anchoring) (M)'
RULES_FIELD_XPATH = 'Field XPath (M)'
RULES_CLASS_PATH = 'Class path (M)'
RULES_PROPERTY_PATH = 'Property path (M)'
DEFAULT_RQ_NAME = 'sparql_query_'
SPARQL_PREFIX_PATTERN = re.compile('(?:\\s+|^)(\\w+)?:')
SPARQL_PREFIX_LINE = 'PREFIX {prefix}: <{value}>'
def get_sparql_prefixes(sparql_q: str) -> set:
finds: list = re.findall(SPARQL_PREFIX_PATTERN, sparql_q)
return set(finds)
def sparql_validation_generator(data: pd.DataFrame) -> Iterator[str]:
"""
This function generates SPARQL queries based on data in the dataframe.
:param data:
:return:
"""
for index, row in data.iterrows():
sf_field_id = row[RULES_SF_FIELD_ID]
sf_field_name = row[RULES_SF_FIELD_NAME]
e_form_bt_id = row[RULES_E_FORM_BT_ID]
e_form_bt_name = row[RULES_E_FORM_BT_NAME]
base_xpath = row[RULES_BASE_XPATH]
field_xpath = row[RULES_FIELD_XPATH]
class_path = row[RULES_CLASS_PATH]
property_path = row[RULES_PROPERTY_PATH]
prefixes = [SPARQL_PREFIX_LINE.format(
prefix=prefix, value=PREFIXES_DEFINITIONS.get(prefix)
) for prefix in get_sparql_prefixes(property_path)]
yield f"#title: {sf_field_id} - {sf_field_name}\n" \
f"#description: “{sf_field_id} - {sf_field_name}” in SF corresponds to “{e_form_bt_id} {e_form_bt_name}” in eForms. The corresponding XML element is {base_xpath}{field_xpath}. The expected ontology instances are epo: {class_path} .\n" \
"\n" + "\n".join(prefixes) + "\n\n" \
f"ASK WHERE {{ {property_path} }}"
def mapping_suite_processor_generate_sparql_queries(conceptual_mappings_file_path: pathlib.Path,
output_sparql_queries_folder_path: pathlib.Path,
rq_name: str = DEFAULT_RQ_NAME):
"""
This function reads data from conceptual_mappings.xlsx and generates SPARQL validation queries in provided package.
:param conceptual_mappings_file_path:
:param output_sparql_queries_folder_path:
:param rq_name:
:return:
"""
with open(conceptual_mappings_file_path, 'rb') as excel_file:
conceptual_mappings_rules_df = pd.read_excel(excel_file, sheet_name=CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME)
conceptual_mappings_rules_df.columns = conceptual_mappings_rules_df.iloc[0]
conceptual_mappings_rules_df = conceptual_mappings_rules_df[1:]
conceptual_mappings_rules_df = conceptual_mappings_rules_df[
conceptual_mappings_rules_df[RULES_PROPERTY_PATH].notnull()]
sparql_queries = sparql_validation_generator(conceptual_mappings_rules_df)
output_sparql_queries_folder_path.mkdir(parents=True, exist_ok=True)
for index, sparql_query in enumerate(sparql_queries):
output_file_path = output_sparql_queries_folder_path / f"{rq_name}{index}.rq"
with open(output_file_path, "w") as output_file:
output_file.write(sparql_query)
| <filename>ted_sws/mapping_suite_processor/services/conceptual_mapping_generate_sparql_queries.py<gh_stars>1-10
import pathlib
from typing import Iterator
import pandas as pd
from ted_sws.resources.prefixes import PREFIXES_DEFINITIONS
import re
CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME = "Rules"
RULES_SF_FIELD_ID = 'Standard Form Field ID (M)'
RULES_SF_FIELD_NAME = 'Standard Form Field Name (M)'
RULES_E_FORM_BT_ID = 'eForm BT-ID (O)'
RULES_E_FORM_BT_NAME = 'eForm BT Name (O)'
RULES_BASE_XPATH = 'Base XPath (for anchoring) (M)'
RULES_FIELD_XPATH = 'Field XPath (M)'
RULES_CLASS_PATH = 'Class path (M)'
RULES_PROPERTY_PATH = 'Property path (M)'
DEFAULT_RQ_NAME = 'sparql_query_'
SPARQL_PREFIX_PATTERN = re.compile('(?:\\s+|^)(\\w+)?:')
SPARQL_PREFIX_LINE = 'PREFIX {prefix}: <{value}>'
def get_sparql_prefixes(sparql_q: str) -> set:
finds: list = re.findall(SPARQL_PREFIX_PATTERN, sparql_q)
return set(finds)
def sparql_validation_generator(data: pd.DataFrame) -> Iterator[str]:
"""
This function generates SPARQL queries based on data in the dataframe.
:param data:
:return:
"""
for index, row in data.iterrows():
sf_field_id = row[RULES_SF_FIELD_ID]
sf_field_name = row[RULES_SF_FIELD_NAME]
e_form_bt_id = row[RULES_E_FORM_BT_ID]
e_form_bt_name = row[RULES_E_FORM_BT_NAME]
base_xpath = row[RULES_BASE_XPATH]
field_xpath = row[RULES_FIELD_XPATH]
class_path = row[RULES_CLASS_PATH]
property_path = row[RULES_PROPERTY_PATH]
prefixes = [SPARQL_PREFIX_LINE.format(
prefix=prefix, value=PREFIXES_DEFINITIONS.get(prefix)
) for prefix in get_sparql_prefixes(property_path)]
yield f"#title: {sf_field_id} - {sf_field_name}\n" \
f"#description: “{sf_field_id} - {sf_field_name}” in SF corresponds to “{e_form_bt_id} {e_form_bt_name}” in eForms. The corresponding XML element is {base_xpath}{field_xpath}. The expected ontology instances are epo: {class_path} .\n" \
"\n" + "\n".join(prefixes) + "\n\n" \
f"ASK WHERE {{ {property_path} }}"
def mapping_suite_processor_generate_sparql_queries(conceptual_mappings_file_path: pathlib.Path,
output_sparql_queries_folder_path: pathlib.Path,
rq_name: str = DEFAULT_RQ_NAME):
"""
This function reads data from conceptual_mappings.xlsx and generates SPARQL validation queries in provided package.
:param conceptual_mappings_file_path:
:param output_sparql_queries_folder_path:
:param rq_name:
:return:
"""
with open(conceptual_mappings_file_path, 'rb') as excel_file:
conceptual_mappings_rules_df = pd.read_excel(excel_file, sheet_name=CONCEPTUAL_MAPPINGS_RULES_SHEET_NAME)
conceptual_mappings_rules_df.columns = conceptual_mappings_rules_df.iloc[0]
conceptual_mappings_rules_df = conceptual_mappings_rules_df[1:]
conceptual_mappings_rules_df = conceptual_mappings_rules_df[
conceptual_mappings_rules_df[RULES_PROPERTY_PATH].notnull()]
sparql_queries = sparql_validation_generator(conceptual_mappings_rules_df)
output_sparql_queries_folder_path.mkdir(parents=True, exist_ok=True)
for index, sparql_query in enumerate(sparql_queries):
output_file_path = output_sparql_queries_folder_path / f"{rq_name}{index}.rq"
with open(output_file_path, "w") as output_file:
output_file.write(sparql_query)
| en | 0.66582 | This function generates SPARQL queries based on data in the dataframe. :param data: :return: This function reads data from conceptual_mappings.xlsx and generates SPARQL validation queries in provided package. :param conceptual_mappings_file_path: :param output_sparql_queries_folder_path: :param rq_name: :return: | 2.769491 | 3 |
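A minimal usage sketch for the generator above follows; the spreadsheet path and output folder are placeholders, not files from the repository.

# Hedged usage sketch; paths are placeholders.
import pathlib

from ted_sws.mapping_suite_processor.services.conceptual_mapping_generate_sparql_queries import (
    mapping_suite_processor_generate_sparql_queries,
)

if __name__ == "__main__":
    mapping_suite_processor_generate_sparql_queries(
        conceptual_mappings_file_path=pathlib.Path("conceptual_mappings.xlsx"),
        output_sparql_queries_folder_path=pathlib.Path("output/sparql_queries"),
    )
    # Each generated .rq file holds one ASK query with a #title/#description header.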
src/__init__.py | codeKgu/BiLevel-Graph-Neural-Network | 20 | 9513 | import sys
from os.path import dirname, abspath, join
cur_folder = dirname(abspath(__file__))
sys.path.insert(0, join(dirname(cur_folder), 'src'))
sys.path.insert(0, dirname(cur_folder))
print(cur_folder) | import sys
from os.path import dirname, abspath, join
cur_folder = dirname(abspath(__file__))
sys.path.insert(0, join(dirname(cur_folder), 'src'))
sys.path.insert(0, dirname(cur_folder))
print(cur_folder) | none | 1 | 2.526628 | 3 |
|
src/controllers/serie.py | igormotta92/gta-desafio-python-flask-api | 0 | 9514 | <filename>src/controllers/serie.py<gh_stars>0
# https://stackoverflow.com/questions/3300464/how-can-i-get-dict-from-sqlite-query
# from flask import Flask
from flask_restful import Resource, reqparse
from src.model.serie import SerieModel
from src.server.instance import server
from db import db
# books_db = [{"id": 0, "title": "War and Peace"}, {"id": 1, "title": "Clean Code"}]
api = server.api
class SeriesController(Resource):
@classmethod
def routes(self):
api.add_resource(Series, "/series/<int:id>")
api.add_resource(SeriesList, "/series")
class Series(Resource):
def get(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id(id)
if not serie:
            return {}, 204
return serie
def put(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id_build(id)
if not serie:
return None, 204
# __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int)
parser = reqparse.RequestParser()
parser.add_argument(
"title", type=str, required=True, help="Title cannot be blank"
)
parser.add_argument(
"resume", type=str, required=True, help="Resume cannot be blank"
)
parser.add_argument(
"rating",
type=int,
choices=range(1, 6),
required=True,
help="rating cannot be blank or range invalided",
)
parser.add_argument(
"genre", type=str, required=True, help="Genre cannot be blank"
)
parser.add_argument(
"season", type=int, required=True, help="Season cannot be blank"
)
data = parser.parse_args()
# update
serie.title = data.title
serie.resume = data.resume
serie.genre = data.genre
serie.rating = data.rating
serie.season = data.season
try:
serie.update()
except Exception as error:
return {"Error": str(error)}, 400
return None, 200, {"Location": f"http://127.0.0.1:5000/series/{id}"}
def delete(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id_build(id)
if not serie:
return {}, 204
serie.delete()
return serie.to_dict(), 200
class SeriesList(Resource):
def get(self):
SerieModel.setConnectDataBase(db)
try:
series = SerieModel.find_all()
except Exception as error:
return {"Error": str(error)}, 400
return series
def post(self):
SerieModel.setConnectDataBase(db)
###
# __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int)
# request
parser = reqparse.RequestParser()
parser.add_argument(
"title", type=str, required=True, help="Title cannot be blank"
)
parser.add_argument(
"resume", type=str, required=True, help="Resume cannot be blank"
)
parser.add_argument(
"genre", type=str, required=True, help="Genre cannot be blank"
)
parser.add_argument(
"rating",
type=int,
required=True,
choices=range(1, 6),
help="rating cannot be blank or range invalided",
)
parser.add_argument(
"season", type=str, required=True, help="Season cannot be blank"
)
data = parser.parse_args()
###
serie = SerieModel().build(
data.title, data.resume, data.genre, data.rating, data.season
)
try:
lastid = serie.insert().lastrowid
except Exception as error:
return {"Error": str(error)}, 400
return None, 201, {"Location": f"http://127.0.0.1:5000/series/{lastid}"}
| <filename>src/controllers/serie.py<gh_stars>0
# https://stackoverflow.com/questions/3300464/how-can-i-get-dict-from-sqlite-query
# from flask import Flask
from flask_restful import Resource, reqparse
from src.model.serie import SerieModel
from src.server.instance import server
from db import db
# books_db = [{"id": 0, "title": "War and Peace"}, {"id": 1, "title": "Clean Code"}]
api = server.api
class SeriesController(Resource):
@classmethod
def routes(self):
api.add_resource(Series, "/series/<int:id>")
api.add_resource(SeriesList, "/series")
class Series(Resource):
def get(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id(id)
if not serie:
            return {}, 204
return serie
def put(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id_build(id)
if not serie:
return None, 204
# __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int)
parser = reqparse.RequestParser()
parser.add_argument(
"title", type=str, required=True, help="Title cannot be blank"
)
parser.add_argument(
"resume", type=str, required=True, help="Resume cannot be blank"
)
parser.add_argument(
"rating",
type=int,
choices=range(1, 6),
required=True,
help="rating cannot be blank or range invalided",
)
parser.add_argument(
"genre", type=str, required=True, help="Genre cannot be blank"
)
parser.add_argument(
"season", type=int, required=True, help="Season cannot be blank"
)
data = parser.parse_args()
# update
serie.title = data.title
serie.resume = data.resume
serie.genre = data.genre
serie.rating = data.rating
serie.season = data.season
try:
serie.update()
except Exception as error:
return {"Error": str(error)}, 400
return None, 200, {"Location": f"http://127.0.0.1:5000/series/{id}"}
def delete(self, id):
SerieModel.setConnectDataBase(db)
serie = SerieModel.find_by_id_build(id)
if not serie:
return {}, 204
serie.delete()
return serie.to_dict(), 200
class SeriesList(Resource):
def get(self):
SerieModel.setConnectDataBase(db)
try:
series = SerieModel.find_all()
except Exception as error:
return {"Error": str(error)}, 400
return series
def post(self):
SerieModel.setConnectDataBase(db)
###
# __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int)
# request
parser = reqparse.RequestParser()
parser.add_argument(
"title", type=str, required=True, help="Title cannot be blank"
)
parser.add_argument(
"resume", type=str, required=True, help="Resume cannot be blank"
)
parser.add_argument(
"genre", type=str, required=True, help="Genre cannot be blank"
)
parser.add_argument(
"rating",
type=int,
required=True,
choices=range(1, 6),
help="rating cannot be blank or range invalided",
)
parser.add_argument(
"season", type=str, required=True, help="Season cannot be blank"
)
data = parser.parse_args()
###
serie = SerieModel().build(
data.title, data.resume, data.genre, data.rating, data.season
)
try:
lastid = serie.insert().lastrowid
except Exception as error:
return {"Error": str(error)}, 400
return None, 201, {"Location": f"http://127.0.0.1:5000/series/{lastid}"}
| en | 0.639713 | # https://stackoverflow.com/questions/3300464/how-can-i-get-dict-from-sqlite-query # from flask import Flask # books_db = [{"id": 0, "title": "War and Peace"}, {"id": 1, "title": "Clean Code"}] # __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int) # update ### # __columns__ = ("title" str, "resume" str, "genre" str, "rating" int, "season" int) # request ### | 3.067464 | 3 |
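Below is a hedged sketch of how these resources are exercised once the Flask app is running; the app/server bootstrap (src.server.instance) is not shown in this file, so a running server on 127.0.0.1:5000 and a prior call to SeriesController.routes() are assumed.

# Hedged client-side sketch; server bootstrap is assumed.
import requests

payload = {
    "title": "Dark",
    "resume": "A missing child sets four families on a hunt for answers.",
    "genre": "Sci-Fi",
    "rating": 5,
    "season": 3,
}
created = requests.post("http://127.0.0.1:5000/series", json=payload)
print(created.status_code, created.headers.get("Location"))

listing = requests.get("http://127.0.0.1:5000/series")
print(listing.json())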
tests/test_random.py | hirnimeshrampuresoftware/python-tcod | 231 | 9515 | <filename>tests/test_random.py<gh_stars>100-1000
import copy
import pickle
import tcod
def test_tcod_random() -> None:
rand = tcod.random.Random(tcod.random.COMPLEMENTARY_MULTIPLY_WITH_CARRY)
assert 0 <= rand.randint(0, 100) <= 100
assert 0 <= rand.uniform(0, 100) <= 100
rand.guass(0, 1)
rand.inverse_guass(0, 1)
def test_tcod_random_copy() -> None:
rand = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
rand2 = copy.copy(rand)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
def test_tcod_random_pickle() -> None:
rand = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
rand2 = pickle.loads(pickle.dumps(rand))
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
| <filename>tests/test_random.py<gh_stars>100-1000
import copy
import pickle
import tcod
def test_tcod_random() -> None:
rand = tcod.random.Random(tcod.random.COMPLEMENTARY_MULTIPLY_WITH_CARRY)
assert 0 <= rand.randint(0, 100) <= 100
assert 0 <= rand.uniform(0, 100) <= 100
rand.guass(0, 1)
rand.inverse_guass(0, 1)
def test_tcod_random_copy() -> None:
rand = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
rand2 = copy.copy(rand)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
def test_tcod_random_pickle() -> None:
rand = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
rand2 = pickle.loads(pickle.dumps(rand))
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
assert rand.uniform(0, 1) == rand2.uniform(0, 1)
| none | 1 | 2.554756 | 3 |
|
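As a small companion to the tests above, the same API can be exercised directly; this is only a sketch and requires python-tcod to be installed.

# Direct usage of the API covered by the tests above.
import tcod

rng = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
print(rng.randint(1, 6))      # integer in [1, 6]
print(rng.uniform(0.0, 1.0))  # float in [0.0, 1.0]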
src/Products/Five/viewlet/viewlet.py | rbanffy/Zope | 289 | 9516 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Viewlet.
"""
import os
import zope.viewlet.viewlet
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class ViewletBase(zope.viewlet.viewlet.ViewletBase):
pass
class SimpleAttributeViewlet(zope.viewlet.viewlet.SimpleAttributeViewlet):
pass
class simple(zope.viewlet.viewlet.simple):
# We need to ensure that the proper __init__ is called.
__init__ = ViewletBase.__init__
def SimpleViewletClass(template, bases=(), attributes=None, name=''):
"""A function that can be used to generate a viewlet from a set of
information.
"""
# Create the base class hierarchy
bases += (simple, ViewletBase)
attrs = {'index': ViewPageTemplateFile(template),
'__name__': name}
if attributes:
attrs.update(attributes)
# Generate a derived view class.
class_ = type("SimpleViewletClass from %s" % template, bases, attrs)
return class_
class ResourceViewletBase(zope.viewlet.viewlet.ResourceViewletBase):
pass
def JavaScriptViewlet(path):
"""Create a viewlet that can simply insert a javascript link."""
src = os.path.join(os.path.dirname(__file__), 'javascript_viewlet.pt')
klass = type('JavaScriptViewlet',
(ResourceViewletBase, ViewletBase),
{'index': ViewPageTemplateFile(src), '_path': path})
return klass
class CSSResourceViewletBase(zope.viewlet.viewlet.CSSResourceViewletBase):
pass
def CSSViewlet(path, media="all", rel="stylesheet"):
"""Create a viewlet that can simply insert a javascript link."""
src = os.path.join(os.path.dirname(__file__), 'css_viewlet.pt')
klass = type('CSSViewlet',
(CSSResourceViewletBase, ViewletBase),
{'index': ViewPageTemplateFile(src),
'_path': path,
'_media': media,
'_rel': rel})
return klass
| ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Viewlet.
"""
import os
import zope.viewlet.viewlet
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class ViewletBase(zope.viewlet.viewlet.ViewletBase):
pass
class SimpleAttributeViewlet(zope.viewlet.viewlet.SimpleAttributeViewlet):
pass
class simple(zope.viewlet.viewlet.simple):
# We need to ensure that the proper __init__ is called.
__init__ = ViewletBase.__init__
def SimpleViewletClass(template, bases=(), attributes=None, name=''):
"""A function that can be used to generate a viewlet from a set of
information.
"""
# Create the base class hierarchy
bases += (simple, ViewletBase)
attrs = {'index': ViewPageTemplateFile(template),
'__name__': name}
if attributes:
attrs.update(attributes)
# Generate a derived view class.
class_ = type("SimpleViewletClass from %s" % template, bases, attrs)
return class_
class ResourceViewletBase(zope.viewlet.viewlet.ResourceViewletBase):
pass
def JavaScriptViewlet(path):
"""Create a viewlet that can simply insert a javascript link."""
src = os.path.join(os.path.dirname(__file__), 'javascript_viewlet.pt')
klass = type('JavaScriptViewlet',
(ResourceViewletBase, ViewletBase),
{'index': ViewPageTemplateFile(src), '_path': path})
return klass
class CSSResourceViewletBase(zope.viewlet.viewlet.CSSResourceViewletBase):
pass
def CSSViewlet(path, media="all", rel="stylesheet"):
"""Create a viewlet that can simply insert a javascript link."""
src = os.path.join(os.path.dirname(__file__), 'css_viewlet.pt')
klass = type('CSSViewlet',
(CSSResourceViewletBase, ViewletBase),
{'index': ViewPageTemplateFile(src),
'_path': path,
'_media': media,
'_rel': rel})
return klass
| en | 0.537869 | ############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## Viewlet. # We need to ensure that the proper __init__ is called. A function that can be used to generate a viewlet from a set of information. # Create the base class hierarchy # Generate a derived view class. Create a viewlet that can simply insert a javascript link. Create a viewlet that can simply insert a javascript link. | 2.041018 | 2 |
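A hedged usage sketch for the two factories above: they return viewlet classes, which are normally hooked up through ZCML. The resource names and the manager interface in the commented registration are placeholders, not values taken from this file.

# Hedged usage sketch; resource names are placeholders.
from Products.Five.viewlet.viewlet import CSSViewlet, JavaScriptViewlet

MainCSSViewlet = CSSViewlet("++resource++main.css", media="screen")
MainJSViewlet = JavaScriptViewlet("++resource++main.js")

# Illustrative ZCML registration (manager interface is an assumption):
# <browser:viewlet
#     name="main-css"
#     manager="some.package.interfaces.IHtmlHead"
#     class=".viewlets.MainCSSViewlet"
#     permission="zope2.View" />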
problema21.py | bptfreitas/Project-Euler | 0 | 9517 | <gh_stars>0
#Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
#If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.
#For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#Evaluate the sum of all the amicable numbers under 10000.
import euler
def d(n):
return sum(euler.get_divisors(n))
print euler.get_divisors(284)
print sum(euler.get_divisors(284))
limit=10000
perc=5
step=perc*limit/100
cp=0
a=1
amics=[]
print "Starting..."
for a in range(1,limit+1):
b=d(a)
if a==d(b) and a!=b:
print "Pair:" + str(a) + " and " + str(b)
if (a not in amics):
amics.append(a)
if (b not in amics):
amics.append(b)
print "Sum of amicables:"
print sum(amics)
| #Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
#If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.
#For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#Evaluate the sum of all the amicable numbers under 10000.
import euler
def d(n):
return sum(euler.get_divisors(n))
print euler.get_divisors(284)
print sum(euler.get_divisors(284))
limit=10000
perc=5
step=perc*limit/100
cp=0
a=1
amics=[]
print "Starting..."
for a in range(1,limit+1):
b=d(a)
if a==d(b) and a!=b:
print "Pair:" + str(a) + " and " + str(b)
if (a not in amics):
amics.append(a)
if (b not in amics):
amics.append(b)
print "Sum of amicables:"
print sum(amics) | en | 0.895524 | #Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n). #If d(a) = b and d(b) = a, where a b, then a and b are an amicable pair and each of a and b are called amicable numbers. #For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220. #Evaluate the sum of all the amicable numbers under 10000. | 3.662506 | 4 |
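Because the local euler helper module is not shown above, here is a self-contained sketch of the same computation with get_divisors reimplemented; it is illustrative only and may differ from the repository's helper.

# Self-contained reimplementation for illustration (Python 3).
def get_divisors(n):
    """Proper divisors of n (excluding n itself)."""
    divisors = [1] if n > 1 else []
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            divisors.append(i)
            if i != n // i:
                divisors.append(n // i)
    return divisors


def d(n):
    return sum(get_divisors(n))


amicable = [a for a in range(2, 10000) if a != d(a) and d(d(a)) == a]
print(sum(amicable))  # expected: 31626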
python-3.6.0/Doc/includes/email-unpack.py | emacslisp/python | 854 | 9518 | #!/usr/bin/env python3
"""Unpack a MIME message into a directory of files."""
import os
import email
import mimetypes
from email.policy import default
from argparse import ArgumentParser
def main():
parser = ArgumentParser(description="""\
Unpack a MIME message into a directory of files.
""")
parser.add_argument('-d', '--directory', required=True,
help="""Unpack the MIME message into the named
directory, which will be created if it doesn't already
exist.""")
parser.add_argument('msgfile')
args = parser.parse_args()
with open(args.msgfile, 'rb') as fp:
msg = email.message_from_binary_file(fp, policy=default)
try:
os.mkdir(args.directory)
except FileExistsError:
pass
counter = 1
for part in msg.walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
# Applications should really sanitize the given filename so that an
# email message can't be used to overwrite important files
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
# Use a generic bag-of-bits extension
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
counter += 1
with open(os.path.join(args.directory, filename), 'wb') as fp:
fp.write(part.get_payload(decode=True))
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
"""Unpack a MIME message into a directory of files."""
import os
import email
import mimetypes
from email.policy import default
from argparse import ArgumentParser
def main():
parser = ArgumentParser(description="""\
Unpack a MIME message into a directory of files.
""")
parser.add_argument('-d', '--directory', required=True,
help="""Unpack the MIME message into the named
directory, which will be created if it doesn't already
exist.""")
parser.add_argument('msgfile')
args = parser.parse_args()
with open(args.msgfile, 'rb') as fp:
msg = email.message_from_binary_file(fp, policy=default)
try:
os.mkdir(args.directory)
except FileExistsError:
pass
counter = 1
for part in msg.walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
# Applications should really sanitize the given filename so that an
# email message can't be used to overwrite important files
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_content_type())
if not ext:
# Use a generic bag-of-bits extension
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
counter += 1
with open(os.path.join(args.directory, filename), 'wb') as fp:
fp.write(part.get_payload(decode=True))
if __name__ == '__main__':
main()
| en | 0.818776 | #!/usr/bin/env python3 Unpack a MIME message into a directory of files. \ Unpack a MIME message into a directory of files. Unpack the MIME message into the named directory, which will be created if it doesn't already exist. # multipart/* are just containers # Applications should really sanitize the given filename so that an # email message can't be used to overwrite important files # Use a generic bag-of-bits extension | 3.19143 | 3 |
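As a quick way to try the script, the snippet below builds a tiny multipart message to feed it; the file names are placeholders and the final command line assumes the script is saved as email-unpack.py.

# Hedged companion sketch: create a test message for the unpacker above.
from email.message import EmailMessage

msg = EmailMessage()
msg["Subject"] = "demo"
msg.set_content("hello")
msg.add_attachment(b"\x00\x01\x02", maintype="application",
                   subtype="octet-stream", filename="blob.bin")

with open("demo.eml", "wb") as fp:
    fp.write(bytes(msg))

# Then run: python email-unpack.py -d unpacked demo.eml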
src/streetview/logging_facility.py | juliantrue/Streetview-Segmenting | 1 | 9519 | import sys, os
import logging
import datetime
module_name = 'Streetview_Module'
debug_mode = True
class LoggingWrapper(object):
def __init__(self, log_folder_path=None):
self.debug_mode = debug_mode
# Create logger with module name
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
now = datetime.datetime.now()
log_file = '{}{}{}{}{}{}.log'.format(now.year, now.month, now.day,
now.hour, now.minute,
now.second)
# If no folder provided, output to stderr
if log_folder_path == None:
fh = logging.StreamHandler(sys.stderr)
else:
log_file = os.path.join(log_folder_path, log_file)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
| import sys, os
import logging
import datetime
module_name = 'Streetview_Module'
debug_mode = True
class LoggingWrapper(object):
def __init__(self, log_folder_path=None):
self.debug_mode = debug_mode
# Create logger with module name
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
now = datetime.datetime.now()
log_file = '{}{}{}{}{}{}.log'.format(now.year, now.month, now.day,
now.hour, now.minute,
now.second)
# If no folder provided, output to stderr
if log_folder_path == None:
fh = logging.StreamHandler(sys.stderr)
else:
log_file = os.path.join(log_folder_path, log_file)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
| en | 0.619471 | # Create logger with module name # create file handler which logs even debug messages # If no folder provided, output to stderr # create console handler with a higher log level # create formatter and add it to the handlers # add the handlers to the logger | 2.766502 | 3 |
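A minimal usage sketch for the wrapper above: once it is constructed, any logger obtained under the same module name shares its handlers. The import path is an assumption based on the file location (src/streetview/logging_facility.py).

# Hedged usage sketch.
import logging

from streetview.logging_facility import LoggingWrapper  # assumed import path

LoggingWrapper(log_folder_path=None)       # no folder -> log records go to stderr
log = logging.getLogger("Streetview_Module")
log.debug("pipeline started")              # captured by the DEBUG-level handler
log.error("something went wrong")          # also echoed by the ERROR-level console handler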
tkinter_examples/draw_chess_board.py | DazEB2/SimplePyScripts | 117 | 9520 | <filename>tkinter_examples/draw_chess_board.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from tkinter import *
root = Tk()
root.title('Chess board')
canvas = Canvas(root, width=700, height=700, bg='#fff')
canvas.pack()
fill = '#fff'
outline = '#000'
size = 88
for i in range(8):
for j in range(8):
x1, y1, x2, y2 = i * size, j * size, i * size + size, j * size + size
canvas.create_rectangle(x1, y1, x2, y2, fill=fill, outline=outline)
fill, outline = outline, fill
fill, outline = outline, fill
root.mainloop()
| <filename>tkinter_examples/draw_chess_board.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from tkinter import *
root = Tk()
root.title('Chess board')
canvas = Canvas(root, width=700, height=700, bg='#fff')
canvas.pack()
fill = '#fff'
outline = '#000'
size = 88
for i in range(8):
for j in range(8):
x1, y1, x2, y2 = i * size, j * size, i * size + size, j * size + size
canvas.create_rectangle(x1, y1, x2, y2, fill=fill, outline=outline)
fill, outline = outline, fill
fill, outline = outline, fill
root.mainloop()
| en | 0.308914 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- | 3.927497 | 4 |
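A small, purely illustrative variation of the script above that moves the drawing into a reusable helper with parameterized colours and square size; it is not part of the original file.

# Hedged variation of the original script.
from tkinter import Tk, Canvas


def draw_board(canvas, size=88, light="#fff", dark="#000"):
    for i in range(8):
        for j in range(8):
            color = light if (i + j) % 2 == 0 else dark
            canvas.create_rectangle(i * size, j * size,
                                    (i + 1) * size, (j + 1) * size,
                                    fill=color, outline="#000")


root = Tk()
root.title("Chess board")
canvas = Canvas(root, width=8 * 88, height=8 * 88, bg="#fff")
canvas.pack()
draw_board(canvas)
root.mainloop()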
sandbox_api/asandbox.py | PremierLangage/sandbox-api | 4 | 9521 | # asandbox.py
#
# Authors:
# - <NAME> <<EMAIL>>
"""An asynchronous implementation of the Sandbox API."""
import io
import json
import os
from contextlib import AbstractAsyncContextManager
from typing import BinaryIO, Optional, Union
import aiohttp
from .exceptions import status_exceptions
from .utils import ENDPOINTS
class ASandbox(AbstractAsyncContextManager):
"""Interface a Sandbox server asynchronously."""
def __init__(self, url: str, total: Optional[float] = 60, connect: Optional[float] = None,
sock_connect: Optional[float] = None, sock_read: Optional[float] = None):
"""Initialize a sandbox with the given URL.
Default timeout for the whole operation is one minute, use the following
argument to override :
* total : The whole operation time including connection
establishment, request sending and response reading.
* connect : The time consists connection establishment for a new
connection or waiting for a free connection from a pool if
pool connection limits are exceeded.
* sock_connect : A timeout for connecting to a peer for a new
connection, not given from a pool.
* sock_read : The maximum allowed timeout for period between reading
a new data portion from a peer.
"""
self.url = url
self.session = aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total, connect, sock_connect, sock_read)
)
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def close(self):
"""Close the aiohttp ClientSession."""
await self.session.close()
async def _build_url(self, endpoint: str, *args: str):
"""Build the url corresponding to <endpoint> with the given <args>."""
return os.path.join(self.url, ENDPOINTS[endpoint] % tuple(args))
async def libraries(self) -> dict:
"""Asynchronously retrieve libraries installed in the containers of the
sandbox."""
async with self.session.get(await self._build_url("libraries")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def specifications(self) -> dict:
"""Asynchronously retrieve specifications of the sandbox."""
async with self.session.get(await self._build_url("specifications")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def usage(self) -> dict:
"""Asynchronously retrieve current usage stats of the sandbox."""
async with self.session.get(await self._build_url("usages")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def download(self, uuid: str, path: str = None) -> BinaryIO:
"""Asynchronously download an environment or a specific file inside an
environment."""
if path is None:
url = await self._build_url("environments", uuid)
else:
url = await self._build_url("files", uuid, path)
async with self.session.get(url) as response:
if response.status != 200:
raise status_exceptions(response)
return io.BytesIO(await response.read())
async def check(self, uuid: str, path: str = None) -> int:
"""Asynchronously check if an environment or a specific file inside an
environment exists."""
if path is None:
url = await self._build_url("environments", uuid)
else:
url = await self._build_url("files", uuid, path)
async with self.session.head(url) as response:
if response.status not in [200, 404]: # pragma: no cover
raise status_exceptions(response)
return 0 if response.status == 404 else response.headers["Content-Length"]
async def execute(self, config: Union[dict], environ: Optional[BinaryIO] = None) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("config", json.dumps(config))
if environ is not None:
data.add_field("environment", environ)
async with self.session.post(await self._build_url("execute"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def load(self, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
async with self.session.post(await self._build_url("load/fr"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def demo(self, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
data.add_field("demo", True)
async with self.session.post(await self._build_url("demo"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def playexo(self, config: dict, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
data.add_field("config", json.dumps(config))
async with self.session.post(await self._build_url("exo"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def exec(self, datas: dict = {}) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(datas))
for key, value in datas.items():
data.add_field(str(key), value)
async with self.session.post(await self._build_url("exec"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
| # asandbox.py
#
# Authors:
# - <NAME> <<EMAIL>>
"""An asynchronous implementation of the Sandbox API."""
import io
import json
import os
from contextlib import AbstractAsyncContextManager
from typing import BinaryIO, Optional, Union
import aiohttp
from .exceptions import status_exceptions
from .utils import ENDPOINTS
class ASandbox(AbstractAsyncContextManager):
"""Interface a Sandbox server asynchronously."""
def __init__(self, url: str, total: Optional[float] = 60, connect: Optional[float] = None,
sock_connect: Optional[float] = None, sock_read: Optional[float] = None):
"""Initialize a sandbox with the given URL.
Default timeout for the whole operation is one minute, use the following
argument to override :
* total : The whole operation time including connection
establishment, request sending and response reading.
* connect : The time consists connection establishment for a new
connection or waiting for a free connection from a pool if
pool connection limits are exceeded.
* sock_connect : A timeout for connecting to a peer for a new
connection, not given from a pool.
* sock_read : The maximum allowed timeout for period between reading
a new data portion from a peer.
"""
self.url = url
self.session = aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total, connect, sock_connect, sock_read)
)
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def close(self):
"""Close the aiohttp ClientSession."""
await self.session.close()
async def _build_url(self, endpoint: str, *args: str):
"""Build the url corresponding to <endpoint> with the given <args>."""
return os.path.join(self.url, ENDPOINTS[endpoint] % tuple(args))
async def libraries(self) -> dict:
"""Asynchronously retrieve libraries installed in the containers of the
sandbox."""
async with self.session.get(await self._build_url("libraries")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def specifications(self) -> dict:
"""Asynchronously retrieve specifications of the sandbox."""
async with self.session.get(await self._build_url("specifications")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def usage(self) -> dict:
"""Asynchronously retrieve current usage stats of the sandbox."""
async with self.session.get(await self._build_url("usages")) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def download(self, uuid: str, path: str = None) -> BinaryIO:
"""Asynchronously download an environment or a specific file inside an
environment."""
if path is None:
url = await self._build_url("environments", uuid)
else:
url = await self._build_url("files", uuid, path)
async with self.session.get(url) as response:
if response.status != 200:
raise status_exceptions(response)
return io.BytesIO(await response.read())
async def check(self, uuid: str, path: str = None) -> int:
"""Asynchronously check if an environment or a specific file inside an
environment exists."""
if path is None:
url = await self._build_url("environments", uuid)
else:
url = await self._build_url("files", uuid, path)
async with self.session.head(url) as response:
if response.status not in [200, 404]: # pragma: no cover
raise status_exceptions(response)
return 0 if response.status == 404 else response.headers["Content-Length"]
async def execute(self, config: Union[dict], environ: Optional[BinaryIO] = None) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("config", json.dumps(config))
if environ is not None:
data.add_field("environment", environ)
async with self.session.post(await self._build_url("execute"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def load(self, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
async with self.session.post(await self._build_url("load/fr"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def demo(self, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
data.add_field("demo", True)
async with self.session.post(await self._build_url("demo"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def playexo(self, config: dict, environ: dict) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(environ))
data.add_field("config", json.dumps(config))
async with self.session.post(await self._build_url("exo"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
async def exec(self, datas: dict = {}) -> dict:
"""Asynchronously execute commands on the sandbox according to <config>
and <environ>, returning the response's json as a dict.
<environ>, if not None, will be consumed and closed and shall not be
used further."""
data = aiohttp.FormData()
data.add_field("data", json.dumps(datas))
for key, value in datas.items():
data.add_field(str(key), value)
async with self.session.post(await self._build_url("exec"), data=data) as response:
if response.status != 200:
raise status_exceptions(response)
return await response.json()
| en | 0.879242 | # asandbox.py # # Authors: # - <NAME> <<EMAIL>> An asynchronous implementation of the Sandbox API. Interface a Sandbox server asynchronously. Initialize a sandbox with the given URL. Default timeout for the whole operation is one minute, use the following argument to override : * total : The whole operation time including connection establishment, request sending and response reading. * connect : The time consists connection establishment for a new connection or waiting for a free connection from a pool if pool connection limits are exceeded. * sock_connect : A timeout for connecting to a peer for a new connection, not given from a pool. * sock_read : The maximum allowed timeout for period between reading a new data portion from a peer. Close the aiohttp ClientSession. Build the url corresponding to <endpoint> with the given <args>. Asynchronously retrieve libraries installed in the containers of the sandbox. Asynchronously retrieve specifications of the sandbox. Asynchronously retrieve current usage stats of the sandbox. Asynchronously download an environment or a specific file inside an environment. Asynchronously check if an environment or a specific file inside an environment exists. # pragma: no cover Asynchronously execute commands on the sandbox according to <config> and <environ>, returning the response's json as a dict. <environ>, if not None, will be consumed and closed and shall not be used further. Asynchronously execute commands on the sandbox according to <config> and <environ>, returning the response's json as a dict. <environ>, if not None, will be consumed and closed and shall not be used further. Asynchronously execute commands on the sandbox according to <config> and <environ>, returning the response's json as a dict. <environ>, if not None, will be consumed and closed and shall not be used further. Asynchronously execute commands on the sandbox according to <config> and <environ>, returning the response's json as a dict. <environ>, if not None, will be consumed and closed and shall not be used further. Asynchronously execute commands on the sandbox according to <config> and <environ>, returning the response's json as a dict. <environ>, if not None, will be consumed and closed and shall not be used further. | 2.849727 | 3 |
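A hedged usage sketch for the client above: the sandbox URL is a placeholder and the exact config schema expected by the server is not defined in this file, so the dictionary passed to execute() is only an assumption.

# Hedged usage sketch; URL and config are placeholders.
import asyncio

from sandbox_api.asandbox import ASandbox


async def main():
    async with ASandbox("https://sandbox.example.com") as sandbox:
        print(await sandbox.specifications())
        result = await sandbox.execute({"commands": ["echo hello"]})  # schema assumed
        print(result)


asyncio.run(main())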
api/services/usuarios_services.py | jhonnattan123/fastapi_crud_example | 1 | 9522 | import datetime
from uuid import UUID
from api.actions import storage
from fastapi import HTTPException
from api.models.usuario import Usuario
from starlette.requests import Request
from api.dependencies import validar_email, validar_formato_fecha,validar_edad
FORMATO_FECHA = "%Y-%m-%d"
EDAD_MINIMA = 18
EDAD_MAXIMA = 100
class Usuarios_Services:
""" Sección de servicios para el manejo de la logica de negocio
Attributes:
FORMATO_FECHA (str): Formato de fecha para validar
EDAD_MINIMA (int): Edad minima para validar
EDAD_MAXIMA (int): Edad maxima para validar
"""
def agregar_usuario(self, usuario: Usuario, request: Request) -> dict:
""" Agrega un usuario a la base de datos.
:param usuario: Usuario a agregar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
usuario_id = storage.add(usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al agregar usuario: {}".format(str(e)))
raise e
def editar_usuario(self, usuario_id: UUID, usuario: Usuario, request: Request) -> dict:
""" Edita un usuario de la base de datos.
:param usuario_id: ID del usuario a editar
:param usuario: Usuario a editar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
storage.update(usuario_id, usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al editar usuario: {}".format(str(e)))
raise e
def eliminar_usuario(self, usuario_id: UUID, request: Request) -> dict:
""" Elimina un usuario de la base de datos.
:param usuario_id: ID del usuario a eliminar
:param request: Request de FastAPI
"""
try:
storage.delete(Usuario, usuario_id, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al eliminar usuario: {}".format(str(e)))
raise e
def listar_usuarios(self, pagina: int, cantidad: int, order_by: str, sort: str, request: Request)-> dict:
""" Obtiene una lista de usuarios de la base de datos.
:param pagina: Pagina a retornar
:param cantidad: Cantidad de usuarios a retornar
:param order_by: Campo por el cual se ordenará la lista
:param sort: Orden ascendente o descendente
:param request: Request de FastAPI
"""
try:
return storage.get_all(Usuario, pagina, cantidad, request, order_by, sort)
except Exception as e:
print("Error al listar usuarios: {}".format(str(e)))
raise e
def obtener_usuario(self, usuario_id: UUID, request: Request) -> Usuario:
""" Retorna un usuario por su ID
:param usuario_id: ID del usuario a consultar
:param request: Request de FastAPI
"""
try:
usuario = storage.get_by_id(Usuario, usuario_id, request)
return usuario
except Exception as e:
print("Error al obtener usuario: {}".format(str(e)))
raise e | import datetime
from uuid import UUID
from api.actions import storage
from fastapi import HTTPException
from api.models.usuario import Usuario
from starlette.requests import Request
from api.dependencies import validar_email, validar_formato_fecha,validar_edad
FORMATO_FECHA = "%Y-%m-%d"
EDAD_MINIMA = 18
EDAD_MAXIMA = 100
class Usuarios_Services:
""" Sección de servicios para el manejo de la logica de negocio
Attributes:
FORMATO_FECHA (str): Formato de fecha para validar
EDAD_MINIMA (int): Edad minima para validar
EDAD_MAXIMA (int): Edad maxima para validar
"""
def agregar_usuario(self, usuario: Usuario, request: Request) -> dict:
""" Agrega un usuario a la base de datos.
:param usuario: Usuario a agregar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
usuario_id = storage.add(usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al agregar usuario: {}".format(str(e)))
raise e
def editar_usuario(self, usuario_id: UUID, usuario: Usuario, request: Request) -> dict:
""" Edita un usuario de la base de datos.
:param usuario_id: ID del usuario a editar
:param usuario: Usuario a editar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
storage.update(usuario_id, usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al editar usuario: {}".format(str(e)))
raise e
def eliminar_usuario(self, usuario_id: UUID, request: Request) -> dict:
""" Elimina un usuario de la base de datos.
:param usuario_id: ID del usuario a eliminar
:param request: Request de FastAPI
"""
try:
storage.delete(Usuario, usuario_id, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al eliminar usuario: {}".format(str(e)))
raise e
def listar_usuarios(self, pagina: int, cantidad: int, order_by: str, sort: str, request: Request)-> dict:
""" Obtiene una lista de usuarios de la base de datos.
:param pagina: Pagina a retornar
:param cantidad: Cantidad de usuarios a retornar
:param order_by: Campo por el cual se ordenará la lista
:param sort: Orden ascendente o descendente
:param request: Request de FastAPI
"""
try:
return storage.get_all(Usuario, pagina, cantidad, request, order_by, sort)
except Exception as e:
print("Error al listar usuarios: {}".format(str(e)))
raise e
def obtener_usuario(self, usuario_id: UUID, request: Request) -> Usuario:
""" Retorna un usuario por su ID
:param usuario_id: ID del usuario a consultar
:param request: Request de FastAPI
"""
try:
usuario = storage.get_by_id(Usuario, usuario_id, request)
return usuario
except Exception as e:
print("Error al obtener usuario: {}".format(str(e)))
raise e | es | 0.857543 | Sección de servicios para el manejo de la logica de negocio Attributes: FORMATO_FECHA (str): Formato de fecha para validar EDAD_MINIMA (int): Edad minima para validar EDAD_MAXIMA (int): Edad maxima para validar Agrega un usuario a la base de datos. :param usuario: Usuario a agregar :param request: Request de FastAPI Edita un usuario de la base de datos. :param usuario_id: ID del usuario a editar :param usuario: Usuario a editar :param request: Request de FastAPI Elimina un usuario de la base de datos. :param usuario_id: ID del usuario a eliminar :param request: Request de FastAPI Obtiene una lista de usuarios de la base de datos. :param pagina: Pagina a retornar :param cantidad: Cantidad de usuarios a retornar :param order_by: Campo por el cual se ordenará la lista :param sort: Orden ascendente o descendente :param request: Request de FastAPI Retorna un usuario por su ID :param usuario_id: ID del usuario a consultar :param request: Request de FastAPI | 2.607412 | 3 |
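For context, here is a hedged sketch of how the service above might be wired into FastAPI routes; the router and prefix choices are assumptions for illustration and are not taken from the repository.

# Hedged wiring sketch; router/prefix choices are assumptions.
from uuid import UUID

from fastapi import APIRouter, Request

from api.models.usuario import Usuario
from api.services.usuarios_services import Usuarios_Services

router = APIRouter(prefix="/usuarios")
services = Usuarios_Services()


@router.post("/")
def agregar_usuario(usuario: Usuario, request: Request):
    return services.agregar_usuario(usuario, request)


@router.get("/{usuario_id}")
def obtener_usuario(usuario_id: UUID, request: Request):
    return services.obtener_usuario(usuario_id, request)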
certau/util/taxii/client.py | thisismyrobot/cti-toolkit | 12 | 9523 | import os
import logging
import dateutil
import pickle
from six.moves.urllib.parse import urlparse
from libtaxii import get_message_from_http_response, VID_TAXII_XML_11
from libtaxii.messages_11 import PollRequest, PollFulfillmentRequest
from libtaxii.messages_11 import PollResponse, generate_message_id
from libtaxii.clients import HttpClient
from certau import version_string
class SimpleTaxiiClient(HttpClient):
"""A simple interface to libtaxii for sending TAXII client messages.
Args:
username: a username for HTTP basic authentication
password: a password for HTTP basic authentication
key_file: a file containing a private key
(for SSL certificate-based authentication)
cert_file: a file containing a certificate
(for SSL certificate-based authentication)
ca_file: a file containing the CA's certificate
(for verifying the server's certificate)
"""
    def __init__(self, username=None, password=None,
key_file=None, cert_file=None, ca_file=None):
super(SimpleTaxiiClient, self).__init__()
self._logger = logging.getLogger()
self.username = username
self.password = password
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
def setup_authentication(self, use_ssl):
"""Setup the appropriate credentials and authentication type.
Initialises the authentication settings for the connection.
Args:
use_ssl: should this connection use SSL
"""
self.set_use_https(use_ssl)
credentials = dict()
if self.username and self.password:
credentials['username'] = self.username
credentials['password'] = self.password
if use_ssl and self.key_file and self.cert_file:
credentials['key_file'] = self.key_file
credentials['cert_file'] = self.cert_file
if credentials:
self.set_auth_credentials(credentials)
if self.username and self.password:
if use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT_BASIC)
self._logger.debug("TAXII authentication using private key "
"(%s), certificate (%s), and credentials "
"for user '%s'", self.key_file,
self.cert_file, self.username)
else:
self.set_auth_type(HttpClient.AUTH_BASIC)
self._logger.debug("TAXII authentication using credentials "
"for user '%s'", self.username)
elif use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT)
self._logger.debug("TAXII authentication using private key (%s) "
"and certificate (%s) only", self.key_file,
self.cert_file)
else:
self.set_auth_type(HttpClient.AUTH_NONE)
self._logger.debug("no TAXII authentication")
# CA certificate verification
if use_ssl and self.ca_file:
self.set_verify_server(verify_server=True, ca_file=self.ca_file)
self._logger.debug("SSL - verification using CA file (%s)",
self.ca_file)
@staticmethod
def create_poll_request(collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None):
"""Create a poll request message using supplied parameters."""
request_kwargs = dict(
message_id=generate_message_id(),
collection_name=collection,
exclusive_begin_timestamp_label=begin_timestamp,
inclusive_end_timestamp_label=end_timestamp,
)
if subscription_id:
request_kwargs['subscription_id'] = subscription_id
else:
request_kwargs['poll_parameters'] = PollRequest.PollParameters()
return PollRequest(**request_kwargs)
@staticmethod
def create_fulfillment_request(collection, result_id, part_number):
return PollFulfillmentRequest(
message_id=generate_message_id(),
collection_name=collection,
result_id=result_id,
result_part_number=part_number,
)
def send_taxii_message(self, request, host, path, port):
# Send the request message and return the response
http_response = self.call_taxii_service2(
host=host,
path=path,
message_binding=VID_TAXII_XML_11,
post_data=request.to_xml(),
port=port,
user_agent='{} (libtaxii)'.format(version_string)
)
response = get_message_from_http_response(
http_response=http_response,
in_response_to=request.message_id,
)
return response
@staticmethod
def get_poll_time(filename, poll_url, collection):
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if isinstance(poll_state, dict) and poll_url in poll_state:
if collection in poll_state[poll_url]:
time_string = poll_state[poll_url][collection]
return dateutil.parser.parse(time_string)
return None
@staticmethod
def save_poll_time(filename, poll_url, collection, timestamp):
if timestamp is not None:
poll_state = dict()
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if not isinstance(poll_state, dict):
raise Exception('unexpected content encountered when '
'reading TAXII poll state file')
if poll_url not in poll_state:
poll_state[poll_url] = dict()
poll_state[poll_url][collection] = str(timestamp)
with open(filename, 'wb') as state_file:
pickle.dump(poll_state, state_file, protocol=2)
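    # For reference, the unpickled poll-state structure written above is a
    # nested dict keyed by poll URL and then collection name; a sketch with
    # made-up values would look roughly like:
    #     {'https://taxii.example.com/poll': {'my-collection': '2019-12-03 12:23:17+00:00'}}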
def poll(self, poll_url, collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None, state_file=None):
"""Send the TAXII poll request to the server using the given URL."""
# Parse the poll_url to get the parts required by libtaxii
url_parts = urlparse(poll_url)
# Allow credentials to be provided in poll_url
if url_parts.username and url_parts.password:
self.username = url_parts.username
self.password = url_parts.password
self._logger.debug('updating username and password from poll_url')
if url_parts.scheme not in ['http', 'https']:
            raise Exception('invalid scheme in poll_url (%s); expected '
                            '"http" or "https"' % poll_url)
use_ssl = True if url_parts.scheme == 'https' else False
# Initialise the authentication settings
self.setup_authentication(use_ssl)
if state_file and not begin_timestamp:
begin_timestamp = self.get_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
)
request = self.create_poll_request(
collection=collection,
subscription_id=subscription_id,
begin_timestamp=begin_timestamp,
end_timestamp=end_timestamp,
)
self._logger.debug('sending poll request (url=%s, collection=%s)',
poll_url, collection)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
first = True
poll_end_time = None
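        # From here on: yield every content block in the current PollResponse
        # and, while the server reports more=True, fetch the next result part
        # with a poll fulfillment request.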
while True:
if not isinstance(response, PollResponse):
raise Exception('didn\'t get a poll response')
self._logger.debug('received poll response '
'(content_blocks=%d, result_id=%s, more=%s)',
len(response.content_blocks),
response.result_id,
'True' if response.more else 'False')
# Save end timestamp from first PollResponse
if first:
poll_end_time = response.inclusive_end_timestamp_label
if len(response.content_blocks) == 0:
if first:
self._logger.info('poll response contained '
'no content blocks')
break
for content_block in response.content_blocks:
yield content_block
if not response.more:
break
# Send a fulfilment request
if first:
# Initialise fulfilment request values
part_number = response.result_part_number
result_id = response.result_id
first = False
part_number += 1
request = self.create_fulfillment_request(
collection=collection,
result_id=result_id,
part_number=part_number,
)
self._logger.debug('sending fulfilment request '
'(result_id=%s, part_number=%d)',
result_id, part_number)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
# Update the timestamp for the latest poll
if state_file and poll_end_time:
self.save_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
timestamp=poll_end_time,
)
| import os
import logging
import dateutil
import pickle
from six.moves.urllib.parse import urlparse
from libtaxii import get_message_from_http_response, VID_TAXII_XML_11
from libtaxii.messages_11 import PollRequest, PollFulfillmentRequest
from libtaxii.messages_11 import PollResponse, generate_message_id
from libtaxii.clients import HttpClient
from certau import version_string
class SimpleTaxiiClient(HttpClient):
"""A simple interface to libtaxii for sending TAXII client messages.
Args:
username: a username for HTTP basic authentication
password: a password for HTTP basic authentication
key_file: a file containing a private key
(for SSL certificate-based authentication)
cert_file: a file containing a certificate
(for SSL certificate-based authentication)
ca_file: a file containing the CA's certificate
(for verifying the server's certificate)
"""
def __init__(self, username=None, password=<PASSWORD>,
key_file=None, cert_file=None, ca_file=None):
super(SimpleTaxiiClient, self).__init__()
self._logger = logging.getLogger()
self.username = username
self.password = password
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
def setup_authentication(self, use_ssl):
"""Setup the appropriate credentials and authentication type.
Initialises the authentication settings for the connection.
Args:
use_ssl: should this connection use SSL
"""
self.set_use_https(use_ssl)
credentials = dict()
if self.username and self.password:
credentials['username'] = self.username
credentials['password'] = self.password
if use_ssl and self.key_file and self.cert_file:
credentials['key_file'] = self.key_file
credentials['cert_file'] = self.cert_file
if credentials:
self.set_auth_credentials(credentials)
if self.username and self.password:
if use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT_BASIC)
self._logger.debug("TAXII authentication using private key "
"(%s), certificate (%s), and credentials "
"for user '%s'", self.key_file,
self.cert_file, self.username)
else:
self.set_auth_type(HttpClient.AUTH_BASIC)
self._logger.debug("TAXII authentication using credentials "
"for user '%s'", self.username)
elif use_ssl and self.key_file and self.cert_file:
self.set_auth_type(HttpClient.AUTH_CERT)
self._logger.debug("TAXII authentication using private key (%s) "
"and certificate (%s) only", self.key_file,
self.cert_file)
else:
self.set_auth_type(HttpClient.AUTH_NONE)
self._logger.debug("no TAXII authentication")
# CA certificate verification
if use_ssl and self.ca_file:
self.set_verify_server(verify_server=True, ca_file=self.ca_file)
self._logger.debug("SSL - verification using CA file (%s)",
self.ca_file)
@staticmethod
def create_poll_request(collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None):
"""Create a poll request message using supplied parameters."""
request_kwargs = dict(
message_id=generate_message_id(),
collection_name=collection,
exclusive_begin_timestamp_label=begin_timestamp,
inclusive_end_timestamp_label=end_timestamp,
)
if subscription_id:
request_kwargs['subscription_id'] = subscription_id
else:
request_kwargs['poll_parameters'] = PollRequest.PollParameters()
return PollRequest(**request_kwargs)
@staticmethod
def create_fulfillment_request(collection, result_id, part_number):
return PollFulfillmentRequest(
message_id=generate_message_id(),
collection_name=collection,
result_id=result_id,
result_part_number=part_number,
)
def send_taxii_message(self, request, host, path, port):
# Send the request message and return the response
http_response = self.call_taxii_service2(
host=host,
path=path,
message_binding=VID_TAXII_XML_11,
post_data=request.to_xml(),
port=port,
user_agent='{} (libtaxii)'.format(version_string)
)
response = get_message_from_http_response(
http_response=http_response,
in_response_to=request.message_id,
)
return response
@staticmethod
def get_poll_time(filename, poll_url, collection):
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if isinstance(poll_state, dict) and poll_url in poll_state:
if collection in poll_state[poll_url]:
time_string = poll_state[poll_url][collection]
return dateutil.parser.parse(time_string)
return None
@staticmethod
def save_poll_time(filename, poll_url, collection, timestamp):
if timestamp is not None:
poll_state = dict()
if os.path.isfile(filename):
with open(filename, 'rb') as state_file:
poll_state = pickle.load(state_file)
if not isinstance(poll_state, dict):
raise Exception('unexpected content encountered when '
'reading TAXII poll state file')
if poll_url not in poll_state:
poll_state[poll_url] = dict()
poll_state[poll_url][collection] = str(timestamp)
with open(filename, 'wb') as state_file:
pickle.dump(poll_state, state_file, protocol=2)
def poll(self, poll_url, collection, subscription_id=None,
begin_timestamp=None, end_timestamp=None, state_file=None):
"""Send the TAXII poll request to the server using the given URL."""
# Parse the poll_url to get the parts required by libtaxii
url_parts = urlparse(poll_url)
# Allow credentials to be provided in poll_url
if url_parts.username and url_parts.password:
self.username = url_parts.username
self.password = url_parts.password
self._logger.debug('updating username and password from poll_url')
if url_parts.scheme not in ['http', 'https']:
            raise Exception('invalid scheme in poll_url (%s); expected '
                            '"http" or "https"' % poll_url)
use_ssl = True if url_parts.scheme == 'https' else False
# Initialise the authentication settings
self.setup_authentication(use_ssl)
if state_file and not begin_timestamp:
begin_timestamp = self.get_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
)
request = self.create_poll_request(
collection=collection,
subscription_id=subscription_id,
begin_timestamp=begin_timestamp,
end_timestamp=end_timestamp,
)
self._logger.debug('sending poll request (url=%s, collection=%s)',
poll_url, collection)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
first = True
poll_end_time = None
while True:
if not isinstance(response, PollResponse):
raise Exception('didn\'t get a poll response')
self._logger.debug('received poll response '
'(content_blocks=%d, result_id=%s, more=%s)',
len(response.content_blocks),
response.result_id,
'True' if response.more else 'False')
# Save end timestamp from first PollResponse
if first:
poll_end_time = response.inclusive_end_timestamp_label
if len(response.content_blocks) == 0:
if first:
self._logger.info('poll response contained '
'no content blocks')
break
for content_block in response.content_blocks:
yield content_block
if not response.more:
break
# Send a fulfilment request
if first:
# Initialise fulfilment request values
part_number = response.result_part_number
result_id = response.result_id
first = False
part_number += 1
request = self.create_fulfillment_request(
collection=collection,
result_id=result_id,
part_number=part_number,
)
self._logger.debug('sending fulfilment request '
'(result_id=%s, part_number=%d)',
result_id, part_number)
response = self.send_taxii_message(
request=request,
host=url_parts.hostname,
path=url_parts.path,
port=url_parts.port,
)
# Update the timestamp for the latest poll
if state_file and poll_end_time:
self.save_poll_time(
filename=state_file,
poll_url=poll_url,
collection=collection,
timestamp=poll_end_time,
)
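# Minimal usage sketch for SimpleTaxiiClient.poll(); the endpoint, collection
# name, credentials and state-file path below are placeholders, not real values.
def _example_poll_usage():
    client = SimpleTaxiiClient(username='user', password='secret')
    blocks = client.poll(
        poll_url='https://taxii.example.com/taxii-data',
        collection='example-collection',
        state_file='taxii-poll-state.pickle',
    )
    for content_block in blocks:
        print(content_block.content)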
| en | 0.735281 | A simple interface to libtaxii for sending TAXII client messages. Args: username: a username for HTTP basic authentication password: a password for HTTP basic authentication key_file: a file containing a private key (for SSL certificate-based authentication) cert_file: a file containing a certificate (for SSL certificate-based authentication) ca_file: a file containing the CA's certificate (for verifying the server's certificate) Setup the appropriate credentials and authentication type. Initialises the authentication settings for the connection. Args: use_ssl: should this connection use SSL # CA certificate verification Create a poll request message using supplied parameters. # Send the request message and return the response Send the TAXII poll request to the server using the given URL. # Parse the poll_url to get the parts required by libtaxii # Allow credentials to be provided in poll_url # Initialise the authentication settings # Save end timestamp from first PollResponse # Send a fulfilment request # Initialise fulfilment request values # Update the timestamp for the latest poll | 2.4143 | 2 |
tutorials/registration/data.py | YipengHu/MPHY0041 | 1 | 9524 | <gh_stars>1-10
import os
import zipfile
import requests
DATA_PATH = './data'
RESULT_PATH = './result'
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
print('Downloading and extracting data...')
url = 'https://weisslab.cs.ucl.ac.uk/WEISSTeaching/datasets/-/archive/hn2dct/datasets-hn2dct.zip'
r = requests.get(url,allow_redirects=True)
temp_file = 'temp.zip'
_ = open(temp_file,'wb').write(r.content)
with zipfile.ZipFile(temp_file,'r') as zip_obj:
zip_obj.extractall(DATA_PATH)
os.remove(temp_file)
print('Done.')
print('Head-neck 2D CT data downloaded: %s' % os.path.abspath(os.path.join(DATA_PATH,'datasets-hn2dct')))
if not os.path.exists(RESULT_PATH):
os.makedirs(RESULT_PATH)
print('Result directory created: %s' % os.path.abspath(RESULT_PATH))
| import os
import zipfile
import requests
DATA_PATH = './data'
RESULT_PATH = './result'
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
print('Downloading and extracting data...')
url = 'https://weisslab.cs.ucl.ac.uk/WEISSTeaching/datasets/-/archive/hn2dct/datasets-hn2dct.zip'
r = requests.get(url,allow_redirects=True)
temp_file = 'temp.zip'
_ = open(temp_file,'wb').write(r.content)
with zipfile.ZipFile(temp_file,'r') as zip_obj:
zip_obj.extractall(DATA_PATH)
os.remove(temp_file)
print('Done.')
print('Head-neck 2D CT data downloaded: %s' % os.path.abspath(os.path.join(DATA_PATH,'datasets-hn2dct')))
if not os.path.exists(RESULT_PATH):
os.makedirs(RESULT_PATH)
print('Result directory created: %s' % os.path.abspath(RESULT_PATH)) | none | 1 | 3.29896 | 3 |
|
insights/parsers/tests/test_freeipa_healthcheck_log.py | lhuett/insights-core | 0 | 9525 | <filename>insights/parsers/tests/test_freeipa_healthcheck_log.py
import doctest
from insights.parsers import freeipa_healthcheck_log
from insights.parsers.freeipa_healthcheck_log import FreeIPAHealthCheckLog
from insights.tests import context_wrap
LONG_FREEIPA_HEALTHCHECK_LOG_OK = """
[{"source": "ipahealthcheck.ipa.roles", "check": "IPACRLManagerCheck",
"result": "SUCCESS", "uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z", "duration": "0.002254",
"kw": {"key": "crl_manager", "crlgen_enabled": true}}]
""".strip()
LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES = """
[{"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR", "uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z", "duration": "0.000474", "kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/", "free_space": 14, "threshold": 512}}]
""".strip()
FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE = '''
[
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPACRLManagerCheck",
"result": "SUCCESS",
"uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z",
"duration": "0.002254",
"kw": {
"key": "crl_manager",
"crlgen_enabled": true
}
},
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPARenewalMasterCheck",
"result": "SUCCESS",
"uuid": "1feb7f99-2e98-4e37-bb52-686896972022",
"when": "20191203122317Z",
"duration": "0.018330",
"kw": {
"key": "renewal_master",
"master": true
}
},
{
"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR",
"uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z",
"duration": "0.000474",
"kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/",
"free_space": 14,
"threshold": 512
}
}
]
'''.strip()
FREEIPA_HEALTHCHECK_LOG_OK = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_OK.splitlines())
FREEIPA_HEALTHCHECK_LOG_FAILURES = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES.splitlines())
def test_freeipa_healthcheck_log_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
assert len(log_obj.issues) == 0
def test_freeipa_healthcheck_log_not_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
assert len(log_obj.issues) > 0
for issue in log_obj.issues:
assert issue['check'] == 'FileSystemSpaceCheck'
assert issue['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_get_results_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
results = log_obj.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
assert len(results) == 0
def test_freeipa_healthcheck_get_results_not_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
results = log_obj.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
assert len(results) == 1
for result in results:
assert result['result'] in ['ERROR', 'CRITICAL']
assert result['check'] == 'FileSystemSpaceCheck'
assert result['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_log__documentation():
env = {
'healthcheck': FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE)),
}
failed, total = doctest.testmod(freeipa_healthcheck_log, globs=env)
assert failed == 0
| <filename>insights/parsers/tests/test_freeipa_healthcheck_log.py
import doctest
from insights.parsers import freeipa_healthcheck_log
from insights.parsers.freeipa_healthcheck_log import FreeIPAHealthCheckLog
from insights.tests import context_wrap
LONG_FREEIPA_HEALTHCHECK_LOG_OK = """
[{"source": "ipahealthcheck.ipa.roles", "check": "IPACRLManagerCheck",
"result": "SUCCESS", "uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z", "duration": "0.002254",
"kw": {"key": "crl_manager", "crlgen_enabled": true}}]
""".strip()
LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES = """
[{"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR", "uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z", "duration": "0.000474", "kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/", "free_space": 14, "threshold": 512}}]
""".strip()
FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE = '''
[
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPACRLManagerCheck",
"result": "SUCCESS",
"uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc",
"when": "20191203122317Z",
"duration": "0.002254",
"kw": {
"key": "crl_manager",
"crlgen_enabled": true
}
},
{
"source": "ipahealthcheck.ipa.roles",
"check": "IPARenewalMasterCheck",
"result": "SUCCESS",
"uuid": "1feb7f99-2e98-4e37-bb52-686896972022",
"when": "20191203122317Z",
"duration": "0.018330",
"kw": {
"key": "renewal_master",
"master": true
}
},
{
"source": "ipahealthcheck.system.filesystemspace",
"check": "FileSystemSpaceCheck",
"result": "ERROR",
"uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb",
"when": "20191203122221Z",
"duration": "0.000474",
"kw": {
"msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB",
"store": "/var/log/audit/",
"free_space": 14,
"threshold": 512
}
}
]
'''.strip()
FREEIPA_HEALTHCHECK_LOG_OK = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_OK.splitlines())
FREEIPA_HEALTHCHECK_LOG_FAILURES = "".join(LONG_FREEIPA_HEALTHCHECK_LOG_FAILURES.splitlines())
def test_freeipa_healthcheck_log_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
assert len(log_obj.issues) == 0
def test_freeipa_healthcheck_log_not_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
assert len(log_obj.issues) > 0
for issue in log_obj.issues:
assert issue['check'] == 'FileSystemSpaceCheck'
assert issue['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_get_results_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_OK))
results = log_obj.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
assert len(results) == 0
def test_freeipa_healthcheck_get_results_not_ok():
log_obj = FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_FAILURES))
results = log_obj.get_results('ipahealthcheck.system.filesystemspace', 'FileSystemSpaceCheck')
assert len(results) == 1
for result in results:
assert result['result'] in ['ERROR', 'CRITICAL']
assert result['check'] == 'FileSystemSpaceCheck'
assert result['source'] == 'ipahealthcheck.system.filesystemspace'
def test_freeipa_healthcheck_log__documentation():
env = {
'healthcheck': FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE)),
}
failed, total = doctest.testmod(freeipa_healthcheck_log, globs=env)
assert failed == 0
| en | 0.378521 | [{"source": "ipahealthcheck.ipa.roles", "check": "IPACRLManagerCheck", "result": "SUCCESS", "uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc", "when": "20191203122317Z", "duration": "0.002254", "kw": {"key": "crl_manager", "crlgen_enabled": true}}] [{"source": "ipahealthcheck.system.filesystemspace", "check": "FileSystemSpaceCheck", "result": "ERROR", "uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb", "when": "20191203122221Z", "duration": "0.000474", "kw": { "msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB", "store": "/var/log/audit/", "free_space": 14, "threshold": 512}}] [ { "source": "ipahealthcheck.ipa.roles", "check": "IPACRLManagerCheck", "result": "SUCCESS", "uuid": "1f4177a4-0ddb-4e4d-8258-a5cd5f4638fc", "when": "20191203122317Z", "duration": "0.002254", "kw": { "key": "crl_manager", "crlgen_enabled": true } }, { "source": "ipahealthcheck.ipa.roles", "check": "IPARenewalMasterCheck", "result": "SUCCESS", "uuid": "1feb7f99-2e98-4e37-bb52-686896972022", "when": "20191203122317Z", "duration": "0.018330", "kw": { "key": "renewal_master", "master": true } }, { "source": "ipahealthcheck.system.filesystemspace", "check": "FileSystemSpaceCheck", "result": "ERROR", "uuid": "90ed8765-6ad7-425c-abbd-b07a652649cb", "when": "20191203122221Z", "duration": "0.000474", "kw": { "msg": "/var/log/audit/: free space under threshold: 14 MiB < 512 MiB", "store": "/var/log/audit/", "free_space": 14, "threshold": 512 } } ] | 1.947539 | 2 |
recipes/recipes/windows_image_builder/winpe_customization.py | xswz8015/infra | 0 | 9526 | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import post_process
from PB.recipes.infra.windows_image_builder import windows_image_builder as wib
from PB.recipes.infra.windows_image_builder import actions
from PB.recipes.infra.windows_image_builder import sources
from recipe_engine.post_process import DropExpectation, StatusSuccess
from RECIPE_MODULES.infra.windows_scripts_executor import test_helper as t
DEPS = [
'depot_tools/gitiles',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/json',
'windows_adk',
'windows_scripts_executor',
]
PYTHON_VERSION_COMPATIBILITY = 'PY3'
PROPERTIES = wib.Image
def RunSteps(api, image):
""" This recipe executes offline_winpe_customization."""
if not api.platform.is_win:
raise AssertionError('This recipe can only run on windows')
# this recipe will only execute the offline winpe customizations
for cust in image.customizations:
assert (cust.WhichOneof('customization') == 'offline_winpe_customization')
# initialize the image to scripts executor
api.windows_scripts_executor.init()
custs = api.windows_scripts_executor.init_customizations(image)
# pinning all the refs and generating unique keys
custs = api.windows_scripts_executor.process_customizations(custs)
# download all the required refs
api.windows_scripts_executor.download_all_packages(custs)
# download and install the windows ADK and WinPE packages
api.windows_adk.ensure()
# execute the customizations given
api.windows_scripts_executor.execute_customizations(custs)
wpe_image = 'wpe_image'
wpe_cust = 'generic'
arch = 'x86'
key = '9055a3e678be47d58bb860d27b85adbea41fd2ef3e22c5b7cb3180edf358de90'
def GenTests(api):
# actions for adding files from git
ACTION_ADD_STARTNET = actions.Action(
add_file=actions.AddFile(
name='add_startnet_file',
src=sources.Src(
git_src=sources.GITSrc(
repo='chromium.dev',
ref='HEAD',
src='windows/artifacts/startnet.cmd'),),
dst='Windows\\System32',
))
STARTNET_URL = 'chromium.dev/+/ef70cb069518e6dc3ff24bfae7f195de5099c377/' +\
'windows/artifacts/startnet.cmd'
yield (api.test('not_run_on_windows', api.platform('linux', 64)) +
api.expect_exception('AssertionError') +
api.post_process(DropExpectation))
yield (api.test('happy path', api.platform('win', 64)) + api.properties(
t.WPE_IMAGE(wpe_image, wib.ARCH_X86, wpe_cust, 'happy test',
[ACTION_ADD_STARTNET])) +
# mock all the init and deinit steps
t.MOCK_WPE_INIT_DEINIT_SUCCESS(api, key, arch, wpe_image, wpe_cust) +
# mock git pin file
t.GIT_PIN_FILE(api, wpe_cust, 'HEAD', 'windows/artifacts/startnet.cmd',
'HEAD') +
# mock add file to wpe_image mount dir step
t.ADD_FILE(api, wpe_image, wpe_cust, STARTNET_URL) +
# assert that the generated wpe_image was uploaded
t.CHECK_GCS_UPLOAD(
api, wpe_image, wpe_cust,
'\[CLEANUP\]\\\\{}\\\\workdir\\\\gcs.zip'.format(wpe_cust),
'gs://chrome-gce-images/WIB-WIM/{}.zip'.format(key)) +
api.post_process(StatusSuccess) + api.post_process(DropExpectation))
| # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import post_process
from PB.recipes.infra.windows_image_builder import windows_image_builder as wib
from PB.recipes.infra.windows_image_builder import actions
from PB.recipes.infra.windows_image_builder import sources
from recipe_engine.post_process import DropExpectation, StatusSuccess
from RECIPE_MODULES.infra.windows_scripts_executor import test_helper as t
DEPS = [
'depot_tools/gitiles',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/json',
'windows_adk',
'windows_scripts_executor',
]
PYTHON_VERSION_COMPATIBILITY = 'PY3'
PROPERTIES = wib.Image
def RunSteps(api, image):
""" This recipe executes offline_winpe_customization."""
if not api.platform.is_win:
raise AssertionError('This recipe can only run on windows')
# this recipe will only execute the offline winpe customizations
for cust in image.customizations:
assert (cust.WhichOneof('customization') == 'offline_winpe_customization')
# initialize the image to scripts executor
api.windows_scripts_executor.init()
custs = api.windows_scripts_executor.init_customizations(image)
# pinning all the refs and generating unique keys
custs = api.windows_scripts_executor.process_customizations(custs)
# download all the required refs
api.windows_scripts_executor.download_all_packages(custs)
# download and install the windows ADK and WinPE packages
api.windows_adk.ensure()
# execute the customizations given
api.windows_scripts_executor.execute_customizations(custs)
wpe_image = 'wpe_image'
wpe_cust = 'generic'
arch = 'x86'
key = '9055a3e678be47d58bb860d27b85adbea41fd2ef3e22c5b7cb3180edf358de90'
def GenTests(api):
# actions for adding files from git
ACTION_ADD_STARTNET = actions.Action(
add_file=actions.AddFile(
name='add_startnet_file',
src=sources.Src(
git_src=sources.GITSrc(
repo='chromium.dev',
ref='HEAD',
src='windows/artifacts/startnet.cmd'),),
dst='Windows\\System32',
))
STARTNET_URL = 'chromium.dev/+/ef70cb069518e6dc3ff24bfae7f195de5099c377/' +\
'windows/artifacts/startnet.cmd'
yield (api.test('not_run_on_windows', api.platform('linux', 64)) +
api.expect_exception('AssertionError') +
api.post_process(DropExpectation))
yield (api.test('happy path', api.platform('win', 64)) + api.properties(
t.WPE_IMAGE(wpe_image, wib.ARCH_X86, wpe_cust, 'happy test',
[ACTION_ADD_STARTNET])) +
# mock all the init and deinit steps
t.MOCK_WPE_INIT_DEINIT_SUCCESS(api, key, arch, wpe_image, wpe_cust) +
# mock git pin file
t.GIT_PIN_FILE(api, wpe_cust, 'HEAD', 'windows/artifacts/startnet.cmd',
'HEAD') +
# mock add file to wpe_image mount dir step
t.ADD_FILE(api, wpe_image, wpe_cust, STARTNET_URL) +
# assert that the generated wpe_image was uploaded
t.CHECK_GCS_UPLOAD(
api, wpe_image, wpe_cust,
'\[CLEANUP\]\\\\{}\\\\workdir\\\\gcs.zip'.format(wpe_cust),
'gs://chrome-gce-images/WIB-WIM/{}.zip'.format(key)) +
api.post_process(StatusSuccess) + api.post_process(DropExpectation))
| en | 0.87019 | # Copyright 2021 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. This recipe executes offline_winpe_customization. # this recipe will only execute the offline winpe customizations # initialize the image to scripts executor # pinning all the refs and generating unique keys # download all the required refs # download and install the windows ADK and WinPE packages # execute the customizations given # actions for adding files from git # mock all the init and deinit steps # mock git pin file # mock add file to wpe_image mount dir step # assert that the generated wpe_image was uploaded | 1.83908 | 2 |
back/lollangCompiler/main.py | wonjinYi/lollang-playground | 11 | 9527 | <reponame>wonjinYi/lollang-playground
from lollangCompiler.compiler import Compiler
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--file", required=True, help="컴파일할 파일을 선택해주세요.")
parser.add_argument("--out", default="out.py", help="목적 파이썬 파일경로를 선택해주세요")
args = parser.parse_args()
cmp = Compiler()
cmp.compileFile(args.file, args.out) | from lollangCompiler.compiler import Compiler
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--file", required=True, help="컴파일할 파일을 선택해주세요.")
parser.add_argument("--out", default="out.py", help="목적 파이썬 파일경로를 선택해주세요")
args = parser.parse_args()
cmp = Compiler()
cmp.compileFile(args.file, args.out) | none | 1 | 2.363509 | 2 |
|
src/add_2_zip_imports.py | goubertbrent/oca-backend | 0 | 9528 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.api import users as gusers
from mcfw.cache import CachedModelMixIn
from mcfw.consts import MISSING
from mcfw.restapi import register_postcall_hook, INJECTED_FUNCTIONS
from mcfw.rpc import serialize_value, get_type_details
from rogerthat.rpc import users
from rogerthat.utils import OFFLOAD_TYPE_WEB, offload
from rogerthat.utils.transactions import on_trans_committed
dummy = lambda: None
def log_restapi_call_result(function, success, kwargs, result_or_error):
if function.meta['silent']:
request_data = "****"
else:
kwarg_types = function.meta[u"kwarg_types"]
request_data = dict()
for arg, value in kwargs.iteritems():
if arg == 'accept_missing':
continue
if value == MISSING:
continue
request_data[arg] = serialize_value(value, *get_type_details(kwarg_types[arg], value), skip_missing=True)
if function.meta['silent_result']:
result = "****"
elif isinstance(result_or_error, Exception):
result = unicode(result_or_error)
else:
result = result_or_error
offload(users.get_current_user() or gusers.get_current_user(), OFFLOAD_TYPE_WEB, request_data,
result, function.meta['uri'], success)
register_postcall_hook(log_restapi_call_result)
INJECTED_FUNCTIONS.get_current_session = users.get_current_session
del log_restapi_call_result
CachedModelMixIn.on_trans_committed = lambda self, f, *args, **kwargs: on_trans_committed(f, *args, **kwargs)
| # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.api import users as gusers
from mcfw.cache import CachedModelMixIn
from mcfw.consts import MISSING
from mcfw.restapi import register_postcall_hook, INJECTED_FUNCTIONS
from mcfw.rpc import serialize_value, get_type_details
from rogerthat.rpc import users
from rogerthat.utils import OFFLOAD_TYPE_WEB, offload
from rogerthat.utils.transactions import on_trans_committed
dummy = lambda: None
def log_restapi_call_result(function, success, kwargs, result_or_error):
if function.meta['silent']:
request_data = "****"
else:
kwarg_types = function.meta[u"kwarg_types"]
request_data = dict()
for arg, value in kwargs.iteritems():
if arg == 'accept_missing':
continue
if value == MISSING:
continue
request_data[arg] = serialize_value(value, *get_type_details(kwarg_types[arg], value), skip_missing=True)
if function.meta['silent_result']:
result = "****"
elif isinstance(result_or_error, Exception):
result = unicode(result_or_error)
else:
result = result_or_error
offload(users.get_current_user() or gusers.get_current_user(), OFFLOAD_TYPE_WEB, request_data,
result, function.meta['uri'], success)
register_postcall_hook(log_restapi_call_result)
INJECTED_FUNCTIONS.get_current_session = users.get_current_session
del log_restapi_call_result
CachedModelMixIn.on_trans_committed = lambda self, f, *args, **kwargs: on_trans_committed(f, *args, **kwargs)
| en | 0.827645 | # -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ | 1.759077 | 2 |
lib/galaxy/model/migrate/versions/0026_cloud_tables.py | Galaxyinternship/Galaxy | 0 | 9529 | <filename>lib/galaxy/model/migrate/versions/0026_cloud_tables.py
"""
This script adds tables needed for Galaxy cloud functionality.
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
metadata = MetaData()
CloudImage_table = Table( "cloud_image", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "provider_type", TEXT ),
Column( "image_id", TEXT, nullable=False ),
Column( "manifest", TEXT ),
Column( "state", TEXT ),
Column( "architecture", TEXT ),
Column( "deleted", Boolean, default=False ) )
""" UserConfiguredInstance (UCI) table """
UCI_table = Table( "cloud_uci", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
Column( "key_pair_name", TEXT ),
Column( "key_pair_material", TEXT ),
Column( "name", TEXT ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "total_size", Integer ),
Column( "launch_time", DateTime ),
Column( "deleted", Boolean, default=False ) )
CloudInstance_table = Table( "cloud_instance", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "launch_time", DateTime ),
Column( "stop_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "type", TEXT ),
Column( "reservation_id", TEXT ),
Column( "instance_id", TEXT ),
Column( "mi_id", Integer, ForeignKey( "cloud_image.id" ), index=True ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "public_dns", TEXT ),
Column( "private_dns", TEXT ),
Column( "security_group", TEXT ),
Column( "availability_zone", TEXT ) )
CloudStore_table = Table( "cloud_store", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "attach_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
Column( "volume_id", TEXT ),
Column( "size", Integer, nullable=False ),
Column( "availability_zone", TEXT ),
Column( "inst_id", Integer, ForeignKey( "cloud_instance.id" ) ),
Column( "status", TEXT ),
Column( "device", TEXT ),
Column( "space_consumed", Integer ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudSnapshot_table = Table( "cloud_snapshot", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
Column( "snapshot_id", TEXT ),
Column( "status", TEXT ),
Column( "description", TEXT ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudUserCredentials_table = Table( "cloud_user_credentials", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
Column( "name", TEXT ),
Column( "access_key", TEXT ),
Column( "secret_key", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudProvider_table = Table( "cloud_provider", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "type", TEXT, nullable=False ),
Column( "name", TEXT ),
Column( "region_connection", TEXT ),
Column( "region_name", TEXT ),
Column( "region_endpoint", TEXT ),
Column( "is_secure", Boolean ),
Column( "host", TEXT ),
Column( "port", Integer ),
Column( "proxy", TEXT ),
Column( "proxy_port", TEXT ),
Column( "proxy_user", TEXT ),
Column( "proxy_pass", TEXT ),
Column( "debug", Integer ),
Column( "https_connection_factory", TEXT ),
Column( "path", TEXT ),
Column( "deleted", Boolean, default=False ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
# Load existing tables
metadata.reflect()
try:
CloudProvider_table.create()
CloudUserCredentials_table.create()
CloudImage_table.create()
UCI_table.create()
CloudInstance_table.create()
CloudStore_table.create()
CloudSnapshot_table.create()
except Exception:
log.exception("Creating cloud tables failed.")
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
CloudSnapshot_table.drop()
CloudStore_table.drop()
CloudInstance_table.drop()
UCI_table.drop()
CloudImage_table.drop()
CloudUserCredentials_table.drop()
CloudProvider_table.drop()
except Exception:
log.exception("Dropping cloud tables failed.")
| <filename>lib/galaxy/model/migrate/versions/0026_cloud_tables.py
"""
This script adds tables needed for Galaxy cloud functionality.
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, MetaData, Table, TEXT
now = datetime.datetime.utcnow
log = logging.getLogger( __name__ )
metadata = MetaData()
CloudImage_table = Table( "cloud_image", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "provider_type", TEXT ),
Column( "image_id", TEXT, nullable=False ),
Column( "manifest", TEXT ),
Column( "state", TEXT ),
Column( "architecture", TEXT ),
Column( "deleted", Boolean, default=False ) )
""" UserConfiguredInstance (UCI) table """
UCI_table = Table( "cloud_uci", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
Column( "key_pair_name", TEXT ),
Column( "key_pair_material", TEXT ),
Column( "name", TEXT ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "total_size", Integer ),
Column( "launch_time", DateTime ),
Column( "deleted", Boolean, default=False ) )
CloudInstance_table = Table( "cloud_instance", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "launch_time", DateTime ),
Column( "stop_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "type", TEXT ),
Column( "reservation_id", TEXT ),
Column( "instance_id", TEXT ),
Column( "mi_id", Integer, ForeignKey( "cloud_image.id" ), index=True ),
Column( "state", TEXT ),
Column( "error", TEXT ),
Column( "public_dns", TEXT ),
Column( "private_dns", TEXT ),
Column( "security_group", TEXT ),
Column( "availability_zone", TEXT ) )
CloudStore_table = Table( "cloud_store", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "attach_time", DateTime ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
Column( "volume_id", TEXT ),
Column( "size", Integer, nullable=False ),
Column( "availability_zone", TEXT ),
Column( "inst_id", Integer, ForeignKey( "cloud_instance.id" ) ),
Column( "status", TEXT ),
Column( "device", TEXT ),
Column( "space_consumed", Integer ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudSnapshot_table = Table( "cloud_snapshot", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
Column( "snapshot_id", TEXT ),
Column( "status", TEXT ),
Column( "description", TEXT ),
Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudUserCredentials_table = Table( "cloud_user_credentials", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
Column( "name", TEXT ),
Column( "access_key", TEXT ),
Column( "secret_key", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudProvider_table = Table( "cloud_provider", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "type", TEXT, nullable=False ),
Column( "name", TEXT ),
Column( "region_connection", TEXT ),
Column( "region_name", TEXT ),
Column( "region_endpoint", TEXT ),
Column( "is_secure", Boolean ),
Column( "host", TEXT ),
Column( "port", Integer ),
Column( "proxy", TEXT ),
Column( "proxy_port", TEXT ),
Column( "proxy_user", TEXT ),
Column( "proxy_pass", TEXT ),
Column( "debug", Integer ),
Column( "https_connection_factory", TEXT ),
Column( "path", TEXT ),
Column( "deleted", Boolean, default=False ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
# Load existing tables
metadata.reflect()
try:
CloudProvider_table.create()
CloudUserCredentials_table.create()
CloudImage_table.create()
UCI_table.create()
CloudInstance_table.create()
CloudStore_table.create()
CloudSnapshot_table.create()
except Exception:
log.exception("Creating cloud tables failed.")
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
CloudSnapshot_table.drop()
CloudStore_table.drop()
CloudInstance_table.drop()
UCI_table.drop()
CloudImage_table.drop()
CloudUserCredentials_table.drop()
CloudProvider_table.drop()
except Exception:
log.exception("Dropping cloud tables failed.")
| en | 0.542502 | This script adds tables needed for Galaxy cloud functionality. UserConfiguredInstance (UCI) table # Load existing tables | 2.242466 | 2 |
apps/user/urls.py | mrf-foundation/ckios_v1 | 0 | 9530 | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2021 <EMAIL>
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from apps.user import views as user_views
from .views import EditProfilePage
urlpatterns = [
#User
path('admin/', admin.site.urls),
path('register/', user_views.register, name='register'),
path('login/', auth_views.LoginView.as_view(template_name='registration/login.html'), name='login'),
path('profile/', user_views.profile, name='profile'),
path('edit_profile/', user_views.edit_profile, name='edit_profile'),
path("myprofile/", user_views.myprofile, name="Myprofile"),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
#path('tinymce/', include('tinymce.urls')),
path('edit_profile_page/', user_views.EditProfilePage.as_view(template_name='registration/edit_profile_page.html'), name='edit_profile_page'),
    # For password reset
    path('admin/password_reset/', auth_views.PasswordResetView.as_view(), name='admin_password_reset'),
    path('admin/password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
] | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2021 <EMAIL>
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from apps.user import views as user_views
from .views import EditProfilePage
urlpatterns = [
#User
path('admin/', admin.site.urls),
path('register/', user_views.register, name='register'),
path('login/', auth_views.LoginView.as_view(template_name='registration/login.html'), name='login'),
path('profile/', user_views.profile, name='profile'),
path('edit_profile/', user_views.edit_profile, name='edit_profile'),
path("myprofile/", user_views.myprofile, name="Myprofile"),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
#path('tinymce/', include('tinymce.urls')),
path('edit_profile_page/', user_views.EditProfilePage.as_view(template_name='registration/edit_profile_page.html'), name='edit_profile_page'),
    # For password reset
    path('admin/password_reset/', auth_views.PasswordResetView.as_view(), name='admin_password_reset'),
    path('admin/password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
] | en | 0.444147 | # -*- encoding: utf-8 -*- Copyright (c) 2021 <EMAIL> #User #path('tinymce/', include('tinymce.urls')), # For PasswordPresset | 1.954098 | 2 |
sra_django_api/user/migrations/0003_auto_20180914_1242.py | tflati/ncbi-search | 0 | 9531 | <gh_stars>0
# Generated by Django 2.0.3 on 2018-09-14 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_project'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='file_path',
new_name='base_path',
),
migrations.AlterField(
model_name='project',
name='creation_date',
field=models.DateTimeField(auto_now_add=True),
),
]
| # Generated by Django 2.0.3 on 2018-09-14 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_project'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='file_path',
new_name='base_path',
),
migrations.AlterField(
model_name='project',
name='creation_date',
field=models.DateTimeField(auto_now_add=True),
),
] | en | 0.71583 | # Generated by Django 2.0.3 on 2018-09-14 12:42 | 1.730614 | 2 |
image_misc.py | frankgh/deep-visualization-toolbox | 0 | 9532 | #! /usr/bin/env python
import cv2
import matplotlib.pyplot as plt
import skimage
import skimage.io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, \
fromstring, ceil, dtype, float32, sqrt, dot, zeros
from misc import WithTimer
def norm01(arr):
arr = arr.copy()
arr -= arr.min()
arr /= arr.max() + 1e-10
return arr
def norm01c(arr, center):
'''Maps the input range to [0,1] such that the center value maps to .5'''
arr = arr.copy()
arr -= center
arr /= max(2 * arr.max(), -2 * arr.min()) + 1e-10
arr += .5
assert arr.min() >= 0
assert arr.max() <= 1
return arr
def norm0255(arr):
'''Maps the input range to [0,255] as dtype uint8'''
arr = arr.copy()
arr -= arr.min()
arr *= 255.0 / (arr.max() + 1e-10)
arr = array(arr, 'uint8')
return arr
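# Quick, illustrative sanity check for the three normalisation helpers above;
# the input values are arbitrary and nothing in the module calls this.
def _norm_examples():
    a = array([-2.0, 0.0, 6.0])
    print(norm01(a)) # ~[0.0, 0.25, 1.0]
    print(norm01c(a, center=0)) # the center value 0 maps to 0.5
    print(norm0255(a)) # dtype uint8, spanning 0..255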
def cv2_read_cap_rgb(cap, saveto=None):
rval, frame = cap.read()
if saveto:
cv2.imwrite(saveto, frame)
if len(frame.shape) == 2:
# Upconvert single channel grayscale to color
frame = frame[:, :, newaxis]
if frame.shape[2] == 1:
frame = tile(frame, (1, 1, 3))
if frame.shape[2] > 3:
# Chop off transparency
frame = frame[:, :, :3]
frame = frame[:, :, ::-1] # Convert native OpenCV BGR -> RGB
return frame
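# Sketch of typical cv2_read_cap_rgb() usage with an OpenCV capture source;
# the device index and output filename are arbitrary examples.
def _example_read_frame():
    cap = cv2.VideoCapture(0) # first attached camera (a video file path also works)
    frame_rgb = cv2_read_cap_rgb(cap, saveto='frame.png')
    cap.release()
    return frame_rgb # H x W x 3 RGB array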
def plt_plot_signal(data, labels, zoom_level=-1, offset=0, markers=None, title=None):
fig = Figure(figsize=(5, 5))
canvas = FigureCanvas(fig)
ax = None
if len(data.shape) == 1:
data = expand_dims(data, axis=1)
if zoom_level == -1:
zoom_level = data.shape[0]
color = iter(cm.rainbow(linspace(0, 1, data.shape[1])))
s = offset
e = s + zoom_level
x = arange(s, e)
for i in range(data.shape[1]):
c = next(color)
label = labels[i] if labels is not None else 'Signal {}'.format(i + 1)
ax = fig.add_subplot(data.shape[1], 1, (i + 1), sharex=ax)
ax.plot(x, data[s:e, i], lw=1, label=label, c=c)
# # ax.set_adjustable('box-forced')
# ax.set_xlim(left=0, right=zoom_level)
# ax.get_xaxis().set_visible(i == data.shape[1] - 1)
# ax.xaxis.set_ticks(arange(s, e + 1, (e - s) / 10.0))
# ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
ax.legend(loc='lower right')
if markers is not None and i in markers:
for val in markers[i]:
if val >= s and val < e:
ax.axvline(x=val)
if title is not None:
fig.suptitle(title)
fig.tight_layout()
fig.subplots_adjust(hspace=0)
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
return im
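# Usage sketch for plt_plot_signal(): two made-up channels, a 250-sample zoom
# window and a couple of arbitrary marker positions on the first channel.
def _example_plot_signal():
    from numpy import random
    data = random.randn(500, 2)
    return plt_plot_signal(data, labels=['ch1', 'ch2'], zoom_level=250,
                           offset=0, markers={0: [100, 200]}, title='demo')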
def plt_plot_heatmap(data,
shape,
rows,
cols,
title=None,
x_axis_label=None,
y_axis_label=None,
x_axis_values=None,
y_axis_values=None,
hide_axis=True,
vmin=None,
vmax=None):
res = []
shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
# for i in xrange(y.shape[0]):
# sns.heatmap(y[i], ax=ax, vmin=minn, vmax=maxx)
# canvas.draw() # draw the canvas, cache the renderer
#
# l, b, w, h = fig.bbox.bounds
# w, h = int(w), int(h)
# im = fromstring(canvas.tostring_rgb(), dtype='uint8')
# im.shape = h, w, 3
# res.append(im)
img = ax.imshow(
zeros((data.shape[1], data.shape[2])),
cmap='viridis',
vmin=vmin if vmin is not None else data.min(),
vmax=vmax if vmax is not None else data.max(),
interpolation='none',
aspect='auto'
)
# get rid of spines and fix range of axes, rotate x-axis labels
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if hide_axis:
ax.set_xticks([])
ax.set_yticks([])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
else:
if title is not None:
plt.title(title)
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if x_axis_values is not None:
a = arange(0, x_axis_values.shape[0], 3) + 0.5
b = arange(x_axis_values.min(), x_axis_values.max() + 1.5, 1.5)
ax.set_xticks(a)
ax.set_xticklabels(b, rotation=90)
if y_axis_values is not None:
a = arange(0, y_axis_values.shape[0], 3) + 0.5
# c = roundup((y_axis_values.max() - y_axis_values.min()) / 11)
# b = arange(y_axis_values.min(), y_axis_values.max(), c)
b = linspace(y_axis_values.min(), y_axis_values.max(), num=10, dtype=int)
ax.set_yticks(a)
ax.set_yticklabels(b)
# for tick in ax.get_xticklabels():
# tick.set_rotation(90)
if not hide_axis:
divider = make_axes_locatable(ax)
# colorbar on the right of ax. Colorbar width in % of ax and space between them is defined by pad in inches
cax = divider.append_axes('right', size='5%', pad=0.07)
cb = fig.colorbar(img, cax=cax)
# remove colorbar frame/spines
cb.outline.set_visible(False)
# don't stop after each subfigure change
plt.show(block=False)
if not hide_axis:
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
# keep bg in memory
background = fig.canvas.copy_from_bbox(ax.bbox)
# start = time.time()
    for i in range(data.shape[0]):
img.set_array(data[i])
# restore background
fig.canvas.restore_region(background)
ax.draw_artist(img)
# fill in the axes rectangle
fig.canvas.blit(ax.bbox)
# loop through array
# for i in xrange(data.shape[0]):
# time.sleep(0.005)
# img.set_array(data[i])
# canvas.draw()
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
fig.clf()
plt.clf()
plt.close()
return array(res)
def plt_plot_filter(x, y, title, x_axis_label, y_axis_label, log_scale):
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
canvas = FigureCanvas(fig)
x = arange(0, y.shape[0]) if x is None else x
if log_scale == 1:
ax.semilogy(x, y, lw=2)
else:
ax.plot(x, y, lw=2)
ax.set(xlabel=x_axis_label, ylabel=y_axis_label, title=title)
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
fig.clf()
plt.clf()
plt.close()
return im
def plt_plot_filters_blit(y, x, shape, rows, cols,
title=None,
x_axis_label=None,
y_axis_label=None,
log_scale=0,
hide_axis=False):
res = []
x = arange(0, y.shape[1]) if x is None else x
# if log_scale == 1:
# y = log(y)
# elif log_scale == 2:
# x = log(x)
# elif log_scale == 3:
# x = log(x)
# y = log(y)
shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
ax.set_xlim(min(x), max(x))
ax.set_ylim(y.min(), y.max())
if hide_axis:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
else:
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if title is not None:
plt.title(title)
line, = ax.plot([], [], lw=2)
if not hide_axis:
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
# keep bg in memory
background = fig.canvas.copy_from_bbox(ax.bbox)
    for i in range(y.shape[0]):
line.set_data(x, y[i])
# line.set_color()
# restore background
fig.canvas.restore_region(background)
# redraw just the points
ax.draw_artist(line)
# fill in the axes rectangle
fig.canvas.blit(ax.bbox)
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
fig.clf()
plt.clf()
plt.close()
return array(res)
def plt_plot_filters_fast(y, x, shape, rows, cols,
title=None,
x_axis_label=None,
y_axis_label=None,
share_axes=True,
log_scale=0):
res = []
shape = (ceil(shape[1] / 80 / cols), ceil(shape[0] / 80 / rows))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
# ax.set_aspect('equal')
if share_axes:
if x is not None:
min_x, max_x = min(x), max(x)
else:
min_x, max_x = 0, y.shape[1]
min_y, max_y = y.min(), y.max()
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
# ax.hold(True)
plt.subplots_adjust(left=0.185, bottom=0.125, right=0.98, top=0.98)
# plt.show(False)
# plt.draw()
# background = fig.canvas.copy_from_bbox(ax.bbox)
# points = ax.plot(x[0], linewidth=1)[0]
    for i in range(y.shape[0]):
if x is not None:
if log_scale == 1:
ax.semilogy(x, y[i], linewidth=1)
else:
ax.plot(x, y[i], linewidth=1)
else:
if log_scale == 1:
ax.semilogy(y[i], linewidth=1)
else:
ax.plot(y[i], linewidth=1)
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if title is not None:
plt.title(title)
# plt.autoscale(enable=True, axis='y', tight=True)
# plt.tight_layout()
# Turn off axes and set axes limits
# ax.axis('off')
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
# ax.cla()
fig.clf()
return array(res)
def plt_plot_filters(x, y, shape, rows, cols,
selected_unit=None,
selected_unit_color=None,
title=None,
x_axis_label=None,
y_axis_label=None,
share_axes=True,
log_scale=0):
shape = (ceil(shape[1] / 80), ceil(shape[0] / 80))
fig = Figure(figsize=shape)
canvas = FigureCanvas(fig)
ax, highlighted_ax, right_ax, bottom_ax, curr, right, bottom = None, None, None, None, None, None, None
if selected_unit is not None:
        row = selected_unit // cols  # integer row index of the selected unit
col = selected_unit % cols
curr = selected_unit
bottom = (selected_unit + cols) if row < rows - 1 else None
right = (selected_unit + 1) if col < cols - 1 else None
    for i in range(x.shape[0]):
if share_axes:
ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False, sharex=ax, sharey=ax)
else:
ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False)
if y is not None:
if log_scale == 1:
ax.semilogy(y, x[i], linewidth=1)
else:
ax.plot(y, x[i], linewidth=1)
else:
if log_scale == 1:
ax.semilogy(x[i], linewidth=1)
else:
ax.plot(x[i], linewidth=1)
ax.set_xlim(left=0, right=x.shape[1] - 1)
ax.get_xaxis().set_visible(i >= ((rows - 1) * cols))
ax.get_yaxis().set_visible(i % cols == 0)
if i == curr:
highlighted_ax = ax
if i == bottom:
bottom_ax = ax
if i == right:
right_ax = ax
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if highlighted_ax is not None:
for axis in ['top', 'bottom', 'left', 'right']:
highlighted_ax.spines[axis].set_linewidth(2.5)
highlighted_ax.spines[axis].set_color(selected_unit_color)
if bottom_ax is not None:
bottom_ax.spines['top'].set_linewidth(2)
bottom_ax.spines['top'].set_color(selected_unit_color)
if right_ax is not None:
right_ax.spines['left'].set_linewidth(2)
right_ax.spines['left'].set_color(selected_unit_color)
if title is not None:
fig.suptitle(title)
fig.tight_layout()
fig.subplots_adjust(hspace=0, wspace=0)
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
return im
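# Illustrative usage sketch (hypothetical activations, not part of the
# original module): plot six 50-sample traces on a 2 x 3 grid and outline
# unit 4 in red. The array contents and the pixel shape are made up.
def _example_plt_plot_filters():
    from numpy.random import rand
    acts = rand(6, 50)  # 6 units, 50 samples each
    img = plt_plot_filters(acts, None, (480, 640), rows=2, cols=3,
                           selected_unit=4, selected_unit_color='red')
    return img.shape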
def cv2_read_file_rgb(filename):
'''Reads an image from file. Always returns (x,y,3)'''
im = cv2.imread(filename)
if len(im.shape) == 2:
# Upconvert single channel grayscale to color
im = im[:, :, newaxis]
if im.shape[2] == 1:
im = tile(im, (1, 1, 3))
if im.shape[2] > 3:
# Chop off transparency
im = im[:, :, :3]
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # Convert native OpenCV BGR -> RGB
def crop_to_square(frame):
i_size, j_size = frame.shape[0], frame.shape[1]
if j_size > i_size:
# landscape
        offset = (j_size - i_size) // 2  # floor division keeps the slice index an int
return frame[:, offset:offset + i_size, :]
else:
# portrait
        offset = (i_size - j_size) // 2
return frame[offset:offset + j_size, :, :]
def cv2_imshow_rgb(window_name, img):
# Convert native OpenCV BGR -> RGB before displaying
cv2.imshow(window_name, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
def caffe_load_image(filename, color=True, as_uint=False):
'''
Copied from Caffe to simplify potential import problems.
Load an image converting from grayscale or alpha as needed.
Take
filename: string
color: flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Give
image: an image with type float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
'''
with WithTimer('imread', quiet=True):
if as_uint:
img = skimage.io.imread(filename)
else:
img = skimage.img_as_float(skimage.io.imread(filename)).astype(float32)
if img.ndim == 2:
img = img[:, :, newaxis]
if color:
img = tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
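# Illustrative usage sketch ('example.png' is a placeholder path, not a file
# shipped with this module): caffe_load_image always yields float32 in
# [0, 1] with three channels when color=True.
def _example_caffe_load_image(path='example.png'):
    img = caffe_load_image(path, color=True)
    return img.dtype, img.shape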
def get_tiles_height_width(n_tiles, desired_width=None):
'''Get a height x width size that will fit n_tiles tiles.'''
    if desired_width is None:
# square
width = int(ceil(sqrt(n_tiles)))
height = width
else:
assert isinstance(desired_width, int)
width = desired_width
height = int(ceil(float(n_tiles) / width))
return height, width
def get_tiles_height_width_ratio(n_tiles, width_ratio=1.0):
'''Get a height x width size that will fit n_tiles tiles.'''
width = int(ceil(sqrt(n_tiles * width_ratio)))
return get_tiles_height_width(n_tiles, desired_width=width)
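# Illustrative usage sketch (not part of the original module): the tiling
# helpers pick a near-square grid, e.g. 96 tiles fit a 10 x 10 grid, while
# width_ratio=2.0 trades height for width.
def _example_tile_grid_sizes():
    square = get_tiles_height_width(96)                       # (10, 10)
    wide = get_tiles_height_width_ratio(96, width_ratio=2.0)  # (7, 14)
    return square, wide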
def tile_images_normalize(data, c01=False, boost_indiv=0.0, boost_gamma=1.0, single_tile=False, scale_range=1.0,
neg_pos_colors=None):
data = data.copy()
if single_tile:
# promote 2D image -> 3D batch (01 -> b01) or 3D image -> 4D batch (01c -> b01c OR c01 -> bc01)
data = data[newaxis]
if c01:
# Convert bc01 -> b01c
assert len(data.shape) == 4, 'expected bc01 data'
data = data.transpose(0, 2, 3, 1)
if neg_pos_colors:
neg_clr, pos_clr = neg_pos_colors
neg_clr = array(neg_clr).reshape((1, 3))
pos_clr = array(pos_clr).reshape((1, 3))
# Keep 0 at 0
data /= max(data.max(), -data.min()) + 1e-10 # Map data to [-1, 1]
# data += .5 * scale_range # now in [0, scale_range]
# assert data.min() >= 0
# assert data.max() <= scale_range
if len(data.shape) == 3:
data = data.reshape(data.shape + (1,))
assert data.shape[3] == 1, 'neg_pos_color only makes sense if color data is not provided (channels should be 1)'
data = dot((data > 0) * data, pos_clr) + dot((data < 0) * -data, neg_clr)
data -= data.min()
data *= scale_range / (data.max() + 1e-10)
# sqrt-scale (0->0, .1->.3, 1->1)
assert boost_indiv >= 0 and boost_indiv <= 1, 'boost_indiv out of range'
# print 'using boost_indiv:', boost_indiv
if boost_indiv > 0:
if len(data.shape) == 4:
mm = (data.max(-1).max(-1).max(-1) + 1e-10) ** -boost_indiv
else:
mm = (data.max(-1).max(-1) + 1e-10) ** -boost_indiv
data = (data.T * mm).T
if boost_gamma != 1.0:
data = data ** boost_gamma
# Promote single-channel data to 3 channel color
if len(data.shape) == 3:
# b01 -> b01c
data = tile(data[:, :, :, newaxis], 3)
return data
def tile_images_make_tiles(data, padsize=1, padval=0, hw=None, highlights=None):
if hw:
height, width = hw
else:
height, width = get_tiles_height_width(data.shape[0])
assert height * width >= data.shape[0], '{} rows x {} columns cannot fit {} tiles'.format(height, width,
data.shape[0])
# First iteration: one-way padding, no highlights
# padding = ((0, width*height - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
# data = pad(data, padding, mode='constant', constant_values=(padval, padval))
# Second iteration: padding with highlights
# padding = ((0, width*height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (data.ndim - 3)
# print 'tile_images: data min,max =', data.min(), data.max()
# padder = SmartPadder()
##data = pad(data, padding, mode=jy_pad_fn)
# data = pad(data, padding, mode=padder.pad_function)
# print 'padder.calls =', padder.calls
# Third iteration: two-way padding with highlights
if highlights is not None:
assert len(highlights) == data.shape[0]
padding = ((0, width * height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (
data.ndim - 3)
# First pad with constant vals
    try:
        len(padval)
    except TypeError:
        # scalar padval: promote it to a 1-tuple
        padval = tuple((padval,))
assert len(padval) in (1, 3), 'padval should be grayscale (len 1) or color (len 3)'
if len(padval) == 1:
data = pad(data, padding, mode='constant', constant_values=(padval, padval))
else:
data = pad(data, padding, mode='constant', constant_values=(0, 0))
for cc in (0, 1, 2):
# Replace 0s with proper color in each channel
data[:padding[0][0], :, :, cc] = padval[cc]
if padding[0][1] > 0:
data[-padding[0][1]:, :, :, cc] = padval[cc]
data[:, :padding[1][0], :, cc] = padval[cc]
if padding[1][1] > 0:
data[:, -padding[1][1]:, :, cc] = padval[cc]
data[:, :, :padding[2][0], cc] = padval[cc]
if padding[2][1] > 0:
data[:, :, -padding[2][1]:, cc] = padval[cc]
if highlights is not None:
# Then highlight if necessary
for ii, highlight in enumerate(highlights):
if highlight is not None:
data[ii, :padding[1][0], :, :] = highlight
if padding[1][1] > 0:
data[ii, -padding[1][1]:, :, :] = highlight
data[ii, :, :padding[2][0], :] = highlight
if padding[2][1] > 0:
data[ii, :, -padding[2][1]:, :] = highlight
# tile the filters into an image
data = data.reshape((height, width) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((height * data.shape[1], width * data.shape[3]) + data.shape[4:])
data = data[0:-padsize, 0:-padsize] # remove excess padding
return (height, width), data
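# Illustrative usage sketch (hypothetical batch, not part of the original
# module): normalize twelve random 8x8 single-channel maps and tile them
# into one mosaic. A 3-tuple padval is used so the grey border is explicit.
def _example_tile_images():
    from numpy.random import randn
    batch = randn(12, 8, 8)                        # b01 layout
    normed = tile_images_normalize(batch)          # -> (12, 8, 8, 3) in [0, 1]
    (th, tw), mosaic = tile_images_make_tiles(normed, padsize=1,
                                              padval=(0.5, 0.5, 0.5))
    return th, tw, mosaic.shape                    # 4 x 4 grid, (39, 39, 3)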
def to_255(vals_01):
'''Convert vals in [0,1] to [0,255]'''
try:
ret = [v * 255 for v in vals_01]
if type(vals_01) is tuple:
return tuple(ret)
else:
return ret
except TypeError:
# Not iterable (single int or float)
return vals_01 * 255
def ensure_uint255_and_resize_to_fit(img, out_max_shape,
shrink_interpolation=cv2.INTER_LINEAR,
grow_interpolation=cv2.INTER_NEAREST):
as_uint255 = ensure_uint255(img)
return resize_to_fit(as_uint255, out_max_shape,
dtype_out='uint8',
shrink_interpolation=shrink_interpolation,
grow_interpolation=grow_interpolation)
def ensure_uint255(arr):
'''If data is float, multiply by 255 and convert to uint8. Else leave as uint8.'''
if arr.dtype == 'uint8':
return arr
elif arr.dtype in ('float32', 'float64'):
# print 'extra check...'
# assert arr.max() <= 1.1
return array(arr * 255, dtype='uint8')
else:
raise Exception('ensure_uint255 expects uint8 or float input but got %s with range [%g,%g,].' % (
arr.dtype, arr.min(), arr.max()))
def ensure_float01(arr, dtype_preference='float32'):
'''If data is uint, convert to float and divide by 255. Else leave at float.'''
if arr.dtype == 'uint8':
# print 'extra check...'
# assert arr.max() <= 256
return array(arr, dtype=dtype_preference) / 255
elif arr.dtype in ('float32', 'float64'):
return arr
else:
raise Exception('ensure_float01 expects uint8 or float input but got %s with range [%g,%g,].' % (
arr.dtype, arr.min(), arr.max()))
def resize_to_fit(img, out_max_shape,
dtype_out=None,
shrink_interpolation=cv2.INTER_LINEAR,
grow_interpolation=cv2.INTER_NEAREST):
'''Resizes to fit within out_max_shape. If ratio is different,
returns an image that fits but is smaller along one of the two
dimensions.
If one of the out_max_shape dimensions is None, then use only the other dimension to perform resizing.
Timing info on MBP Retina with OpenBlas:
- conclusion: uint8 is always tied or faster. float64 is slower.
Scaling down:
In [79]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
Out[79]: 0.04950380325317383
In [77]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
Out[77]: 0.049156904220581055
In [76]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
Out[76]: 0.11808204650878906
Scaling up:
In [68]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
Out[68]: 0.4357950687408447
In [70]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
Out[70]: 1.3411099910736084
In [73]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
Out[73]: 2.6078310012817383
'''
if dtype_out is not None and img.dtype != dtype_out:
dtype_in_size = img.dtype.itemsize
dtype_out_size = dtype(dtype_out).itemsize
convert_early = (dtype_out_size < dtype_in_size)
convert_late = not convert_early
else:
convert_early = False
convert_late = False
if img.shape[0] == 0 and img.shape[1] == 0:
scale = 1
elif out_max_shape[0] is None or img.shape[0] == 0:
scale = float(out_max_shape[1]) / img.shape[1]
elif out_max_shape[1] is None or img.shape[1] == 0:
scale = float(out_max_shape[0]) / img.shape[0]
else:
scale = min(float(out_max_shape[0]) / img.shape[0],
float(out_max_shape[1]) / img.shape[1])
if convert_early:
img = array(img, dtype=dtype_out)
out = cv2.resize(img,
(int(img.shape[1] * scale), int(img.shape[0] * scale)), # in (c,r) order
interpolation=grow_interpolation if scale > 1 else shrink_interpolation)
if convert_late:
out = array(out, dtype=dtype_out)
return out
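# Illustrative usage sketch (placeholder frame, not part of the original
# module): a 480x640 uint8 image shrunk to fit a 200x200 box keeps its
# aspect ratio, so the result is 150x200.
def _example_resize_to_fit():
    frame = zeros((480, 640, 3), dtype='uint8')
    small = resize_to_fit(frame, (200, 200))
    return small.shape  # (150, 200, 3)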
class FormattedString(object):
def __init__(self, string, defaults, face=None, fsize=None, clr=None, thick=None, align=None, width=None):
self.string = string
self.face = face if face else defaults['face']
self.fsize = fsize if fsize else defaults['fsize']
self.clr = clr if clr else defaults['clr']
self.thick = thick if thick else defaults['thick']
self.width = width # if None: calculate width automatically
self.align = align if align else defaults.get('align', 'left')
def cv2_typeset_text(data, lines, loc, between=' ', string_spacing=0, line_spacing=0, wrap=False):
    '''Typesets multiple strings on multiple lines of text, where each string may have its own formatting.
Given:
data: as in cv2.putText
loc: as in cv2.putText
lines: list of lists of FormattedString objects, may be modified by this function!
between: what to insert between each string on each line, ala str.join
string_spacing: extra spacing to insert between strings on a line
line_spacing: extra spacing to insert between lines
wrap: if true, wraps words to next line
Returns:
locy: new y location = loc[1] + y-offset resulting from lines of text
'''
data_width = data.shape[1]
# lines_modified = False
# lines = lines_in # will be deepcopied if modification is needed later
if isinstance(lines, FormattedString):
lines = [lines]
assert isinstance(lines,
list), 'lines must be a list of lines or list of FormattedString objects or a single FormattedString object'
if len(lines) == 0:
return loc[1]
if not isinstance(lines[0], list):
# If a single line of text is given as a list of strings, convert to multiline format
lines = [lines]
locy = loc[1]
line_num = 0
while line_num < len(lines):
line = lines[line_num]
maxy = 0
locx = loc[0]
for ii, fs in enumerate(line):
last_on_line = (ii == len(line) - 1)
if not last_on_line:
fs.string += between
boxsize, _ = cv2.getTextSize(fs.string, fs.face, fs.fsize, fs.thick)
if fs.width is not None:
if fs.align == 'right':
locx += fs.width - boxsize[0]
elif fs.align == 'center':
                    locx += (fs.width - boxsize[0]) // 2  # keep the x coordinate integral for cv2.putText
# print 'right boundary is', locx + boxsize[0], '(%s)' % fs.string
# print 'HERE'
right_edge = locx + boxsize[0]
if wrap and ii > 0 and right_edge > data_width:
# Wrap rest of line to the next line
# if not lines_modified:
# lines = deepcopy(lines_in)
# lines_modified = True
new_this_line = line[:ii]
new_next_line = line[ii:]
lines[line_num] = new_this_line
lines.insert(line_num + 1, new_next_line)
break
###line_num += 1
###continue
cv2.putText(data, fs.string, (locx, locy), fs.face, fs.fsize, fs.clr, fs.thick)
maxy = max(maxy, boxsize[1])
if fs.width is not None:
if fs.align == 'right':
locx += boxsize[0]
elif fs.align == 'left':
locx += fs.width
elif fs.align == 'center':
                    locx += fs.width - (fs.width - boxsize[0]) // 2
else:
locx += boxsize[0]
locx += string_spacing
line_num += 1
locy += maxy + line_spacing
return locy
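# Illustrative usage sketch (hypothetical canvas and defaults, not part of
# the original module): typeset two strings with different font sizes on a
# blank image. The defaults dict mirrors the fields FormattedString reads.
def _example_typeset_text():
    canvas_img = zeros((120, 400, 3), dtype='uint8')
    defaults = {'face': cv2.FONT_HERSHEY_SIMPLEX, 'fsize': 0.5,
                'clr': (255, 255, 255), 'thick': 1}
    line = [FormattedString('hello', defaults),
            FormattedString('world', defaults, fsize=0.8)]
    next_y = cv2_typeset_text(canvas_img, [line], loc=(10, 40))
    return next_y  # y coordinate just below the rendered line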
def saveimage(filename, im):
'''Saves an image with pixel values in [0,1]'''
# matplotlib.image.imsave(filename, im)
if len(im.shape) == 3:
# Reverse RGB to OpenCV BGR order for color images
cv2.imwrite(filename, 255 * im[:, :, ::-1])
else:
cv2.imwrite(filename, 255 * im)
def saveimagesc(filename, im):
saveimage(filename, norm01(im))
def saveimagescc(filename, im, center):
saveimage(filename, norm01c(im, center))
| en | 0.546897 | #! /usr/bin/env python Maps the input range to [0,1] such that the center value maps to .5 Maps the input range to [0,255] as dtype uint8 # Upconvert single channel grayscale to color # Chop off transparency # Convert native OpenCV BGR -> RGB # # ax.set_adjustable('box-forced') # ax.set_xlim(left=0, right=zoom_level) # ax.get_xaxis().set_visible(i == data.shape[1] - 1) # ax.xaxis.set_ticks(arange(s, e + 1, (e - s) / 10.0)) # ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f')) # draw the canvas, cache the renderer # for i in xrange(y.shape[0]): # sns.heatmap(y[i], ax=ax, vmin=minn, vmax=maxx) # canvas.draw() # draw the canvas, cache the renderer # # l, b, w, h = fig.bbox.bounds # w, h = int(w), int(h) # im = fromstring(canvas.tostring_rgb(), dtype='uint8') # im.shape = h, w, 3 # res.append(im) # get rid of spines and fix range of axes, rotate x-axis labels # c = roundup((y_axis_values.max() - y_axis_values.min()) / 11) # b = arange(y_axis_values.min(), y_axis_values.max(), c) # for tick in ax.get_xticklabels(): # tick.set_rotation(90) # colorbar on the right of ax. Colorbar width in % of ax and space between them is defined by pad in inches # remove colorbar frame/spines # don't stop after each subfigure change # draw the canvas, cache the renderer # keep bg in memory # start = time.time() # restore background # fill in the axes rectangle # loop through array # for i in xrange(data.shape[0]): # time.sleep(0.005) # img.set_array(data[i]) # canvas.draw() # draw the canvas, cache the renderer # if log_scale == 1: # y = log(y) # elif log_scale == 2: # x = log(x) # elif log_scale == 3: # x = log(x) # y = log(y) # draw the canvas, cache the renderer # keep bg in memory # line.set_color() # restore background # redraw just the points # fill in the axes rectangle # ax.set_aspect('equal') # ax.hold(True) # plt.show(False) # plt.draw() # background = fig.canvas.copy_from_bbox(ax.bbox) # points = ax.plot(x[0], linewidth=1)[0] # plt.autoscale(enable=True, axis='y', tight=True) # plt.tight_layout() # Turn off axes and set axes limits # ax.axis('off') # draw the canvas, cache the renderer # ax.cla() # draw the canvas, cache the renderer Reads an image from file. Always returns (x,y,3) # Upconvert single channel grayscale to color # Chop off transparency # Convert native OpenCV BGR -> RGB # landscape # portrait # Convert native OpenCV BGR -> RGB before displaying Copied from Caffe to simplify potential import problems. Load an image converting from grayscale or alpha as needed. Take filename: string color: flag for color format. True (default) loads as RGB while False loads as intensity (if image is already grayscale). Give image: an image with type float32 in range [0, 1] of size (H x W x 3) in RGB or of size (H x W x 1) in grayscale. Get a height x width size that will fit n_tiles tiles. # square Get a height x width size that will fit n_tiles tiles. 
# promote 2D image -> 3D batch (01 -> b01) or 3D image -> 4D batch (01c -> b01c OR c01 -> bc01) # Convert bc01 -> b01c # Keep 0 at 0 # Map data to [-1, 1] # data += .5 * scale_range # now in [0, scale_range] # assert data.min() >= 0 # assert data.max() <= scale_range # sqrt-scale (0->0, .1->.3, 1->1) # print 'using boost_indiv:', boost_indiv # Promote single-channel data to 3 channel color # b01 -> b01c # First iteration: one-way padding, no highlights # padding = ((0, width*height - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3) # data = pad(data, padding, mode='constant', constant_values=(padval, padval)) # Second iteration: padding with highlights # padding = ((0, width*height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (data.ndim - 3) # print 'tile_images: data min,max =', data.min(), data.max() # padder = SmartPadder() ##data = pad(data, padding, mode=jy_pad_fn) # data = pad(data, padding, mode=padder.pad_function) # print 'padder.calls =', padder.calls # Third iteration: two-way padding with highlights # First pad with constant vals # Replace 0s with proper color in each channel # Then highlight if necessary # tile the filters into an image # remove excess padding Convert vals in [0,1] to [0,255] # Not iterable (single int or float) If data is float, multiply by 255 and convert to uint8. Else leave as uint8. # print 'extra check...' # assert arr.max() <= 1.1 If data is uint, convert to float and divide by 255. Else leave at float. # print 'extra check...' # assert arr.max() <= 256 Resizes to fit within out_max_shape. If ratio is different, returns an image that fits but is smaller along one of the two dimensions. If one of the out_max_shape dimensions is None, then use only the other dimension to perform resizing. Timing info on MBP Retina with OpenBlas: - conclusion: uint8 is always tied or faster. float64 is slower. Scaling down: In [79]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100) Out[79]: 0.04950380325317383 In [77]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100) Out[77]: 0.049156904220581055 In [76]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100) Out[76]: 0.11808204650878906 Scaling up: In [68]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100) Out[68]: 0.4357950687408447 In [70]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100) Out[70]: 1.3411099910736084 In [73]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100) Out[73]: 2.6078310012817383 # in (c,r) order # if None: calculate width automatically Typesets mutliple strings on multiple lines of text, where each string may have its own formatting. 
Given: data: as in cv2.putText loc: as in cv2.putText lines: list of lists of FormattedString objects, may be modified by this function! between: what to insert between each string on each line, ala str.join string_spacing: extra spacing to insert between strings on a line line_spacing: extra spacing to insert between lines wrap: if true, wraps words to next line Returns: locy: new y location = loc[1] + y-offset resulting from lines of text # lines_modified = False # lines = lines_in # will be deepcopied if modification is needed later # If a single line of text is given as a list of strings, convert to multiline format # print 'right boundary is', locx + boxsize[0], '(%s)' % fs.string # print 'HERE' # Wrap rest of line to the next line # if not lines_modified: # lines = deepcopy(lines_in) # lines_modified = True ###line_num += 1 ###continue Saves an image with pixel values in [0,1] # matplotlib.image.imsave(filename, im) # Reverse RGB to OpenCV BGR order for color images | 2.528671 | 3 |
text.py | Kedyn/PingPong | 0 | 9533 | import pygame.font
import copy
class Text:
"""Draws a text to the screen."""
def __init__(self, rect, size, color, screen, text):
self.screen = screen
self.rect = copy.deepcopy(rect)
self.text = text
self.color = color
self.font = pygame.font.SysFont(None, size)
self.text_image = None
self.text_image_rect = None
self.prep_img()
def prep_img(self):
"""Turn msg into a rendered image, and center text on the button."""
self.text_image = self.font.render(self.text, True,
self.color)
self.text_image_rect = self.text_image.get_rect()
self.text_image_rect.center = self.rect.center
def render(self):
self.screen.blit(self.text_image, self.text_image_rect)
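# Illustrative usage sketch (hypothetical window size, rect and colors, not
# part of the original module): draw a centered white label on a surface.
def _example_text_usage():
    import pygame
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    label = Text(pygame.Rect(0, 0, 320, 60), size=36,
                 color=(255, 255, 255), screen=screen, text='Ping Pong')
    label.render()
    pygame.display.flip()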
| en | 0.619045 | Draws a text to the screen. Turn msg into a rendered image, and center text on the button. | 3.295439 | 3 |
py/test/selenium/webdriver/common/window_tests.py | ey-advisory-technology-testing/selenium | 1 | 9534 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
# @pytest.mark.xfail_ie
# @pytest.mark.xfail_chromiumedge(reason="Fails on Travis")
# @pytest.mark.xfail_firefox(reason="Fails on Travis")
# @pytest.mark.xfail_remote(reason="Fails on Travis")
# def testShouldMaximizeTheWindow(driver):
# resize_timeout = 5
# wait = WebDriverWait(driver, resize_timeout)
# old_size = driver.get_window_size()
# driver.set_window_size(200, 200)
# wait.until(
# lambda dr: dr.get_window_size() != old_size if old_size["width"] != 200 and old_size["height"] != 200 else True)
# size = driver.get_window_size()
# driver.maximize_window()
# wait.until(lambda dr: dr.get_window_size() != size)
# new_size = driver.get_window_size()
# assert new_size["width"] > size["width"]
# assert new_size["height"] > size["height"]
def test_should_get_the_size_of_the_current_window(driver):
size = driver.get_window_size()
assert size.get('width') > 0
assert size.get('height') > 0
def test_should_set_the_size_of_the_current_window(driver):
size = driver.get_window_size()
target_width = size.get('width') - 20
target_height = size.get('height') - 20
driver.set_window_size(width=target_width, height=target_height)
new_size = driver.get_window_size()
assert new_size.get('width') == target_width
assert new_size.get('height') == target_height
def test_should_get_the_position_of_the_current_window(driver):
position = driver.get_window_position()
assert position.get('x') >= 0
assert position.get('y') >= 0
def test_should_set_the_position_of_the_current_window(driver):
position = driver.get_window_position()
target_x = position.get('x') + 10
target_y = position.get('y') + 10
driver.set_window_position(x=target_x, y=target_y)
WebDriverWait(driver, 2)\
.until(lambda d: d.get_window_position()['x'] != position['x'] and d.get_window_position()['y'] != position['y'])
new_position = driver.get_window_position()
assert new_position.get('x') == target_x
assert new_position.get('y') == target_y
@pytest.mark.xfail_safari(raises=WebDriverException,
reason='Get Window Rect command not implemented')
def test_should_get_the_rect_of_the_current_window(driver):
rect = driver.get_window_rect()
assert rect.get('x') >= 0
assert rect.get('y') >= 0
assert rect.get('width') >= 0
assert rect.get('height') >= 0
@pytest.mark.xfail_safari(raises=WebDriverException,
reason='Get Window Rect command not implemented')
def test_should_set_the_rect_of_the_current_window(driver):
rect = driver.get_window_rect()
target_x = rect.get('x') + 10
target_y = rect.get('y') + 10
target_width = rect.get('width') + 10
target_height = rect.get('height') + 10
driver.set_window_rect(x=target_x, y=target_y, width=target_width, height=target_height)
WebDriverWait(driver, 2)\
.until(lambda d: d.get_window_position()['x'] != rect['x'] and d.get_window_position()['y'] != rect['y'])
new_rect = driver.get_window_rect()
assert new_rect.get('x') == target_x
assert new_rect.get('y') == target_y
assert new_rect.get('width') == target_width
assert new_rect.get('height') == target_height
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Fullscreen command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Fullscreen command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_fullscreen_the_current_window(driver):
# start_width = driver.execute_script('return window.innerWidth;')
# start_height = driver.execute_script('return window.innerHeight;')
# driver.fullscreen_window()
# WebDriverWait(driver, 2)\
# .until(lambda d: driver.execute_script('return window.innerWidth;') > start_width)
# end_width = driver.execute_script('return window.innerWidth;')
# end_height = driver.execute_script('return window.innerHeight;')
# driver.quit() # Kill driver so we aren't running fullscreen after
# assert end_width > start_width
# assert end_height > start_height
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Minimize command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Minimize command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_minimize_the_current_window(driver):
# driver.minimize_window()
# minimized = driver.execute_script('return document.hidden;')
# driver.quit() # Kill driver so we aren't running minimized after
# assert minimized is True
| # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
# @pytest.mark.xfail_ie
# @pytest.mark.xfail_chromiumedge(reason="Fails on Travis")
# @pytest.mark.xfail_firefox(reason="Fails on Travis")
# @pytest.mark.xfail_remote(reason="Fails on Travis")
# def testShouldMaximizeTheWindow(driver):
# resize_timeout = 5
# wait = WebDriverWait(driver, resize_timeout)
# old_size = driver.get_window_size()
# driver.set_window_size(200, 200)
# wait.until(
# lambda dr: dr.get_window_size() != old_size if old_size["width"] != 200 and old_size["height"] != 200 else True)
# size = driver.get_window_size()
# driver.maximize_window()
# wait.until(lambda dr: dr.get_window_size() != size)
# new_size = driver.get_window_size()
# assert new_size["width"] > size["width"]
# assert new_size["height"] > size["height"]
def test_should_get_the_size_of_the_current_window(driver):
size = driver.get_window_size()
assert size.get('width') > 0
assert size.get('height') > 0
def test_should_set_the_size_of_the_current_window(driver):
size = driver.get_window_size()
target_width = size.get('width') - 20
target_height = size.get('height') - 20
driver.set_window_size(width=target_width, height=target_height)
new_size = driver.get_window_size()
assert new_size.get('width') == target_width
assert new_size.get('height') == target_height
def test_should_get_the_position_of_the_current_window(driver):
position = driver.get_window_position()
assert position.get('x') >= 0
assert position.get('y') >= 0
def test_should_set_the_position_of_the_current_window(driver):
position = driver.get_window_position()
target_x = position.get('x') + 10
target_y = position.get('y') + 10
driver.set_window_position(x=target_x, y=target_y)
WebDriverWait(driver, 2)\
.until(lambda d: d.get_window_position()['x'] != position['x'] and d.get_window_position()['y'] != position['y'])
new_position = driver.get_window_position()
assert new_position.get('x') == target_x
assert new_position.get('y') == target_y
@pytest.mark.xfail_safari(raises=WebDriverException,
reason='Get Window Rect command not implemented')
def test_should_get_the_rect_of_the_current_window(driver):
rect = driver.get_window_rect()
assert rect.get('x') >= 0
assert rect.get('y') >= 0
assert rect.get('width') >= 0
assert rect.get('height') >= 0
@pytest.mark.xfail_safari(raises=WebDriverException,
reason='Get Window Rect command not implemented')
def test_should_set_the_rect_of_the_current_window(driver):
rect = driver.get_window_rect()
target_x = rect.get('x') + 10
target_y = rect.get('y') + 10
target_width = rect.get('width') + 10
target_height = rect.get('height') + 10
driver.set_window_rect(x=target_x, y=target_y, width=target_width, height=target_height)
WebDriverWait(driver, 2)\
.until(lambda d: d.get_window_position()['x'] != rect['x'] and d.get_window_position()['y'] != rect['y'])
new_rect = driver.get_window_rect()
assert new_rect.get('x') == target_x
assert new_rect.get('y') == target_y
assert new_rect.get('width') == target_width
assert new_rect.get('height') == target_height
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Fullscreen command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Fullscreen command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_fullscreen_the_current_window(driver):
# start_width = driver.execute_script('return window.innerWidth;')
# start_height = driver.execute_script('return window.innerHeight;')
# driver.fullscreen_window()
# WebDriverWait(driver, 2)\
# .until(lambda d: driver.execute_script('return window.innerWidth;') > start_width)
# end_width = driver.execute_script('return window.innerWidth;')
# end_height = driver.execute_script('return window.innerHeight;')
# driver.quit() # Kill driver so we aren't running fullscreen after
# assert end_width > start_width
# assert end_height > start_height
# @pytest.mark.xfail_safari(raises=WebDriverException,
# reason='Minimize command not implemented')
# @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
# reason='Minimize command causes Travis to hang')
# @pytest.mark.no_driver_after_test
# def test_should_minimize_the_current_window(driver):
# driver.minimize_window()
# minimized = driver.execute_script('return document.hidden;')
# driver.quit() # Kill driver so we aren't running minimized after
# assert minimized is True
| en | 0.608578 | # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # @pytest.mark.xfail_ie # @pytest.mark.xfail_chromiumedge(reason="Fails on Travis") # @pytest.mark.xfail_firefox(reason="Fails on Travis") # @pytest.mark.xfail_remote(reason="Fails on Travis") # def testShouldMaximizeTheWindow(driver): # resize_timeout = 5 # wait = WebDriverWait(driver, resize_timeout) # old_size = driver.get_window_size() # driver.set_window_size(200, 200) # wait.until( # lambda dr: dr.get_window_size() != old_size if old_size["width"] != 200 and old_size["height"] != 200 else True) # size = driver.get_window_size() # driver.maximize_window() # wait.until(lambda dr: dr.get_window_size() != size) # new_size = driver.get_window_size() # assert new_size["width"] > size["width"] # assert new_size["height"] > size["height"] # @pytest.mark.xfail_safari(raises=WebDriverException, # reason='Fullscreen command not implemented') # @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true', # reason='Fullscreen command causes Travis to hang') # @pytest.mark.no_driver_after_test # def test_should_fullscreen_the_current_window(driver): # start_width = driver.execute_script('return window.innerWidth;') # start_height = driver.execute_script('return window.innerHeight;') # driver.fullscreen_window() # WebDriverWait(driver, 2)\ # .until(lambda d: driver.execute_script('return window.innerWidth;') > start_width) # end_width = driver.execute_script('return window.innerWidth;') # end_height = driver.execute_script('return window.innerHeight;') # driver.quit() # Kill driver so we aren't running fullscreen after # assert end_width > start_width # assert end_height > start_height # @pytest.mark.xfail_safari(raises=WebDriverException, # reason='Minimize command not implemented') # @pytest.mark.skipif(os.environ.get('TRAVIS') == 'true', # reason='Minimize command causes Travis to hang') # @pytest.mark.no_driver_after_test # def test_should_minimize_the_current_window(driver): # driver.minimize_window() # minimized = driver.execute_script('return document.hidden;') # driver.quit() # Kill driver so we aren't running minimized after # assert minimized is True | 2.056332 | 2 |
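The window tests above rely on a pytest fixture named `driver`; a minimal, hypothetical conftest.py sketch that would satisfy them (the real Selenium test suite configures drivers differently) is:
import pytest
from selenium import webdriver
@pytest.fixture
def driver():
    # Any WebDriver implementation works; Chrome is used here for illustration
    drv = webdriver.Chrome()
    yield drv
    drv.quit()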
psydac/cad/geometry.py | mayuri-dhote/psydac | 5 | 9535 | <gh_stars>1-10
# coding: utf-8
#
# a Geometry class contains the list of patches and additional information about
# the topology i.e. connectivity, boundaries
# For the moment, it is used as a container, that can be loaded from a file
# (hdf5)
from itertools import product
from collections import abc
import numpy as np
import string
import random
import h5py
import yaml
import os
import string
import random
from mpi4py import MPI
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.mapping.discrete import SplineMapping, NurbsMapping
from sympde.topology import Domain, Line, Square, Cube, NCubeInterior
from sympde.topology.basic import Union
#==============================================================================
class Geometry( object ):
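    """Container for a (possibly multipatch) geometry: a topological domain
    together with one mapping per patch. A Geometry can be built from a
    (domain, mappings) pair, read from an HDF5 geometry file, or created via
    the alternative constructors below (from a discrete mapping, or from a
    plain topological domain)."""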
_ldim = None
_pdim = None
_patches = []
_topology = None
#--------------------------------------------------------------------------
# Option [1]: from a (domain, mappings) or a file
#--------------------------------------------------------------------------
def __init__( self, domain=None, mappings=None,
filename=None, comm=MPI.COMM_WORLD ):
# ... read the geometry if the filename is given
if not( filename is None ):
self.read(filename, comm=comm)
elif not( domain is None ):
assert( isinstance( domain, Domain ) )
assert( not( mappings is None ))
assert isinstance( mappings, dict)
# ... check sanity
interior_names = sorted(domain.interior_names)
mappings_keys = sorted(list(mappings.keys()))
assert( interior_names == mappings_keys )
# ...
self._domain = domain
self._ldim = domain.dim
self._pdim = domain.dim # TODO must be given => only dim is defined for a Domain
self._mappings = mappings
else:
raise ValueError('Wrong input')
# ...
self._comm = comm
#--------------------------------------------------------------------------
# Option [2]: from a discrete mapping
#--------------------------------------------------------------------------
@classmethod
def from_discrete_mapping( cls, mapping, comm=None ):
"""Create a geometry from one discrete mapping."""
        if mapping.ldim == 1:
            raise NotImplementedError('1D discrete mappings are not supported yet')
if mapping.ldim == 2:
domain = Square(name='Omega')
mappings = {'Omega': mapping}
return Geometry(domain=domain, mappings=mappings, comm=comm)
elif mapping.ldim == 3:
domain = Cube(name='Omega')
mappings = {'Omega': mapping}
return Geometry(domain=domain, mappings=mappings, comm=comm)
#--------------------------------------------------------------------------
# Option [3]: discrete topological line/square/cube
#--------------------------------------------------------------------------
@classmethod
def from_topological_domain(cls, domain, ncells, comm=None):
interior = domain.interior
if not isinstance(interior, Union):
interior = [interior]
for itr in interior:
if not isinstance(itr, NCubeInterior):
msg = "Topological domain must be an NCube;"\
" got {} instead.".format(type(itr))
raise TypeError(msg)
mappings = {itr.name: None for itr in interior}
geo = Geometry(domain=domain, mappings=mappings, comm=comm)
geo.ncells = ncells
return geo
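    # Example (hypothetical): a unit square meshed with 8x8 cells and no
    # analytical mapping attached,
    #
    #   domain = Square('Omega')
    #   geo    = Geometry.from_topological_domain(domain, ncells=[8, 8])
    #
    # `ncells` is simply stored on the geometry for later discretization.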
#--------------------------------------------------------------------------
@property
def ldim(self):
return self._ldim
@property
def pdim(self):
return self._pdim
@property
def comm(self):
return self._comm
@property
def domain(self):
return self._domain
@property
def mappings(self):
return self._mappings
def __len__(self):
return len(self.domain)
def read( self, filename, comm=MPI.COMM_WORLD ):
# ... check extension of the file
basename, ext = os.path.splitext(filename)
if not(ext == '.h5'):
raise ValueError('> Only h5 files are supported')
# ...
# read the topological domain
domain = Domain.from_file(filename)
if not(comm is None):
kwargs = dict( driver='mpio', comm=comm ) if comm.size > 1 else {}
else:
kwargs = {}
h5 = h5py.File( filename, mode='r', **kwargs )
yml = yaml.load( h5['geometry.yml'][()], Loader=yaml.SafeLoader )
ldim = yml['ldim']
pdim = yml['pdim']
n_patches = len( yml['patches'] )
# ...
if n_patches == 0:
h5.close()
raise ValueError( "Input file contains no patches." )
# ...
        # ... read patches
mappings = {}
for i_patch in range( n_patches ):
item = yml['patches'][i_patch]
patch_name = item['name']
mapping_id = item['mapping_id']
dtype = item['type']
patch = h5[mapping_id]
if dtype in ['SplineMapping', 'NurbsMapping']:
degree = [int (p) for p in patch.attrs['degree' ]]
periodic = [bool(b) for b in patch.attrs['periodic']]
knots = [patch['knots_{}'.format(d)][:] for d in range( ldim )]
spaces = [SplineSpace( degree=p, knots=k, periodic=b )
for p,k,b in zip( degree, knots, periodic )]
tensor_space = TensorFemSpace( *spaces, comm=comm )
if dtype == 'SplineMapping':
mapping = SplineMapping.from_control_points( tensor_space,
patch['points'][..., :pdim] )
elif dtype == 'NurbsMapping':
mapping = NurbsMapping.from_control_points_weights( tensor_space,
patch['points'][..., :pdim],
patch['weights'] )
mapping.set_name( item['name'] )
mappings[patch_name] = mapping
# ...
# ... close the h5 file
h5.close()
# ...
# ...
self._ldim = ldim
self._pdim = pdim
self._mappings = mappings
self._domain = domain
# ...
def export( self, filename ):
"""
Parameters
----------
filename : str
Name of HDF5 output file.
"""
# ...
comm = self.comm
# ...
# Create dictionary with geometry metadata
yml = {}
yml['ldim'] = self.ldim
yml['pdim'] = self.pdim
# ... information about the patches
if not( self.mappings ):
raise ValueError('No mappings were found')
patches_info = []
i_mapping = 0
for patch_name, mapping in self.mappings.items():
name = '{}'.format( patch_name )
mapping_id = 'mapping_{}'.format( i_mapping )
dtype = '{}'.format( type( mapping ).__name__ )
patches_info += [{'name': name,
'mapping_id': mapping_id,
'type': dtype}]
i_mapping += 1
yml['patches'] = patches_info
# ...
# ... topology
topo_yml = self.domain.todict()
# ...
# Create HDF5 file (in parallel mode if MPI communicator size > 1)
if not(comm is None) and comm.size > 1:
kwargs = dict( driver='mpio', comm=comm )
else:
kwargs = {}
h5 = h5py.File( filename, mode='w', **kwargs )
# ...
# Dump geometry metadata to string in YAML file format
geo = yaml.dump( data = yml, sort_keys=False)
# Write geometry metadata as fixed-length array of ASCII characters
h5['geometry.yml'] = np.array( geo, dtype='S' )
# ...
# ...
# Dump geometry metadata to string in YAML file format
geo = yaml.dump( data = topo_yml, sort_keys=False)
# Write topology metadata as fixed-length array of ASCII characters
h5['topology.yml'] = np.array( geo, dtype='S' )
# ...
i_mapping = 0
for patch_name, mapping in self.mappings.items():
space = mapping.space
            # Create group for the current patch
group = h5.create_group( yml['patches'][i_mapping]['mapping_id'] )
group.attrs['shape' ] = space.vector_space.npts
group.attrs['degree' ] = space.degree
group.attrs['rational' ] = False # TODO remove
group.attrs['periodic' ] = space.periodic
for d in range( self.ldim ):
group['knots_{}'.format( d )] = space.spaces[d].knots
# Collective: create dataset for control points
shape = [n for n in space.vector_space.npts] + [self.pdim]
dtype = space.vector_space.dtype
dset = group.create_dataset( 'points', shape=shape, dtype=dtype )
# Independent: write control points to dataset
starts = space.vector_space.starts
ends = space.vector_space.ends
index = [slice(s, e+1) for s, e in zip(starts, ends)] + [slice(None)]
index = tuple( index )
dset[index] = mapping.control_points[index]
# case of NURBS
if isinstance(mapping, NurbsMapping):
# Collective: create dataset for weights
shape = [n for n in space.vector_space.npts]
dtype = space.vector_space.dtype
dset = group.create_dataset( 'weights', shape=shape, dtype=dtype )
# Independent: write weights to dataset
starts = space.vector_space.starts
ends = space.vector_space.ends
index = [slice(s, e+1) for s, e in zip(starts, ends)]
index = tuple( index )
dset[index] = mapping.weights[index]
i_mapping += 1
# Close HDF5 file
h5.close()
#==============================================================================
def export_nurbs_to_hdf5(filename, nurbs, periodic=None, comm=None ):
"""
Export a single-patch igakit NURBS object to a Psydac geometry file in HDF5 format
Parameters
----------
filename : <str>
Name of output geometry file, e.g. 'geo.h5'
nurbs : <igakit.nurbs.NURBS>
        igakit geometry nurbs object
    periodic : <list>
        periodicity of the spline space in each parametric direction
        (default: all directions non-periodic)
comm : <MPI.COMM>
mpi communicator
"""
import os.path
import igakit
assert isinstance(nurbs, igakit.nurbs.NURBS)
extension = os.path.splitext(filename)[-1]
if not extension == '.h5':
raise ValueError('> Only h5 extension is allowed for filename')
yml = {}
yml['ldim'] = nurbs.dim
yml['pdim'] = nurbs.dim
patches_info = []
i_mapping = 0
i = 0
rational = not abs(nurbs.weights-1).sum()<1e-15
patch_name = 'patch_{}'.format(i)
name = '{}'.format( patch_name )
mapping_id = 'mapping_{}'.format( i_mapping )
dtype = 'NurbsMapping' if rational else 'SplineMapping'
patches_info += [{'name': name , 'mapping_id':mapping_id, 'type':dtype}]
yml['patches'] = patches_info
# ...
# Create HDF5 file (in parallel mode if MPI communicator size > 1)
if not(comm is None) and comm.size > 1:
kwargs = dict( driver='mpio', comm=comm )
else:
kwargs = {}
h5 = h5py.File( filename, mode='w', **kwargs )
# ...
# Dump geometry metadata to string in YAML file format
geom = yaml.dump( data = yml, sort_keys=False)
# Write geometry metadata as fixed-length array of ASCII characters
h5['geometry.yml'] = np.array( geom, dtype='S' )
# ...
# ... topology
if nurbs.dim == 1:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
domain = Line(patch_name, bounds1=bounds1)
elif nurbs.dim == 2:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
domain = Square(patch_name, bounds1=bounds1, bounds2=bounds2)
elif nurbs.dim == 3:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
bounds3 = (float(nurbs.breaks(2)[0]), float(nurbs.breaks(2)[-1]))
domain = Cube(patch_name, bounds1=bounds1, bounds2=bounds2, bounds3=bounds3)
topo_yml = domain.todict()
# Dump geometry metadata to string in YAML file format
geom = yaml.dump( data = topo_yml, sort_keys=False)
# Write topology metadata as fixed-length array of ASCII characters
h5['topology.yml'] = np.array( geom, dtype='S' )
group = h5.create_group( yml['patches'][i]['mapping_id'] )
group.attrs['degree' ] = nurbs.degree
group.attrs['rational' ] = rational
group.attrs['periodic' ] = tuple( False for d in range( nurbs.dim ) ) if periodic is None else periodic
for d in range( nurbs.dim ):
group['knots_{}'.format( d )] = nurbs.knots[d]
group['points'] = nurbs.points[...,:nurbs.dim]
if rational:
group['weights'] = nurbs.weights
h5.close()
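# Hypothetical round trip (file name is illustrative): a NURBS exported with
# this helper can be read back as a Psydac geometry, e.g.
#
#   export_nurbs_to_hdf5('quarter_annulus.h5', nrb)
#   geo = Geometry(filename='quarter_annulus.h5')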
#==============================================================================
def refine_nurbs(nrb, ncells=None, degree=None, multiplicity=None, tol=1e-9):
"""
This function refines the nurbs object.
    It constructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid,
such that the total number of cells is equal to the new number of cells.
    We use knot insertion to construct the new knot sequence, so the geometry is identical to the previous one.
It also elevates the degree of the nrb object based on the new degree.
Parameters
----------
nrb : <igakit.nurbs.NURBS>
geometry nurbs object
ncells : <list>
total number of cells in each direction
degree : <list>
degree in each direction
multiplicity : <list>
multiplicity of each knot in the knot sequence in each direction
tol : <float>
Minimum distance between two break points.
Returns
-------
nrb : <igakit.nurbs.NURBS>
the refined geometry nurbs object
"""
if multiplicity is None:
multiplicity = [1]*nrb.dim
nrb = nrb.clone()
if ncells is not None:
for axis in range(0,nrb.dim):
ub = nrb.breaks(axis)[0]
ue = nrb.breaks(axis)[-1]
knots = np.linspace(ub,ue,ncells[axis]+1)
index = nrb.knots[axis].searchsorted(knots)
nrb_knots = nrb.knots[axis][index]
for m,(nrb_k, k) in enumerate(zip(nrb_knots, knots)):
if abs(k-nrb_k)<tol:
knots[m] = np.nan
knots = knots[~np.isnan(knots)]
indices = np.round(np.linspace(0, len(knots) - 1, ncells[axis]+1-len(nrb.breaks(axis)))).astype(int)
knots = knots[indices]
if len(knots)>0:
nrb.refine(axis, knots)
if degree is not None:
for axis in range(0,nrb.dim):
d = degree[axis] - nrb.degree[axis]
if d<0:
raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
nrb.elevate(axis, times=d)
for axis in range(nrb.dim):
decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
counts = multiplicity[axis] - counts
counts[counts<0] = 0
knots = np.repeat(knots, counts)
nrb = nrb.refine(axis, knots)
return nrb
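# Hypothetical usage sketch (knot vectors are illustrative): starting from a
# single-cell quadratic patch, refine to 8x8 cells and elevate to cubic degree,
#
#   from igakit.nurbs import NURBS
#   knots = [np.array([0., 0., 0., 1., 1., 1.])] * 2
#   nrb   = refine_nurbs(NURBS(knots), ncells=[8, 8], degree=[3, 3])
#
# The geometry itself is unchanged; only the knot vectors and degree differ.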
def refine_knots(knots, ncells, degree, multiplicity=None, tol=1e-9):
"""
This function refines the knot sequence.
    It constructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid,
such that the total number of cells is equal to the new number of cells.
    We use knot insertion to construct the new knot sequence, so the geometry is identical to the previous one.
It also elevates the degree of the nrb object based on the new degree.
Parameters
----------
knots : <list>
list of knot sequences in each direction
ncells : <list>
total number of cells in each direction
degree : <list>
degree in each direction
multiplicity : <list>
multiplicity of each knot in the knot sequence in each direction
tol : <float>
Minimum distance between two break points.
Returns
-------
knots : <list>
the refined knot sequences in each direction
"""
from igakit.nurbs import NURBS
dim = len(ncells)
if multiplicity is None:
multiplicity = [1]*dim
assert len(knots) == dim
nrb = NURBS(knots)
for axis in range(dim):
ub = nrb.breaks(axis)[0]
ue = nrb.breaks(axis)[-1]
knots = np.linspace(ub,ue,ncells[axis]+1)
index = nrb.knots[axis].searchsorted(knots)
nrb_knots = nrb.knots[axis][index]
for m,(nrb_k, k) in enumerate(zip(nrb_knots, knots)):
if abs(k-nrb_k)<tol:
knots[m] = np.nan
knots = knots[~np.isnan(knots)]
indices = np.round(np.linspace(0, len(knots) - 1, ncells[axis]+1-len(nrb.breaks(axis)))).astype(int)
knots = knots[indices]
if len(knots)>0:
nrb.refine(axis, knots)
for axis in range(dim):
d = degree[axis] - nrb.degree[axis]
if d<0:
raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
nrb.elevate(axis, times=d)
for axis in range(dim):
decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
counts = multiplicity[axis] - counts
counts[counts<0] = 0
knots = np.repeat(knots, counts)
nrb = nrb.refine(axis, knots)
return nrb.knots
#==============================================================================
def import_geopdes_to_nurbs(filename):
"""
    This function reads a geopdes geometry file and converts it to an igakit nurbs object
Parameters
----------
filename : <str>
the filename of the geometry file
Returns
-------
nrb : <igakit.nurbs.NURBS>
the geometry nurbs object
"""
extension = os.path.splitext(filename)[-1]
if not extension == '.txt':
raise ValueError('> Expected .txt extension')
f = open(filename)
lines = f.readlines()
f.close()
lines = [line for line in lines if line[0].strip() != "#"]
data = _read_header(lines[0])
n_dim = data[0]
r_dim = data[1]
n_patchs = data[2]
n_lines_per_patch = 3*n_dim + 1
list_begin_line = _get_begin_line(lines, n_patchs)
nrb = _read_patch(lines, 1, n_lines_per_patch, list_begin_line)
return nrb
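# The helpers below assume the geopdes text layout: a header line whose integers
# give (n_dim, r_dim, n_patches), then for every patch a "PATCH <i>" marker
# followed by 3*n_dim + 1 data lines, from which the degrees, shape, knot
# vectors, control-point coordinates and weights are extracted.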
def _read_header(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
pass
return data
def _extract_patch_line(lines, i_patch):
text = "PATCH " + str(i_patch)
for i_line,line in enumerate(lines):
r = line.find(text)
if r != -1:
return i_line
return None
def _get_begin_line(lines, n_patchs):
list_begin_line = []
for i_patch in range(0, n_patchs):
r = _extract_patch_line(lines, i_patch+1)
if r is not None:
list_begin_line.append(r)
else:
raise ValueError(" could not parse the input file")
return list_begin_line
def _read_line(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
try:
data.append(float(c))
except:
pass
return data
def _read_patch(lines, i_patch, n_lines_per_patch, list_begin_line):
from igakit.nurbs import NURBS
i_begin_line = list_begin_line[i_patch-1]
data_patch = []
for i in range(i_begin_line+1, i_begin_line + n_lines_per_patch+1):
data_patch.append(_read_line(lines[i]))
degree = data_patch[0]
shape = data_patch[1]
xl = [np.array(i) for i in data_patch[2:2+len(degree)] ]
xp = [np.array(i) for i in data_patch[2+len(degree):2+2*len(degree)] ]
w = np.array(data_patch[2+2*len(degree)])
X = [i.reshape(shape, order='F') for i in xp]
W = w.reshape(shape, order='F')
points = np.zeros((*shape, 3))
for i in range(len(shape)):
points[..., i] = X[i]
knots = xl
nrb = NURBS(knots, control=points, weights=W)
return nrb
| # coding: utf-8
#
# a Geometry class contains the list of patches and additional information about
# the topology i.e. connectivity, boundaries
# For the moment, it is used as a container, that can be loaded from a file
# (hdf5)
from itertools import product
from collections import abc
import numpy as np
import string
import random
import h5py
import yaml
import os
import string
import random
from mpi4py import MPI
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.mapping.discrete import SplineMapping, NurbsMapping
from sympde.topology import Domain, Line, Square, Cube, NCubeInterior
from sympde.topology.basic import Union
#==============================================================================
class Geometry( object ):
_ldim = None
_pdim = None
_patches = []
_topology = None
#--------------------------------------------------------------------------
# Option [1]: from a (domain, mappings) or a file
#--------------------------------------------------------------------------
def __init__( self, domain=None, mappings=None,
filename=None, comm=MPI.COMM_WORLD ):
# ... read the geometry if the filename is given
if not( filename is None ):
self.read(filename, comm=comm)
elif not( domain is None ):
assert( isinstance( domain, Domain ) )
assert( not( mappings is None ))
assert isinstance( mappings, dict)
# ... check sanity
interior_names = sorted(domain.interior_names)
mappings_keys = sorted(list(mappings.keys()))
assert( interior_names == mappings_keys )
# ...
self._domain = domain
self._ldim = domain.dim
self._pdim = domain.dim # TODO must be given => only dim is defined for a Domain
self._mappings = mappings
else:
raise ValueError('Wrong input')
# ...
self._comm = comm
#--------------------------------------------------------------------------
# Option [2]: from a discrete mapping
#--------------------------------------------------------------------------
@classmethod
def from_discrete_mapping( cls, mapping, comm=None ):
"""Create a geometry from one discrete mapping."""
        if mapping.ldim == 1:
            raise NotImplementedError('1D discrete mappings are not supported yet')
if mapping.ldim == 2:
domain = Square(name='Omega')
mappings = {'Omega': mapping}
return Geometry(domain=domain, mappings=mappings, comm=comm)
elif mapping.ldim == 3:
domain = Cube(name='Omega')
mappings = {'Omega': mapping}
return Geometry(domain=domain, mappings=mappings, comm=comm)
#--------------------------------------------------------------------------
# Option [3]: discrete topological line/square/cube
#--------------------------------------------------------------------------
@classmethod
def from_topological_domain(cls, domain, ncells, comm=None):
interior = domain.interior
if not isinstance(interior, Union):
interior = [interior]
for itr in interior:
if not isinstance(itr, NCubeInterior):
msg = "Topological domain must be an NCube;"\
" got {} instead.".format(type(itr))
raise TypeError(msg)
mappings = {itr.name: None for itr in interior}
geo = Geometry(domain=domain, mappings=mappings, comm=comm)
geo.ncells = ncells
return geo
#--------------------------------------------------------------------------
@property
def ldim(self):
return self._ldim
@property
def pdim(self):
return self._pdim
@property
def comm(self):
return self._comm
@property
def domain(self):
return self._domain
@property
def mappings(self):
return self._mappings
def __len__(self):
return len(self.domain)
def read( self, filename, comm=MPI.COMM_WORLD ):
# ... check extension of the file
basename, ext = os.path.splitext(filename)
if not(ext == '.h5'):
raise ValueError('> Only h5 files are supported')
# ...
# read the topological domain
domain = Domain.from_file(filename)
if not(comm is None):
kwargs = dict( driver='mpio', comm=comm ) if comm.size > 1 else {}
else:
kwargs = {}
h5 = h5py.File( filename, mode='r', **kwargs )
yml = yaml.load( h5['geometry.yml'][()], Loader=yaml.SafeLoader )
ldim = yml['ldim']
pdim = yml['pdim']
n_patches = len( yml['patches'] )
# ...
if n_patches == 0:
h5.close()
raise ValueError( "Input file contains no patches." )
# ...
        # ... read patches
mappings = {}
for i_patch in range( n_patches ):
item = yml['patches'][i_patch]
patch_name = item['name']
mapping_id = item['mapping_id']
dtype = item['type']
patch = h5[mapping_id]
if dtype in ['SplineMapping', 'NurbsMapping']:
degree = [int (p) for p in patch.attrs['degree' ]]
periodic = [bool(b) for b in patch.attrs['periodic']]
knots = [patch['knots_{}'.format(d)][:] for d in range( ldim )]
spaces = [SplineSpace( degree=p, knots=k, periodic=b )
for p,k,b in zip( degree, knots, periodic )]
tensor_space = TensorFemSpace( *spaces, comm=comm )
if dtype == 'SplineMapping':
mapping = SplineMapping.from_control_points( tensor_space,
patch['points'][..., :pdim] )
elif dtype == 'NurbsMapping':
mapping = NurbsMapping.from_control_points_weights( tensor_space,
patch['points'][..., :pdim],
patch['weights'] )
mapping.set_name( item['name'] )
mappings[patch_name] = mapping
# ...
# ... close the h5 file
h5.close()
# ...
# ...
self._ldim = ldim
self._pdim = pdim
self._mappings = mappings
self._domain = domain
# ...
def export( self, filename ):
"""
Parameters
----------
filename : str
Name of HDF5 output file.
"""
# ...
comm = self.comm
# ...
# Create dictionary with geometry metadata
yml = {}
yml['ldim'] = self.ldim
yml['pdim'] = self.pdim
# ... information about the patches
if not( self.mappings ):
raise ValueError('No mappings were found')
patches_info = []
i_mapping = 0
for patch_name, mapping in self.mappings.items():
name = '{}'.format( patch_name )
mapping_id = 'mapping_{}'.format( i_mapping )
dtype = '{}'.format( type( mapping ).__name__ )
patches_info += [{'name': name,
'mapping_id': mapping_id,
'type': dtype}]
i_mapping += 1
yml['patches'] = patches_info
# ...
# ... topology
topo_yml = self.domain.todict()
# ...
# Create HDF5 file (in parallel mode if MPI communicator size > 1)
if not(comm is None) and comm.size > 1:
kwargs = dict( driver='mpio', comm=comm )
else:
kwargs = {}
h5 = h5py.File( filename, mode='w', **kwargs )
# ...
# Dump geometry metadata to string in YAML file format
geo = yaml.dump( data = yml, sort_keys=False)
# Write geometry metadata as fixed-length array of ASCII characters
h5['geometry.yml'] = np.array( geo, dtype='S' )
# ...
# ...
# Dump geometry metadata to string in YAML file format
geo = yaml.dump( data = topo_yml, sort_keys=False)
# Write topology metadata as fixed-length array of ASCII characters
h5['topology.yml'] = np.array( geo, dtype='S' )
# ...
i_mapping = 0
for patch_name, mapping in self.mappings.items():
space = mapping.space
            # Create group for the current patch
group = h5.create_group( yml['patches'][i_mapping]['mapping_id'] )
group.attrs['shape' ] = space.vector_space.npts
group.attrs['degree' ] = space.degree
group.attrs['rational' ] = False # TODO remove
group.attrs['periodic' ] = space.periodic
for d in range( self.ldim ):
group['knots_{}'.format( d )] = space.spaces[d].knots
# Collective: create dataset for control points
shape = [n for n in space.vector_space.npts] + [self.pdim]
dtype = space.vector_space.dtype
dset = group.create_dataset( 'points', shape=shape, dtype=dtype )
# Independent: write control points to dataset
starts = space.vector_space.starts
ends = space.vector_space.ends
index = [slice(s, e+1) for s, e in zip(starts, ends)] + [slice(None)]
index = tuple( index )
dset[index] = mapping.control_points[index]
# case of NURBS
if isinstance(mapping, NurbsMapping):
# Collective: create dataset for weights
shape = [n for n in space.vector_space.npts]
dtype = space.vector_space.dtype
dset = group.create_dataset( 'weights', shape=shape, dtype=dtype )
# Independent: write weights to dataset
starts = space.vector_space.starts
ends = space.vector_space.ends
index = [slice(s, e+1) for s, e in zip(starts, ends)]
index = tuple( index )
dset[index] = mapping.weights[index]
i_mapping += 1
# Close HDF5 file
h5.close()
#==============================================================================
def export_nurbs_to_hdf5(filename, nurbs, periodic=None, comm=None ):
"""
Export a single-patch igakit NURBS object to a Psydac geometry file in HDF5 format
Parameters
----------
filename : <str>
Name of output geometry file, e.g. 'geo.h5'
nurbs : <igakit.nurbs.NURBS>
        igakit geometry nurbs object
    periodic : <list>
        periodicity of the spline space in each parametric direction
        (default: all directions non-periodic)
comm : <MPI.COMM>
mpi communicator
"""
import os.path
import igakit
assert isinstance(nurbs, igakit.nurbs.NURBS)
extension = os.path.splitext(filename)[-1]
if not extension == '.h5':
raise ValueError('> Only h5 extension is allowed for filename')
yml = {}
yml['ldim'] = nurbs.dim
yml['pdim'] = nurbs.dim
patches_info = []
i_mapping = 0
i = 0
rational = not abs(nurbs.weights-1).sum()<1e-15
patch_name = 'patch_{}'.format(i)
name = '{}'.format( patch_name )
mapping_id = 'mapping_{}'.format( i_mapping )
dtype = 'NurbsMapping' if rational else 'SplineMapping'
patches_info += [{'name': name , 'mapping_id':mapping_id, 'type':dtype}]
yml['patches'] = patches_info
# ...
# Create HDF5 file (in parallel mode if MPI communicator size > 1)
if not(comm is None) and comm.size > 1:
kwargs = dict( driver='mpio', comm=comm )
else:
kwargs = {}
h5 = h5py.File( filename, mode='w', **kwargs )
# ...
# Dump geometry metadata to string in YAML file format
geom = yaml.dump( data = yml, sort_keys=False)
# Write geometry metadata as fixed-length array of ASCII characters
h5['geometry.yml'] = np.array( geom, dtype='S' )
# ...
# ... topology
if nurbs.dim == 1:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
domain = Line(patch_name, bounds1=bounds1)
elif nurbs.dim == 2:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
domain = Square(patch_name, bounds1=bounds1, bounds2=bounds2)
elif nurbs.dim == 3:
bounds1 = (float(nurbs.breaks(0)[0]), float(nurbs.breaks(0)[-1]))
bounds2 = (float(nurbs.breaks(1)[0]), float(nurbs.breaks(1)[-1]))
bounds3 = (float(nurbs.breaks(2)[0]), float(nurbs.breaks(2)[-1]))
domain = Cube(patch_name, bounds1=bounds1, bounds2=bounds2, bounds3=bounds3)
topo_yml = domain.todict()
# Dump geometry metadata to string in YAML file format
geom = yaml.dump( data = topo_yml, sort_keys=False)
# Write topology metadata as fixed-length array of ASCII characters
h5['topology.yml'] = np.array( geom, dtype='S' )
group = h5.create_group( yml['patches'][i]['mapping_id'] )
group.attrs['degree' ] = nurbs.degree
group.attrs['rational' ] = rational
group.attrs['periodic' ] = tuple( False for d in range( nurbs.dim ) ) if periodic is None else periodic
for d in range( nurbs.dim ):
group['knots_{}'.format( d )] = nurbs.knots[d]
group['points'] = nurbs.points[...,:nurbs.dim]
if rational:
group['weights'] = nurbs.weights
h5.close()
#==============================================================================
def refine_nurbs(nrb, ncells=None, degree=None, multiplicity=None, tol=1e-9):
"""
This function refines the nurbs object.
    It constructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid,
such that the total number of cells is equal to the new number of cells.
    We use knot insertion to construct the new knot sequence, so the geometry is identical to the previous one.
It also elevates the degree of the nrb object based on the new degree.
Parameters
----------
nrb : <igakit.nurbs.NURBS>
geometry nurbs object
ncells : <list>
total number of cells in each direction
degree : <list>
degree in each direction
multiplicity : <list>
multiplicity of each knot in the knot sequence in each direction
tol : <float>
Minimum distance between two break points.
Returns
-------
nrb : <igakit.nurbs.NURBS>
the refined geometry nurbs object
"""
if multiplicity is None:
multiplicity = [1]*nrb.dim
nrb = nrb.clone()
if ncells is not None:
for axis in range(0,nrb.dim):
ub = nrb.breaks(axis)[0]
ue = nrb.breaks(axis)[-1]
knots = np.linspace(ub,ue,ncells[axis]+1)
index = nrb.knots[axis].searchsorted(knots)
nrb_knots = nrb.knots[axis][index]
for m,(nrb_k, k) in enumerate(zip(nrb_knots, knots)):
if abs(k-nrb_k)<tol:
knots[m] = np.nan
knots = knots[~np.isnan(knots)]
indices = np.round(np.linspace(0, len(knots) - 1, ncells[axis]+1-len(nrb.breaks(axis)))).astype(int)
knots = knots[indices]
if len(knots)>0:
nrb.refine(axis, knots)
if degree is not None:
for axis in range(0,nrb.dim):
d = degree[axis] - nrb.degree[axis]
if d<0:
raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
nrb.elevate(axis, times=d)
for axis in range(nrb.dim):
decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
counts = multiplicity[axis] - counts
counts[counts<0] = 0
knots = np.repeat(knots, counts)
nrb = nrb.refine(axis, knots)
return nrb
def refine_knots(knots, ncells, degree, multiplicity=None, tol=1e-9):
"""
This function refines the knot sequence.
    It constructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid,
such that the total number of cells is equal to the new number of cells.
    We use knot insertion to construct the new knot sequence, so the geometry is identical to the previous one.
It also elevates the degree of the nrb object based on the new degree.
Parameters
----------
knots : <list>
list of knot sequences in each direction
ncells : <list>
total number of cells in each direction
degree : <list>
degree in each direction
multiplicity : <list>
multiplicity of each knot in the knot sequence in each direction
tol : <float>
Minimum distance between two break points.
Returns
-------
knots : <list>
the refined knot sequences in each direction
"""
from igakit.nurbs import NURBS
dim = len(ncells)
if multiplicity is None:
multiplicity = [1]*dim
assert len(knots) == dim
nrb = NURBS(knots)
for axis in range(dim):
ub = nrb.breaks(axis)[0]
ue = nrb.breaks(axis)[-1]
knots = np.linspace(ub,ue,ncells[axis]+1)
index = nrb.knots[axis].searchsorted(knots)
nrb_knots = nrb.knots[axis][index]
for m,(nrb_k, k) in enumerate(zip(nrb_knots, knots)):
if abs(k-nrb_k)<tol:
knots[m] = np.nan
knots = knots[~np.isnan(knots)]
indices = np.round(np.linspace(0, len(knots) - 1, ncells[axis]+1-len(nrb.breaks(axis)))).astype(int)
knots = knots[indices]
if len(knots)>0:
nrb.refine(axis, knots)
for axis in range(dim):
d = degree[axis] - nrb.degree[axis]
if d<0:
raise ValueError('The degree {} must be >= {}'.format(degree, nrb.degree))
nrb.elevate(axis, times=d)
for axis in range(dim):
decimals = abs(np.floor(np.log10(np.abs(tol))).astype(int))
knots, counts = np.unique(nrb.knots[axis].round(decimals=decimals), return_counts=True)
counts = multiplicity[axis] - counts
counts[counts<0] = 0
knots = np.repeat(knots, counts)
nrb = nrb.refine(axis, knots)
return nrb.knots
#==============================================================================
def import_geopdes_to_nurbs(filename):
"""
    This function reads a geopdes geometry file and converts it to an igakit nurbs object
Parameters
----------
filename : <str>
the filename of the geometry file
Returns
-------
nrb : <igakit.nurbs.NURBS>
the geometry nurbs object
"""
extension = os.path.splitext(filename)[-1]
if not extension == '.txt':
raise ValueError('> Expected .txt extension')
f = open(filename)
lines = f.readlines()
f.close()
lines = [line for line in lines if line[0].strip() != "#"]
data = _read_header(lines[0])
n_dim = data[0]
r_dim = data[1]
n_patchs = data[2]
n_lines_per_patch = 3*n_dim + 1
list_begin_line = _get_begin_line(lines, n_patchs)
nrb = _read_patch(lines, 1, n_lines_per_patch, list_begin_line)
return nrb
def _read_header(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
pass
return data
def _extract_patch_line(lines, i_patch):
text = "PATCH " + str(i_patch)
for i_line,line in enumerate(lines):
r = line.find(text)
if r != -1:
return i_line
return None
def _get_begin_line(lines, n_patchs):
list_begin_line = []
for i_patch in range(0, n_patchs):
r = _extract_patch_line(lines, i_patch+1)
if r is not None:
list_begin_line.append(r)
else:
raise ValueError(" could not parse the input file")
return list_begin_line
def _read_line(line):
chars = line.split(" ")
data = []
for c in chars:
try:
data.append(int(c))
except:
try:
data.append(float(c))
except:
pass
return data
def _read_patch(lines, i_patch, n_lines_per_patch, list_begin_line):
from igakit.nurbs import NURBS
i_begin_line = list_begin_line[i_patch-1]
data_patch = []
for i in range(i_begin_line+1, i_begin_line + n_lines_per_patch+1):
data_patch.append(_read_line(lines[i]))
degree = data_patch[0]
shape = data_patch[1]
xl = [np.array(i) for i in data_patch[2:2+len(degree)] ]
xp = [np.array(i) for i in data_patch[2+len(degree):2+2*len(degree)] ]
w = np.array(data_patch[2+2*len(degree)])
X = [i.reshape(shape, order='F') for i in xp]
W = w.reshape(shape, order='F')
points = np.zeros((*shape, 3))
for i in range(len(shape)):
points[..., i] = X[i]
knots = xl
nrb = NURBS(knots, control=points, weights=W)
return nrb | en | 0.660659 | # coding: utf-8 # # a Geometry class contains the list of patches and additional information about # the topology i.e. connectivity, boundaries # For the moment, it is used as a container, that can be loaded from a file # (hdf5) #============================================================================== #-------------------------------------------------------------------------- # Option [1]: from a (domain, mappings) or a file #-------------------------------------------------------------------------- # ... read the geometry if the filename is given # ... check sanity # ... # TODO must be given => only dim is defined for a Domain # ... #-------------------------------------------------------------------------- # Option [2]: from a discrete mapping #-------------------------------------------------------------------------- Create a geometry from one discrete mapping. #-------------------------------------------------------------------------- # Option [3]: discrete topological line/square/cube #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- # ... check extension of the file # ... # read the topological domain # ... # ... # ... read patchs # ... # ... close the h5 file # ... # ... # ... Parameters ---------- filename : str Name of HDF5 output file. # ... # ... # Create dictionary with geometry metadata # ... information about the patches # ... # ... topology # ... # Create HDF5 file (in parallel mode if MPI communicator size > 1) # ... # Dump geometry metadata to string in YAML file format # Write geometry metadata as fixed-length array of ASCII characters # ... # ... # Dump geometry metadata to string in YAML file format # Write topology metadata as fixed-length array of ASCII characters # ... # Create group for patch 0 # TODO remove # Collective: create dataset for control points # Independent: write control points to dataset # case of NURBS # Collective: create dataset for weights # Independent: write weights to dataset # Close HDF5 file #============================================================================== Export a single-patch igakit NURBS object to a Psydac geometry file in HDF5 format Parameters ---------- filename : <str> Name of output geometry file, e.g. 'geo.h5' nurbs : <igakit.nurbs.NURBS> igakit geometry nurbs object comm : <MPI.COMM> mpi communicator # ... # Create HDF5 file (in parallel mode if MPI communicator size > 1) # ... # Dump geometry metadata to string in YAML file format # Write geometry metadata as fixed-length array of ASCII characters # ... # ... topology # Dump geometry metadata to string in YAML file format # Write topology metadata as fixed-length array of ASCII characters #============================================================================== This function refines the nurbs object. It contructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid, such that the total number of cells is equal to the new number of cells. We use knot insertion to construct the new knot sequence , so the geometry is identical to the previous one. It also elevates the degree of the nrb object based on the new degree. 
Parameters ---------- nrb : <igakit.nurbs.NURBS> geometry nurbs object ncells : <list> total number of cells in each direction degree : <list> degree in each direction multiplicity : <list> multiplicity of each knot in the knot sequence in each direction tol : <float> Minimum distance between two break points. Returns ------- nrb : <igakit.nurbs.NURBS> the refined geometry nurbs object This function refines the knot sequence. It contructs a new grid based on the new number of cells, and it adds the new break points to the nrb grid, such that the total number of cells is equal to the new number of cells. We use knot insertion to construct the new knot sequence , so the geometry is identical to the previous one. It also elevates the degree of the nrb object based on the new degree. Parameters ---------- knots : <list> list of knot sequences in each direction ncells : <list> total number of cells in each direction degree : <list> degree in each direction multiplicity : <list> multiplicity of each knot in the knot sequence in each direction tol : <float> Minimum distance between two break points. Returns ------- knots : <list> the refined knot sequences in each direction #============================================================================== This function reads a geopdes geometry file and convert it to igakit nurbs object Parameters ---------- filename : <str> the filename of the geometry file Returns ------- nrb : <igakit.nurbs.NURBS> the geometry nurbs object | 2.331976 | 2 |
utils.py | ok1zjf/AMNet | 40 | 9536 | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__= '2.2'
__status__ = "Research"
__date__ = "28/1/2018"
__license__= "MIT License"
import os
import numpy as np
import glob
import subprocess
import platform
import sys
import pkg_resources
import torch
import PIL as Image
try:
import cv2
except:
print("WARNING: Could not load OpenCV python package. Some functionality may not be available.")
def list_files(path, extensions=[], sort=True, max_len=-1):
if os.path.isdir(path):
filenames = [os.path.join(path, fn) for fn in os.listdir(path) if
any([fn.endswith(ext) for ext in extensions])]
else:
print("ERROR. ", path,' is not a directory!')
return []
if sort:
filenames.sort()
if max_len>-1:
filenames = filenames[:max_len]
return filenames
def get_video_list(video_path, max_len=-1):
return list_files(video_path, extensions=['avi', 'flv', 'mpg', 'mp4'], sort=True, max_len=max_len)
def get_image_list(video_path, max_len=-1):
return list_files(video_path, extensions=['jpg', 'jpeg', 'png'], sort=True, max_len=max_len)
def get_split_files(dataset_path, splits_path, split_name, absolute_path=False):
path = os.path.join(dataset_path, splits_path, split_name)
files = glob.glob(path)
files.sort()
if not absolute_path:
files_out = []
for file in files:
_,filename = os.path.split(file)
files_out.append(filename)
return files_out
return files
def get_max_rc_weights(experiment_path):
log_filename = 'train_log_0.csv'
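    # Validation rows in this log are assumed to be comma separated as
    #   val,<epoch>,...,<rc>,...,<mse>,...
    # i.e. column 0 is the split tag, column 1 the epoch, column 4 the rank
    # correlation and column 6 the MSE, matching the indexing below.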
try:
f = open(os.path.join(experiment_path, log_filename), 'rt')
max_rc = 0
max_epoch = -1
max_mse = -1
for line in f:
toks = line.split(',')
if toks[0] == 'val':
epoch = toks[1]
try:
rc = float(toks[4])
if rc > max_rc:
max_rc = rc
max_epoch = int(epoch)
max_mse = float(toks[6])
except:
pass
f.close()
chkpt_file = experiment_path + '/' + 'weights_' + str(max_epoch) + '.pkl'
if not os.path.isfile(chkpt_file):
print("WARNING: File ",chkpt_file," does not exists!")
return '', 0, 0, 0
return chkpt_file, max_rc, max_mse, max_epoch
except:
print('WARNING: Could not open ' + os.path.join(experiment_path, log_filename))
return '', 0, 0, 0
def get_split_index(split_filename):
filename, _ = os.path.splitext(split_filename)
id = int(filename.split('_')[-1])
return id
def get_weight_files(split_files, experiment_name, max_rc_checkpoints=True):
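    # Checkpoints are expected under data/<experiment_name>_train_<split_id>/*.pkl.
    # For each split: no files -> empty string, one file -> that file, several
    # files -> either the checkpoint with the best validation RC or the one from
    # the last training epoch, depending on `max_rc_checkpoints`.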
data_dir = 'data'
weight_files = []
for split_filename in split_files:
split_name,_ = os.path.splitext(split_filename)
_, split_id = split_name.split('_')
weight_files_all = os.path.join(data_dir, experiment_name+'_train_'+split_id+'/*.pkl')
files = glob.glob(weight_files_all)
if len(files) == 0:
# No trained model weights for this split
weight_files.append('')
continue
elif len(files) == 1:
weight_files.append(files[0])
else:
# Multiple weights
if max_rc_checkpoints:
weights_dir = os.path.join(data_dir, experiment_name + '_train_' + split_id)
print("Selecting model weights with the highest RC on validation set in ",weights_dir)
weight_file, max_rc, max_mse, max_epoch= get_max_rc_weights(weights_dir)
if weight_file != '':
print('Found: ',weight_file, ' RC=', max_rc, ' MSE=', max_rc, ' epoch=', max_epoch)
weight_files.append(weight_file)
continue
# Get the weights from the last training epoch
files.sort(key=lambda x: get_split_index(x), reverse=True)
weight_file=files[0]
weight_files.append(weight_file)
return weight_files
def run_command(command):
p = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return '\n'.join([ '\t'+line.decode("utf-8").strip() for line in p.stdout.readlines()])
def ge_pkg_versions():
dep_versions = {}
cmd = 'cat /proc/driver/nvidia/version'
display_driver = run_command(cmd)
dep_versions['display'] = display_driver
dep_versions['cuda'] = 'NA'
cuda_home = '/usr/local/cuda/'
if 'CUDA_HOME' in os.environ:
cuda_home = os.environ['CUDA_HOME']
cmd = cuda_home+'/version.txt'
if os.path.isfile(cmd):
cuda_version = run_command('cat '+cmd)
dep_versions['cuda'] = cuda_version
dep_versions['cudnn'] = torch.backends.cudnn.version()
dep_versions['platform'] = platform.platform()
dep_versions['python'] = sys.version_info[0]
dep_versions['torch'] = torch.__version__
dep_versions['numpy'] = np.__version__
    # PIL.VERSION was removed in recent Pillow releases; fall back gracefully
    dep_versions['PIL'] = getattr(Image, '__version__', getattr(Image, 'VERSION', 'NA'))
dep_versions['OpenCV'] = 'NA'
if 'cv2' in sys.modules:
dep_versions['OpenCV'] = cv2.__version__
dep_versions['torchvision'] = pkg_resources.get_distribution("torchvision").version
return dep_versions
def print_pkg_versions():
print("Packages & system versions:")
print("----------------------------------------------------------------------")
versions = ge_pkg_versions()
for key, val in versions.items():
print(key,": ",val)
print("")
return
if __name__ == "__main__":
print_pkg_versions()
split_files = get_split_files('datasets/lamem', 'splits', 'test_*.txt')
print(split_files)
weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3_last', max_rc_checkpoints=True)
# weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3')
print(weight_files) | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__= '2.2'
__status__ = "Research"
__date__ = "28/1/2018"
__license__= "MIT License"
import os
import numpy as np
import glob
import subprocess
import platform
import sys
import pkg_resources
import torch
import PIL as Image
try:
import cv2
except:
print("WARNING: Could not load OpenCV python package. Some functionality may not be available.")
def list_files(path, extensions=[], sort=True, max_len=-1):
if os.path.isdir(path):
filenames = [os.path.join(path, fn) for fn in os.listdir(path) if
any([fn.endswith(ext) for ext in extensions])]
else:
print("ERROR. ", path,' is not a directory!')
return []
if sort:
filenames.sort()
if max_len>-1:
filenames = filenames[:max_len]
return filenames
def get_video_list(video_path, max_len=-1):
return list_files(video_path, extensions=['avi', 'flv', 'mpg', 'mp4'], sort=True, max_len=max_len)
def get_image_list(video_path, max_len=-1):
return list_files(video_path, extensions=['jpg', 'jpeg', 'png'], sort=True, max_len=max_len)
def get_split_files(dataset_path, splits_path, split_name, absolute_path=False):
path = os.path.join(dataset_path, splits_path, split_name)
files = glob.glob(path)
files.sort()
if not absolute_path:
files_out = []
for file in files:
_,filename = os.path.split(file)
files_out.append(filename)
return files_out
return files
def get_max_rc_weights(experiment_path):
log_filename = 'train_log_0.csv'
try:
f = open(os.path.join(experiment_path, log_filename), 'rt')
max_rc = 0
max_epoch = -1
max_mse = -1
for line in f:
toks = line.split(',')
if toks[0] == 'val':
epoch = toks[1]
try:
rc = float(toks[4])
if rc > max_rc:
max_rc = rc
max_epoch = int(epoch)
max_mse = float(toks[6])
except:
pass
f.close()
chkpt_file = experiment_path + '/' + 'weights_' + str(max_epoch) + '.pkl'
if not os.path.isfile(chkpt_file):
print("WARNING: File ",chkpt_file," does not exists!")
return '', 0, 0, 0
return chkpt_file, max_rc, max_mse, max_epoch
except:
print('WARNING: Could not open ' + os.path.join(experiment_path, log_filename))
return '', 0, 0, 0
def get_split_index(split_filename):
filename, _ = os.path.splitext(split_filename)
id = int(filename.split('_')[-1])
return id
def get_weight_files(split_files, experiment_name, max_rc_checkpoints=True):
data_dir = 'data'
weight_files = []
for split_filename in split_files:
split_name,_ = os.path.splitext(split_filename)
_, split_id = split_name.split('_')
weight_files_all = os.path.join(data_dir, experiment_name+'_train_'+split_id+'/*.pkl')
files = glob.glob(weight_files_all)
if len(files) == 0:
# No trained model weights for this split
weight_files.append('')
continue
elif len(files) == 1:
weight_files.append(files[0])
else:
# Multiple weights
if max_rc_checkpoints:
weights_dir = os.path.join(data_dir, experiment_name + '_train_' + split_id)
print("Selecting model weights with the highest RC on validation set in ",weights_dir)
weight_file, max_rc, max_mse, max_epoch= get_max_rc_weights(weights_dir)
if weight_file != '':
                    print('Found: ',weight_file, ' RC=', max_rc, ' MSE=', max_mse, ' epoch=', max_epoch)
weight_files.append(weight_file)
continue
# Get the weights from the last training epoch
files.sort(key=lambda x: get_split_index(x), reverse=True)
weight_file=files[0]
weight_files.append(weight_file)
return weight_files
def run_command(command):
p = subprocess.Popen(command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return '\n'.join([ '\t'+line.decode("utf-8").strip() for line in p.stdout.readlines()])
def ge_pkg_versions():
dep_versions = {}
cmd = 'cat /proc/driver/nvidia/version'
display_driver = run_command(cmd)
dep_versions['display'] = display_driver
dep_versions['cuda'] = 'NA'
cuda_home = '/usr/local/cuda/'
if 'CUDA_HOME' in os.environ:
cuda_home = os.environ['CUDA_HOME']
cmd = cuda_home+'/version.txt'
if os.path.isfile(cmd):
cuda_version = run_command('cat '+cmd)
dep_versions['cuda'] = cuda_version
dep_versions['cudnn'] = torch.backends.cudnn.version()
dep_versions['platform'] = platform.platform()
dep_versions['python'] = sys.version_info[0]
dep_versions['torch'] = torch.__version__
dep_versions['numpy'] = np.__version__
dep_versions['PIL'] = Image.VERSION
dep_versions['OpenCV'] = 'NA'
if 'cv2' in sys.modules:
dep_versions['OpenCV'] = cv2.__version__
dep_versions['torchvision'] = pkg_resources.get_distribution("torchvision").version
return dep_versions
def print_pkg_versions():
print("Packages & system versions:")
print("----------------------------------------------------------------------")
versions = ge_pkg_versions()
for key, val in versions.items():
print(key,": ",val)
print("")
return
if __name__ == "__main__":
print_pkg_versions()
split_files = get_split_files('datasets/lamem', 'splits', 'test_*.txt')
print(split_files)
weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3_last', max_rc_checkpoints=True)
# weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3')
print(weight_files) | en | 0.813004 | # No trained model weights for this split # Multiple weights # Get the weights from the last training epoch # weight_files = get_weight_files(split_files, experiment_name='lamem_ResNet50FC_lstm3') | 2.45546 | 2 |
python/aghast/aghast_generated/Slice.py | HDembinski/aghast | 18 | 9537 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: aghast_generated
import flatbuffers
class Slice(object):
__slots__ = ["_tab"]
# Slice
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Slice
def Start(self):
return self._tab.Get(
flatbuffers.number_types.Int64Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0),
)
# Slice
def Stop(self):
return self._tab.Get(
flatbuffers.number_types.Int64Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8),
)
# Slice
def Step(self):
return self._tab.Get(
flatbuffers.number_types.Int32Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16),
)
# Slice
def HasStart(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20),
)
# Slice
def HasStop(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(21),
)
# Slice
def HasStep(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(22),
)
def CreateSlice(builder, start, stop, step, hasStart, hasStop, hasStep):
builder.Prep(8, 24)
builder.Pad(1)
builder.PrependBool(hasStep)
builder.PrependBool(hasStop)
builder.PrependBool(hasStart)
builder.PrependInt32(step)
builder.PrependInt64(stop)
builder.PrependInt64(start)
return builder.Offset()
| # automatically generated by the FlatBuffers compiler, do not modify
# namespace: aghast_generated
import flatbuffers
class Slice(object):
__slots__ = ["_tab"]
# Slice
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Slice
def Start(self):
return self._tab.Get(
flatbuffers.number_types.Int64Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0),
)
# Slice
def Stop(self):
return self._tab.Get(
flatbuffers.number_types.Int64Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8),
)
# Slice
def Step(self):
return self._tab.Get(
flatbuffers.number_types.Int32Flags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16),
)
# Slice
def HasStart(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20),
)
# Slice
def HasStop(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(21),
)
# Slice
def HasStep(self):
return self._tab.Get(
flatbuffers.number_types.BoolFlags,
self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(22),
)
def CreateSlice(builder, start, stop, step, hasStart, hasStop, hasStep):
builder.Prep(8, 24)
builder.Pad(1)
builder.PrependBool(hasStep)
builder.PrependBool(hasStop)
builder.PrependBool(hasStart)
builder.PrependInt32(step)
builder.PrependInt64(stop)
builder.PrependInt64(start)
return builder.Offset()
| en | 0.517139 | # automatically generated by the FlatBuffers compiler, do not modify # namespace: aghast_generated # Slice # Slice # Slice # Slice # Slice # Slice # Slice | 2.100713 | 2 |
axelrod/tests/strategies/test_mystrategy.py | AleksaLuka/Axelrod | 0 | 9538 | <reponame>AleksaLuka/Axelrod
import axelrod as axl
from .test_player import TestPlayer
C, D = axl.Action.C, axl.Action.D
class TestMyStrategy(TestPlayer):
name = "MyStrategy"
    player = axl.MyStrategy
expected_classifier = {
"memory_depth": 1,
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
# First move is random.
actions = [(C, C), (C, D), (D, C)]
self.versus_test(
opponent=axl.Alternator(), expected_actions=actions, seed=1
)
actions = [(C, C), (C, D), (D, C)]
self.versus_test(
opponent=axl.Alternator(), expected_actions=actions, seed=2
)
actions = [(C, C), (C, C), (C, C)]
self.versus_test(
opponent=axl.Cooperator(), expected_actions=actions, seed=1
)
actions = [(C, D), (D, D), (D, D)]
self.versus_test(
opponent=axl.Defector(), expected_actions=actions, seed=2
)
| import axelrod as axl
from .test_player import TestPlayer
C, D = axl.Action.C, axl.Action.D
class TestMyStrategy(TestPlayer):
name = "MyStrategy"
    player = axl.MyStrategy
expected_classifier = {
"memory_depth": 1,
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_strategy(self):
# First move is random.
actions = [(C, C), (C, D), (D, C)]
self.versus_test(
opponent=axl.Alternator(), expected_actions=actions, seed=1
)
actions = [(C, C), (C, D), (D, C)]
self.versus_test(
opponent=axl.Alternator(), expected_actions=actions, seed=2
)
actions = [(C, C), (C, C), (C, C)]
self.versus_test(
opponent=axl.Cooperator(), expected_actions=actions, seed=1
)
actions = [(C, D), (D, D), (D, D)]
self.versus_test(
opponent=axl.Defector(), expected_actions=actions, seed=2
) | en | 0.872668 | # First move is random. | 2.835796 | 3 |
analyzer/BannerTool.py | Gr1ph00n/staticwebanalyzer | 0 | 9539 | <reponame>Gr1ph00n/staticwebanalyzer
#FILE NAME: BannerTool.py
#created by: <NAME>
#purpose: banner localization
#last edited by: <NAME>
#INSTALL: BeautifulSoup
#TODO: this code is a blob, must be refactorized!!!!
import re
import mechanize
import socket
import urllib
from tools import BaseTool
from bs4 import BeautifulSoup
from pprint import pprint
from ipwhois import IPWhois, WhoisLookupError
from tld import get_tld
import urlparse
from tld.exceptions import TldIOError, TldDomainNotFound, TldBadUrl
from tools import ToolException
class BannerTool(BaseTool):
def __init__(self, config):
BaseTool.__init__(self, "BannerAnalyzer", config, needRefresh = True)
self.values = []
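    # Scan every <a> tag that wraps an <img> (banner candidate), classify internal vs. external links,
    # and geolocate the hosts of external banners via an IP WHOIS lookup.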
def run(self, browser):
try:
url = browser.url.replace('http://','')
print url+"\n"
#response = browser.open(url)
html = browser.httpResponse #response.get_data()
site_domain_name = get_tld(browser.url)
#print(site_domain_name)
soup = BeautifulSoup(html)
links = soup.findAll('a')
response_domain = ""
addr = ""
name = ""
state = ""
city = ""
description = ""
country = ""
foo_flag = 0
flag = 0
for link in links:
foo = link.findChild('img')
#print foo
if foo is not None:
foo_flag = 1
flag = 1
href = link.get('href')
if href is None:
continue
print(href+"\n")
                    if href.startswith('/'):
                        response_domain ="link interno"
                        print ("link interno\n")
elif href.startswith("http://"+url):
response_domain ="link interno"
print ("link interno\n")
elif href.startswith("https://"+url):
response_domain ="link interno"
print ("link interno\n")
else:
response_domain ="link esterno"
print ("link esterno... Geolocalizzazione:\n")
try:
banner_domain_name = get_tld(href)
print(banner_domain_name+"\n")
print(site_domain_name)
url = 'https://' + url if not banner_domain_name.startswith('http') else banner_domain_name.replace('http:', 'https:')
parsed = urlparse.urlparse(url)
hostname = "%s://%s" % (parsed.scheme, parsed.netloc)
url = url.split("//")[1]
url_s = url.split("/")[0]
ip = socket.gethostbyname(url_s)
#print(href)
#get ip by url
#ip = socket.gethostbyname(banner_domain_name)
#get information by ip
result = None
try:
obj = IPWhois(ip)
result = obj.lookup()
                            except Exception as e:
continue
addr = result['nets'][0]['address'] if result['nets'][0]['address'] != None else 'None'
name = result['nets'][0]['name'] if result['nets'][0]['name'] != None else 'None'
state = result['nets'][0]['state'] if result['nets'][0]['state'] != None else 'None'
city = result['nets'][0]['city'] if result['nets'][0]['city'] != None else 'None'
description = result['nets'][0]['description'] if result['nets'][0]['description'] != None else 'None'
country = result['nets'][0]['country'] if result['nets'][0]['country'] != None else 'None'
'''
self.values.append(["Link analyzed",href])
self.values.append(["Response",response_domain])
self.values.append(["Address", addr])
self.values.append(["Name", name])
self.values.append(["State", state])
self.values.append(["City", city])
self.values.append(["Description", description])
self.values.append(["Country", country])
print('Name: ' + name + '\n' + 'Description: ' + description + '\n' + 'Address: ' +
addr + '\n' + 'Country: ' + country + '\n' + 'State: ' + state + '\n' + 'City: ' + city)
'''
temp = {
"Url" : url,
"Address" : addr,
"Name" : name,
"State" : state,
"City" : city,
"Description" : description,
"Country" : country,
"Response" : response_domain
}
self.values.append({ "Link analyzed %s" % (href) : temp })
except TldBadUrl as e:
print ("Bad URL!")
if flag == 0:
print("There aren' t extra domain banners in this site")
if(foo_flag == 0):
print("There aren't banner in this site")
except WhoisLookupError as e:
raise ToolException(str(e))
return len(self.values) >= self.config.getInt("banner_count_treshold", 0)
def createModel(self):
return False, ["key","value"], self.values
| #FILE NAME: BannerTool.py
#created by: <NAME>
#purpose: banner localization
#last edited by: <NAME>
#INSTALL: BeautifulSoup
#TODO: this code is a blob, must be refactorized!!!!
import re
import mechanize
import socket
import urllib
from tools import BaseTool
from bs4 import BeautifulSoup
from pprint import pprint
from ipwhois import IPWhois, WhoisLookupError
from tld import get_tld
import urlparse
from tld.exceptions import TldIOError, TldDomainNotFound, TldBadUrl
from tools import ToolException
class BannerTool(BaseTool):
def __init__(self, config):
BaseTool.__init__(self, "BannerAnalyzer", config, needRefresh = True)
self.values = []
def run(self, browser):
try:
url = browser.url.replace('http://','')
print url+"\n"
#response = browser.open(url)
html = browser.httpResponse #response.get_data()
site_domain_name = get_tld(browser.url)
#print(site_domain_name)
soup = BeautifulSoup(html)
links = soup.findAll('a')
response_domain = ""
addr = ""
name = ""
state = ""
city = ""
description = ""
country = ""
foo_flag = 0
flag = 0
for link in links:
foo = link.findChild('img')
#print foo
if foo is not None:
foo_flag = 1
flag = 1
href = link.get('href')
if href is None:
continue
print(href+"\n")
                    if href.startswith('/'):
                        response_domain ="link interno"
                        print ("link interno\n")
elif href.startswith("http://"+url):
response_domain ="link interno"
print ("link interno\n")
elif href.startswith("https://"+url):
response_domain ="link interno"
print ("link interno\n")
else:
response_domain ="link esterno"
print ("link esterno... Geolocalizzazione:\n")
try:
banner_domain_name = get_tld(href)
print(banner_domain_name+"\n")
print(site_domain_name)
url = 'https://' + url if not banner_domain_name.startswith('http') else banner_domain_name.replace('http:', 'https:')
parsed = urlparse.urlparse(url)
hostname = "%s://%s" % (parsed.scheme, parsed.netloc)
url = url.split("//")[1]
url_s = url.split("/")[0]
ip = socket.gethostbyname(url_s)
#print(href)
#get ip by url
#ip = socket.gethostbyname(banner_domain_name)
#get information by ip
result = None
try:
obj = IPWhois(ip)
result = obj.lookup()
                            except Exception as e:
continue
addr = result['nets'][0]['address'] if result['nets'][0]['address'] != None else 'None'
name = result['nets'][0]['name'] if result['nets'][0]['name'] != None else 'None'
state = result['nets'][0]['state'] if result['nets'][0]['state'] != None else 'None'
city = result['nets'][0]['city'] if result['nets'][0]['city'] != None else 'None'
description = result['nets'][0]['description'] if result['nets'][0]['description'] != None else 'None'
country = result['nets'][0]['country'] if result['nets'][0]['country'] != None else 'None'
'''
self.values.append(["Link analyzed",href])
self.values.append(["Response",response_domain])
self.values.append(["Address", addr])
self.values.append(["Name", name])
self.values.append(["State", state])
self.values.append(["City", city])
self.values.append(["Description", description])
self.values.append(["Country", country])
print('Name: ' + name + '\n' + 'Description: ' + description + '\n' + 'Address: ' +
addr + '\n' + 'Country: ' + country + '\n' + 'State: ' + state + '\n' + 'City: ' + city)
'''
temp = {
"Url" : url,
"Address" : addr,
"Name" : name,
"State" : state,
"City" : city,
"Description" : description,
"Country" : country,
"Response" : response_domain
}
self.values.append({ "Link analyzed %s" % (href) : temp })
except TldBadUrl as e:
print ("Bad URL!")
if flag == 0:
print("There aren' t extra domain banners in this site")
if(foo_flag == 0):
print("There aren't banner in this site")
except WhoisLookupError as e:
raise ToolException(str(e))
return len(self.values) >= self.config.getInt("banner_count_treshold", 0)
def createModel(self):
return False, ["key","value"], self.values | en | 0.310984 | #FILE NAME: BannerTool.py #created by: <NAME> #purpose: banner localization #last edited by: <NAME> #INSTALL: BeautifulSoup #TODO: this code is a blob, must be refactorized!!!! #response = browser.open(url) #response.get_data() #print(site_domain_name) #print foo #print(href) #get ip by url #ip = socket.gethostbyname(banner_domain_name) #get information by ip self.values.append(["Link analyzed",href]) self.values.append(["Response",response_domain]) self.values.append(["Address", addr]) self.values.append(["Name", name]) self.values.append(["State", state]) self.values.append(["City", city]) self.values.append(["Description", description]) self.values.append(["Country", country]) print('Name: ' + name + '\n' + 'Description: ' + description + '\n' + 'Address: ' + addr + '\n' + 'Country: ' + country + '\n' + 'State: ' + state + '\n' + 'City: ' + city) | 2.67969 | 3 |
rhea/build/toolflow/xilinx/__init__.py | meetps/rhea | 1 | 9540 |
from .ise import ISE
from .vivado import Vivado
|
from .ise import ISE
from .vivado import Vivado
| none | 1 | 0.998369 | 1 |
|
app/AccountManagment.py | fredpan/Prosopagnosia_Web_Server | 0 | 9541 | # Copyright 2020 EraO Prosopagnosia Helper Dev Team, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Supervised by Prof. <NAME> (http://www.eecg.toronto.edu/~mann/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mysql.connector
import re
import time
from app.sql.config.DbConfig import db_config
from flask import render_template, redirect, url_for, request, g, session
from flask_bcrypt import Bcrypt
from app import EmailSender as email_confirmation
from app import webapp
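# Characters allowed in usernames (letters and digits only); length limits are enforced separately at signup.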
validUsernameChar = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# The function used to establish connection to sql database
def connect_to_database():
'''
Function used to connect to database
:return:
'''
return mysql.connector.connect(user=db_config['user'], password=db_config['password'], host=db_config['host'],
database=db_config['database'], use_pure=True)
def get_database():
'''
function used to get database
:return:
'''
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_to_database()
return db
"""
#############################################################
Login Settings
############################################################
"""
@webapp.route('/login', methods=['GET', 'POST'])
def user_login():
'''
This function takes GET/POST http request with URL of "/login"
It returns the user with an html website of the login page
:return: the rendered "login_index.html"
'''
return render_template("/login_index.html", title="Welcome")
@webapp.route('/login_submit', methods=['POST'])
def login_submit():
'''
This function takes POST http request with URL of "/login_submit". It firstly reads the user submitted username,
    password and the checked status of the "remember me" option. Depending on whether the user checked "remember me", the function
    adjusts the session expiry time by changing the value of webapp.permanent_session_lifetime. The function then
    connects to the database and reads the search results based on the user inputs. If no search result is found for
    the provided username, the function returns the user to "login_index.html" with an error message; if the
    submitted password does not match the stored bcrypt hash, the function also returns "login_index.html" with an
    error message. If all checks pass, the function redirects to the URL "/secure/index".
:return: /login_index.html or /secure/index
'''
session.permanent = True
bcrypt = Bcrypt(webapp)
username = request.form['username']
password = request.form['password']
remember = request.form.get('remember')
print(remember)
rememberMe = False
# if remember!=None and remember=="on":
if remember:
rememberMe = True
else:
session.clear()
webapp.permanent_session_lifetime = datetime.timedelta(milliseconds=0)
# password = <PASSWORD>.generate_password_hash(password).decode("utf-8")
# bcrypt.check_password_hash
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT password FROM user_info WHERE username = %s and active = 1"
cursor.execute(query, (username,))
results = cursor.fetchall()
if len(results) == 1:
hashed_pwd = results[0][0]
if bcrypt.check_password_hash(hashed_pwd, password):
session['authenticated'] = True
session['username'] = username
session['error'] = None
if rememberMe:
webapp.permanent_session_lifetime = datetime.timedelta(weeks=1)
return redirect(url_for('sensitive'))
session['username'] = username
session['error'] = "<=Error! Incorrect username or password!=>"
return render_template("/login_index.html", title="Main Page", username=username, error=session['error'])
"""
#############################################################
Sign up Settings
############################################################
"""
# Display an empty HTML form that allows users to fill the info and sign up.
@webapp.route('/signup', methods=['GET'])
def user_signup():
'''
This function takes GET http request with URL of "/signup"
It returns the user with an html website of the signup page
:return: the rendered "signup_index.html"
'''
return render_template("signup_index.html", title="Join Us!")
# Create a new account and save them in the database.
@webapp.route('/signup/save', methods=['POST'])
def sign_up_save():
'''
This function takes POST http request with a URL of "/signup/save". It firstly reads the user submitted username,
password1 and password2. It then connects to the database to check if there is already an existing username in the
database. The function also checks whether the user provided all the necessary information; whether the format of
the username and password are correct and whether the two passwords match. If any of the above condition failed,
the function will return user with "signup_index.html" with error message. If not, the function will insert the
user provided information to the database and return "signup_succeed_index.html" page to user indicating the user
has successfully created a new account.
:return: "signup_index.html" or "signup_succeed_index.html"
'''
bcrypt = Bcrypt(webapp)
# need to trim the user name
username = request.form.get('username', "")
password1 = request.form.get('password1', "")
password2 = request.form.get('password2', "")
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT COUNT(username) FROM user_info WHERE username = %s "
cursor.execute(query, (username,))
results = cursor.fetchall()
numberOfExistUser = results[0][0]
if username == "" or password1 == "" or password2 == "":
error_msg = "Error: All fields are required!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if re.findall(r'\s+', username) != []:
error_msg = "Error: No space allowed in user name!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if numberOfExistUser != 0:
error_msg = "Error: User name already exist!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if not (password1 == <PASSWORD>):
error_msg = "Error: Two passwords not matching!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if (len(username) > 20 or len(username) < 1) or not all(c in validUsernameChar for c in username):
print(len(username))
error_msg = "Error: Username violation, username must have length between 1 to 20, only letters and numbers allowed"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if len(password1) > 16 or len(password1) < 1:
error_msg = "Error: Password length violation"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
password = <PASSWORD>.generate_password_hash(<PASSWORD>).decode("utf-8")
query = ''' INSERT INTO user_info (username,password,create_date,active)
VALUES (%s,%s, %s,1)
'''
cursor.execute(query, (username, password, timestamp))
cnx.commit()
# Add error catch here for sql
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username, password=<PASSWORD>)
"""
#############################################################
Secure Index
############################################################
"""
@webapp.route('/secure/index', methods=['GET', 'POST'])
def sensitive():
'''
This function takes GET/POST http request with URL of "/secure/index". The function firstly check if the user
session has key of “authenticated” and value of True which indicating the user has passed the security check.
If not, the user will be redirected back to ‘/user_login’. If the user session contains “authenticated” and
has a value of True, the function will perform a database search based on the “username” in the client’s
session and store the user’s uid, upload_counter and create_date into the session and return the page
of "/secured_index.html".
:return: "/secure/index" or "/secured_index.html"
'''
if 'authenticated' not in session:
return redirect(url_for('user_login'))
# ==========Read user Info and sign in =========#
if session['authenticated'] == True:
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT uid , create_date FROM user_info WHERE username = %s and active = 1"
cursor.execute(query, (session['username'],))
results = cursor.fetchall()
uid = results[0][0]
memberSince = results[0][1]
session['uid'] = uid
session['membersince'] = memberSince
return render_template("/secured_index.html", username=session['username'], membersince=session['membersince'])
else:
return redirect(url_for('user_login'))
@webapp.route('/logout', methods=['GET', 'POST'])
def logout():
'''
    This function takes GET/POST http requests with URL of “/logout”. The function clears all the contents of the
    current user’s session and terminates the session’s lifetime. The function then redirects the user to
    the main page.
:return: /secure/index
'''
session.clear()
webapp.permanent_session_lifetime = datetime.timedelta(milliseconds=0)
return redirect(url_for("sensitive"))
"""
#############################################################
Send Email
############################################################
"""
# Create a new account and save them in the database.
@webapp.route('/signup/send_email', methods=['POST'])
def send_email():
'''
    This function takes POST http request with URL of “/signup/send_email”. The function reads the user email,
    username and password and checks with a regex whether the email address is well formed. If the email address is correct,
    it calls the “send_email” function of the “EmailSender” class, which sends an email to the user with the registered
    username and password, and redirects the user back to “signup_succeed_index.html” with a success message. If the
    provided email address is not in a correct form, the function redirects back to “signup_succeed_index.html” with
    an error message.
:return: “signup_succeed_index.html”
'''
# need to trim the user name
email = request.form.get('email', "")
username = request.form.get('username', "")
password = request.form.get('password', "")
if not re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$", email):
error_msg = "Error: Not a correct email address!"
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username,
password=password, error_msg=error_msg)
# send email
email_confirmation.send_email(email, username, password)
success_msg = "=================Email Sent!==================="
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username, password=password,
success_msg=success_msg)
| # Copyright 2020 EraO Prosopagnosia Helper Dev Team, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Supervised by Prof. <NAME> (http://www.eecg.toronto.edu/~mann/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mysql.connector
import re
import time
from app.sql.config.DbConfig import db_config
from flask import render_template, redirect, url_for, request, g, session
from flask_bcrypt import Bcrypt
from app import EmailSender as email_confirmation
from app import webapp
validUsernameChar = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# The function used to establish connection to sql database
def connect_to_database():
'''
Function used to connect to database
:return:
'''
return mysql.connector.connect(user=db_config['user'], password=db_config['password'], host=db_config['host'],
database=db_config['database'], use_pure=True)
def get_database():
'''
function used to get database
:return:
'''
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_to_database()
return db
"""
#############################################################
Login Settings
############################################################
"""
@webapp.route('/login', methods=['GET', 'POST'])
def user_login():
'''
This function takes GET/POST http request with URL of "/login"
It returns the user with an html website of the login page
:return: the rendered "login_index.html"
'''
return render_template("/login_index.html", title="Welcome")
@webapp.route('/login_submit', methods=['POST'])
def login_submit():
'''
This function takes POST http request with URL of "/login_submit". It firstly reads the user submitted username,
    password and the checked status of the "remember me" option. Depending on whether the user checked "remember me", the function
    adjusts the session expiry time by changing the value of webapp.permanent_session_lifetime. The function then
    connects to the database and reads the search results based on the user inputs. If no search result is found for
    the provided username, the function returns the user to "login_index.html" with an error message; if the
    submitted password does not match the stored bcrypt hash, the function also returns "login_index.html" with an
    error message. If all checks pass, the function redirects to the URL "/secure/index".
:return: /login_index.html or /secure/index
'''
session.permanent = True
bcrypt = Bcrypt(webapp)
username = request.form['username']
password = request.form['password']
remember = request.form.get('remember')
print(remember)
rememberMe = False
# if remember!=None and remember=="on":
if remember:
rememberMe = True
else:
session.clear()
webapp.permanent_session_lifetime = datetime.timedelta(milliseconds=0)
# password = <PASSWORD>.generate_password_hash(password).decode("utf-8")
# bcrypt.check_password_hash
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT password FROM user_info WHERE username = %s and active = 1"
cursor.execute(query, (username,))
results = cursor.fetchall()
if len(results) == 1:
hashed_pwd = results[0][0]
if bcrypt.check_password_hash(hashed_pwd, password):
session['authenticated'] = True
session['username'] = username
session['error'] = None
if rememberMe:
webapp.permanent_session_lifetime = datetime.timedelta(weeks=1)
return redirect(url_for('sensitive'))
session['username'] = username
session['error'] = "<=Error! Incorrect username or password!=>"
return render_template("/login_index.html", title="Main Page", username=username, error=session['error'])
"""
#############################################################
Sign up Settings
############################################################
"""
# Display an empty HTML form that allows users to fill the info and sign up.
@webapp.route('/signup', methods=['GET'])
def user_signup():
'''
This function takes GET http request with URL of "/signup"
It returns the user with an html website of the signup page
:return: the rendered "signup_index.html"
'''
return render_template("signup_index.html", title="Join Us!")
# Create a new account and save them in the database.
@webapp.route('/signup/save', methods=['POST'])
def sign_up_save():
'''
This function takes POST http request with a URL of "/signup/save". It firstly reads the user submitted username,
password1 and password2. It then connects to the database to check if there is already an existing username in the
database. The function also checks whether the user provided all the necessary information; whether the format of
the username and password are correct and whether the two passwords match. If any of the above condition failed,
the function will return user with "signup_index.html" with error message. If not, the function will insert the
user provided information to the database and return "signup_succeed_index.html" page to user indicating the user
has successfully created a new account.
:return: "signup_index.html" or "signup_succeed_index.html"
'''
bcrypt = Bcrypt(webapp)
# need to trim the user name
username = request.form.get('username', "")
password1 = request.form.get('password1', "")
password2 = request.form.get('password2', "")
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT COUNT(username) FROM user_info WHERE username = %s "
cursor.execute(query, (username,))
results = cursor.fetchall()
numberOfExistUser = results[0][0]
if username == "" or password1 == "" or password2 == "":
error_msg = "Error: All fields are required!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if re.findall(r'\s+', username) != []:
error_msg = "Error: No space allowed in user name!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if numberOfExistUser != 0:
error_msg = "Error: User name already exist!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if not (password1 == <PASSWORD>):
error_msg = "Error: Two passwords not matching!"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if (len(username) > 20 or len(username) < 1) or not all(c in validUsernameChar for c in username):
print(len(username))
error_msg = "Error: Username violation, username must have length between 1 to 20, only letters and numbers allowed"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
if len(password1) > 16 or len(password1) < 1:
error_msg = "Error: Password length violation"
return render_template("signup_index.html", title="Sign Up", error_msg=error_msg,
username=username, password1=<PASSWORD>, password2=<PASSWORD>)
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
password = <PASSWORD>.generate_password_hash(<PASSWORD>).decode("utf-8")
query = ''' INSERT INTO user_info (username,password,create_date,active)
VALUES (%s,%s, %s,1)
'''
cursor.execute(query, (username, password, timestamp))
cnx.commit()
# Add error catch here for sql
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username, password=<PASSWORD>)
"""
#############################################################
Secure Index
############################################################
"""
@webapp.route('/secure/index', methods=['GET', 'POST'])
def sensitive():
'''
This function takes GET/POST http request with URL of "/secure/index". The function firstly check if the user
session has key of “authenticated” and value of True which indicating the user has passed the security check.
If not, the user will be redirected back to ‘/user_login’. If the user session contains “authenticated” and
has a value of True, the function will perform a database search based on the “username” in the client’s
session and store the user’s uid, upload_counter and create_date into the session and return the page
of "/secured_index.html".
:return: "/secure/index" or "/secured_index.html"
'''
if 'authenticated' not in session:
return redirect(url_for('user_login'))
# ==========Read user Info and sign in =========#
if session['authenticated'] == True:
# connect to database
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT uid , create_date FROM user_info WHERE username = %s and active = 1"
cursor.execute(query, (session['username'],))
results = cursor.fetchall()
uid = results[0][0]
memberSince = results[0][1]
session['uid'] = uid
session['membersince'] = memberSince
return render_template("/secured_index.html", username=session['username'], membersince=session['membersince'])
else:
return redirect(url_for('user_login'))
@webapp.route('/logout', methods=['GET', 'POST'])
def logout():
'''
    This function takes GET/POST http requests with URL of “/logout”. The function clears all the contents of the
    current user’s session and terminates the session’s lifetime. The function then redirects the user to
    the main page.
:return: /secure/index
'''
session.clear()
webapp.permanent_session_lifetime = datetime.timedelta(milliseconds=0)
return redirect(url_for("sensitive"))
"""
#############################################################
Send Email
############################################################
"""
# Create a new account and save them in the database.
@webapp.route('/signup/send_email', methods=['POST'])
def send_email():
'''
    This function takes POST http request with URL of “/signup/send_email”. The function reads the user email,
    username and password and checks with a regex whether the email address is well formed. If the email address is correct,
    it calls the “send_email” function of the “EmailSender” class, which sends an email to the user with the registered
    username and password, and redirects the user back to “signup_succeed_index.html” with a success message. If the
    provided email address is not in a correct form, the function redirects back to “signup_succeed_index.html” with
    an error message.
:return: “signup_succeed_index.html”
'''
# need to trim the user name
email = request.form.get('email', "")
username = request.form.get('username', "")
password = request.form.get('password', "")
if not re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$", email):
error_msg = "Error: Not a correct email address!"
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username,
password=password, error_msg=error_msg)
# send email
email_confirmation.send_email(email, username, password)
success_msg = "=================Email Sent!==================="
return render_template("signup_succeed_index.html", title="Sign Up Succeed", username=username, password=password,
success_msg=success_msg)
| en | 0.695935 | # Copyright 2020 EraO Prosopagnosia Helper Dev Team, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Supervised by Prof. <NAME> (http://www.eecg.toronto.edu/~mann/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The function used to establish connection to sql database Function used to connect to database :return: function used to get database :return: ############################################################# Login Settings ############################################################ This function takes GET/POST http request with URL of "/login" It returns the user with an html website of the login page :return: the rendered "login_index.html" This function takes POST http request with URL of "/login_submit". It firstly reads the user submitted username, password and the check statue of "remember me" option based on whether the user checked "remember me" the function adjust the session expiry time by adjusting the value of webapp.permanent_session_lifetime. The function then connects to the database and reads the search results based on user inputs. If no search results find based on the user provided username, the function will return the user with "login_index.html" with error message; if the user input password doesn't match the database password after bcrypt,the function will return the user with login_index.html" with error message; If it passed all the condition, the function will redirect to URL"/secure/index" :return: /login_index.html or /secure/index # if remember!=None and remember=="on": # password = <PASSWORD>.generate_password_hash(password).decode("utf-8") # bcrypt.check_password_hash # connect to database ############################################################# Sign up Settings ############################################################ # Display an empty HTML form that allows users to fill the info and sign up. This function takes GET http request with URL of "/signup" It returns the user with an html website of the signup page :return: the rendered "signup_index.html" # Create a new account and save them in the database. This function takes POST http request with a URL of "/signup/save". It firstly reads the user submitted username, password1 and password2. It then connects to the database to check if there is already an existing username in the database. The function also checks whether the user provided all the necessary information; whether the format of the username and password are correct and whether the two passwords match. If any of the above condition failed, the function will return user with "signup_index.html" with error message. If not, the function will insert the user provided information to the database and return "signup_succeed_index.html" page to user indicating the user has successfully created a new account. 
:return: "signup_index.html" or "signup_succeed_index.html" # need to trim the user name # connect to database INSERT INTO user_info (username,password,create_date,active) VALUES (%s,%s, %s,1) # Add error catch here for sql ############################################################# Secure Index ############################################################ This function takes GET/POST http request with URL of "/secure/index". The function firstly check if the user session has key of “authenticated” and value of True which indicating the user has passed the security check. If not, the user will be redirected back to ‘/user_login’. If the user session contains “authenticated” and has a value of True, the function will perform a database search based on the “username” in the client’s session and store the user’s uid, upload_counter and create_date into the session and return the page of "/secured_index.html". :return: "/secure/index" or "/secured_index.html" # ==========Read user Info and sign in =========# # connect to database This function takes GET/POST http request with URL of “/logout”. The function clear all the contents in the current user’s session and terminate the user’s session’s lifetime. The function then redirect the user to the main page. :return: /secure/index ############################################################# Send Email ############################################################ # Create a new account and save them in the database. This function takes POST http request with URL of “/signup/send_email”. The function read the user email, username and password and check if the user email is in correct form with Regex, if the email address is correct, it will call “send_email” function in “EmailSender” class which can send an email to the user with registered username and password and redirect the user back to “signup_succeed_index.html” with success message. If the user provided email address is not a correct form, the function will redirect back to “signup_succeed_index.html” with error message. :return: “signup_succeed_index.html” # need to trim the user name # send email | 1.8744 | 2 |
scripts/slice.py | priyablue/lidar_navigation | 2 | 9542 | <reponame>priyablue/lidar_navigation
import math
from point2d import Point2D
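# Convert a polar coordinate (angle in radians, distance) into a Cartesian Point2D.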
def to_point(rads, dist):
x = math.cos(rads) * dist
y = math.sin(rads) * dist
return Point2D(x, y)
class Slice(object):
def __init__(self, begin, end):
self.__begin = begin
self.__end = end
self.__begin_rad = math.radians(self.__begin)
self.__end_rad = math.radians(self.__end)
self.__begin_point = None
self.__end_point = None
# Calculate the angle halfway between the begin and end
self.__mid_rad = math.radians(self.__begin + ((self.__end - self.__begin) / 2.0))
self.__nearest_point = None
@property
def nearest(self):
return self.__nearest_point
def __contains__(self, point):
return self.__begin <= point.angle <= self.__end
def begin_point(self, max_dist):
return to_point(self.__begin_rad, max_dist)
def end_point(self, max_dist):
return to_point(self.__end_rad, max_dist)
def add_point(self, point):
# See if point is closer than the previously closest point
if point.origin_dist < self.__nearest_point.origin_dist:
self.__nearest_point = point
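    # Reset the nearest point to the slice midline at maximum range until a closer scan point is added.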
def reset(self, max_dist):
self.__nearest_point = to_point(self.__mid_rad, max_dist)
def __str__(self):
return "Begin: {} End: {} Nearest: {}".format(self.__begin, self.__end, self.__nearest_point)
| import math
from point2d import Point2D
def to_point(rads, dist):
x = math.cos(rads) * dist
y = math.sin(rads) * dist
return Point2D(x, y)
class Slice(object):
def __init__(self, begin, end):
self.__begin = begin
self.__end = end
self.__begin_rad = math.radians(self.__begin)
self.__end_rad = math.radians(self.__end)
self.__begin_point = None
self.__end_point = None
# Calculate the angle halfway between the begin and end
self.__mid_rad = math.radians(self.__begin + ((self.__end - self.__begin) / 2.0))
self.__nearest_point = None
@property
def nearest(self):
return self.__nearest_point
def __contains__(self, point):
return self.__begin <= point.angle <= self.__end
def begin_point(self, max_dist):
return to_point(self.__begin_rad, max_dist)
def end_point(self, max_dist):
return to_point(self.__end_rad, max_dist)
def add_point(self, point):
# See if point is closer than the previously closest point
if point.origin_dist < self.__nearest_point.origin_dist:
self.__nearest_point = point
def reset(self, max_dist):
self.__nearest_point = to_point(self.__mid_rad, max_dist)
def __str__(self):
return "Begin: {} End: {} Nearest: {}".format(self.__begin, self.__end, self.__nearest_point) | en | 0.923255 | # Calculate the angle halfway between the begin and end # See if point is closer than the previously closest point | 3.272705 | 3 |
src/audio_korpora_pipeline/inputadapter/adapters.py | WernerDreier/audio-korpora-pipeline | 1 | 9543 | import concurrent
import os
import re
import shutil
import xml.etree.ElementTree as ET # TODO do we have this as requirement?
from concurrent.futures import as_completed
from pathlib import Path
import ffmpeg
import pandas as pd
import webrtcvad
from audio_korpora_pipeline.baseobjects import FileHandlingObject
from audio_korpora_pipeline.inputadapter.audiosplit.splitter import Splitter
from audio_korpora_pipeline.metamodel.mediasession import MediaAnnotationBundle, \
MediaAnnotationBundleWithoutTranscription, WrittenResource, MediaFile, \
MediaSessionActor, Sex, \
MediaSessionActors, MediaSession
class Adapter(FileHandlingObject):
def __init__(self, config):
super(Adapter, self).__init__()
def toMetamodel(self) -> MediaSession:
raise NotImplementedError("Please use a subclass")
def skipAlreadyProcessedFiles(self):
skip = self.config['global']['skipAlreadyProcessedFiles']
if not (skip):
self.logger.warn("No config setting for skipAlreadyProcessedFiles set. Assuming True")
return True
return skip
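# Base adapter: converts arbitrary media to 16 kHz mono WAV via ffmpeg and splits it into
# voice-activity chunks with webrtcvad, producing untranscribed MediaAnnotationBundles.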
class UntranscribedMediaSplittingAdapter(Adapter):
AUDIO_SPLIT_AGRESSIVENESS = 3 # webrtcvad 1 (low), 3 (max)
ADAPTERNAME = "MediaSplittingAdapter"
mediaAnnotationBundles = []
  mediaSessionActors = set()  # using a set so we don't have duplicates
def __init__(self, config):
super(UntranscribedMediaSplittingAdapter, self).__init__(config=config)
self.config = config
self.mediaSessionActors.add(MediaSessionActor("UNKNOWN", Sex.UNKNOWN, None))
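  # Worker thread: split one mono WAV into voiced segments; a '<basename>.stagingComplete'
  # marker file is written once all chunks of that file have been stored.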
def _splitMonoRawAudioToVoiceSectionsThread(self, file, outputpath):
self.logger.debug("Splitting file into chunks: {}".format(self._getFilenameWithExtension(file)))
splitter = Splitter()
vad = webrtcvad.Vad(int(self.AUDIO_SPLIT_AGRESSIVENESS))
basename = self._getFilenameWithoutExtension(file)
audiochunkPathsForThisfile = []
try:
audio, sample_rate = splitter.read_wave(file)
frames = splitter.frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = splitter.vad_collector(sample_rate, 30, 300, vad, frames)
for i, segment in enumerate(segments):
path = os.path.join(outputpath, basename + '_chunk_{:05d}.wav'.format(i))
self.logger.debug("Write chunk {} of file {}".format(i, file))
splitter.write_wave(path, segment, sample_rate)
audiochunkPathsForThisfile.append(path)
# write staging complete file
stagingPath = os.path.join(outputpath, basename + ".stagingComplete")
with open(stagingPath, 'a'):
os.utime(stagingPath, None)
self.logger.debug("Finished splitting file {}".format(file))
except Exception as excep:
self.logger.warn("Could split file into chunks {}. Skipping".format(file), exc_info=excep)
return (False, str(file), []) # returning an empty list, as no success here
return (True, str(file), audiochunkPathsForThisfile)
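  # Worker thread: transcode a single media file to 16 kHz mono 16-bit PCM WAV using ffmpeg-python.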
def _convertMediafileToMonoAudioThread(self, filenumber, totalNumberOfFiles, singleFilepathToProcess, outputPath):
self.logger.debug(
"Processing file {}/{} on path {}".format(filenumber + 1, totalNumberOfFiles, singleFilepathToProcess))
nextFilename = os.path.join(outputPath, self._getFilenameWithoutExtension(singleFilepathToProcess) + ".wav")
try:
(ffmpeg
.input(singleFilepathToProcess)
.output(nextFilename, format='wav', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run()
)
except ffmpeg.Error as ffmpgError:
self.logger.warn("Ffmpeg rose an error", exc_info=ffmpgError)
self.logger.warn("Due to error of ffmpeg skipped file {}".format(singleFilepathToProcess))
return (False, str(singleFilepathToProcess), str(nextFilename))
except Exception as e:
self.logger.warn("Got an error while using ffmpeg for file {}".format(singleFilepathToProcess), exc_info=e)
return (False, str(singleFilepathToProcess), str(nextFilename))
return (True, str(singleFilepathToProcess), str(nextFilename))
def createMediaSession(self, bundles):
session = MediaSession(self.ADAPTERNAME, self.mediaSessionActors, bundles)
return session
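  # Wrap each audio chunk path in a MediaAnnotationBundle that has no written transcription.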
def createMediaAnnotationBundles(self, audiochunks):
annotationBundles = []
for index, filepath in enumerate(audiochunks):
bundle = MediaAnnotationBundleWithoutTranscription(identifier=filepath) # we do not have any written ressources
bundle.setMediaFile(filepath)
annotationBundles.append(bundle)
return annotationBundles
def splitAudioToChunks(self, filesToChunk, outputPath):
if ((filesToChunk == None) or (len(filesToChunk) == 0)):
self.logger.info("Nothing to split, received empty wav-filenamelist")
return []
successfullyChunkedFiles = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(filesToChunk):
futures.append(
executor.submit(self._splitMonoRawAudioToVoiceSectionsThread, file, outputPath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt split audiofile {}, removing from list".format(future.result()[1]))
else:
successfullyChunkedFiles.extend(future.result()[2])
self.logger.debug("Splitting Audio is done {}".format(future.result()))
self.logger.debug("Finished splitting {} wav files".format(len(filesToChunk)))
return successfullyChunkedFiles
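  # Compare '.stagingComplete' markers against the mono WAV list to decide which files still need chunking.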
def determineWavFilesToChunk(self, baseFilesToChunk, stagingChunkPath):
allStageIndicatorFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".stagingComplete"}))
allExistingChunkedFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".wav"}))
allStageIndicatorFilesDictionary = self._toFilenameDictionary(allStageIndicatorFilesFullpath)
allBaseFilesDictionary = self._toFilenameDictionary(baseFilesToChunk)
stagingCompleteCorrectKeys = set(allBaseFilesDictionary.keys()).intersection(
set(allStageIndicatorFilesDictionary.keys()))
stagingIncompleteCorrectKeys = set(allBaseFilesDictionary.keys()).difference(
set(allStageIndicatorFilesDictionary.keys()))
stagingComplete = []
for fullpath in allExistingChunkedFilesFullpath:
if any(self._getFilenameWithoutExtension(fullpath).startswith(cm) for cm in stagingCompleteCorrectKeys):
stagingComplete.append(fullpath)
stagingIncomplete = [allBaseFilesDictionary[key] for key in stagingIncompleteCorrectKeys]
self.logger.debug("Got {} files not yet chunked".format(len(stagingIncomplete)))
self.logger.debug("Got {} files chunked".format(len(stagingComplete)))
return stagingIncomplete, stagingComplete
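  # Convert the given media files to mono audio in parallel and return the paths of the successful conversions.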
def convertMediaFilesToMonoAudio(self, filesToProcess, outputpath, adapterName):
if (filesToProcess == None or len(filesToProcess) == 0):
self.logger.debug("No files to convert for {}, skipping".format(adapterName))
return []
successfulFilenames = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, currentFile in enumerate(filesToProcess):
futures.append(
executor.submit(self._convertMediafileToMonoAudioThread, filenumber, len(filesToProcess),
currentFile, outputpath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt process audiofile {}, removing from list".format(future.result()[1]))
else:
successfulFilenames.append(future.result()[2])
self.logger.debug("Processing Audio is done {} for Converter {}".format(future.result(), adapterName))
return successfulFilenames
def _toFilenameDictionary(self, list):
if (list == None or len(list) == 0):
self.logger.debug("Got nothing in list, returning empty dictionary")
return dict()
listDict = dict()
for fullpath in list:
listDict[self._getFilenameWithoutExtension(fullpath)] = fullpath
self.logger.debug("Created dictionary of files of length {}".format(len(listDict)))
return listDict
def determineFilesToConvertToMonoFromGivenLists(self, alreadyStagedFiles, originalFiles, adaptername):
dictionaryOfOriginalFilepaths = self._toFilenameDictionary(originalFiles)
dictionaryOfStagedFilepaths = self._toFilenameDictionary(alreadyStagedFiles)
notYetProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).difference(set(dictionaryOfStagedFilepaths.keys()))
alreadyProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).intersection(
set(dictionaryOfStagedFilepaths.keys()))
fullpathsToNotYetProcessed = [dictionaryOfOriginalFilepaths[key] for key in notYetProcessedKeys]
fullpathsProcessed = [dictionaryOfStagedFilepaths[key] for key in alreadyProcessedKeys]
self.logger.debug("Got {} files not yet processed for corpus {}".format(len(notYetProcessedKeys), adaptername))
self.logger.debug("Got {} files already processed for corpus {}".format(len(alreadyProcessedKeys), adaptername))
return fullpathsToNotYetProcessed, fullpathsProcessed
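  # Staged and original files are compared purely by basename: a source "video01.mp4" counts as
  # processed as soon as a "video01.wav" exists in the staging directory (plain set difference and
  # intersection over the dictionary keys built above).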
def _preprocess_workflow_with_splitting(self, filesAlreadyProcessed, filesToProcess, monoPath, chunkPath,
adaptername):
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, monoPath, adaptername)
baseFilesToChunk = []
baseFilesToChunk = baseFilesToChunk + filesSuccessfullyProcessed + filesAlreadyProcessed
# split mono audio to chunks
filesToChunk, filesAlreadyChunked = self.determineWavFilesToChunk(baseFilesToChunk,
chunkPath)
filesSuccessfullyChunked = self.splitAudioToChunks(filesToChunk, chunkPath)
# add chunks to media session
mediaBundleFiles = [] + filesSuccessfullyChunked + filesAlreadyChunked
mediaAnnotationbundles = self.createMediaAnnotationBundles(mediaBundleFiles)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
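  # Shared preprocessing workflow in short:
  #   1. convert any not-yet-staged media files to mono wav (monoPath)
  #   2. split all mono wavs into voice-activity chunks (chunkPath)
  #   3. wrap every chunk into a MediaAnnotationBundle and return the MediaSession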
class UntranscribedVideoAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "UntranscribedVideoAdapter"
def __init__(self, config):
super(UntranscribedVideoAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("Untranscribed Video Korpus")
# convert video to mono audio
filesToProcess, filesAlreadyProcessed = self._determineVideoFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _validateKorpusPath(self):
korpus_path = self.config['untranscribed_videos_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineVideoFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".mp4"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original untranscribed mp4 files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
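# Hypothetical minimal config for this adapter (key names as read above, values are placeholders):
#
#   global:
#     workdir: /data/workdir
#   untranscribed_videos_input_adapter:
#     korpus_path: /data/untranscribed_videos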
class ChJugendspracheAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "CHJugendspracheAdapter"
def __init__(self, config):
super(ChJugendspracheAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("CH-Jugendsprache Korpus")
# convert audio to mono audio
filesToProcess, filesAlreadyProcessed = self._determineChJugendspracheFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _determineChJugendspracheFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".WAV", ".wav"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original jugendsprache files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateKorpusPath(self):
korpus_path = self.config['ch_jugendsprache_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
class ArchimobAdapter(UntranscribedMediaSplittingAdapter):
"""
ArchimobAdapter
"""
ADAPTERNAME = "Archimob"
def __init__(self, config):
super(ArchimobAdapter, self).__init__(config=config)
self.config = config
def _validateKorpusPath(self):
korpus_path = self.config['archimob_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _transcription_pause_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_pause_tag_symbol']
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '@'-Symbol")
symbol = '@'
return symbol
def _transcription_vocal_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_vocal_tag_symbol']
if not symbol:
      self.logger.warn("No symbol for transcription vocal tag configured, falling back to default, which is '#'-Symbol")
symbol = '#'
return symbol
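  # Hypothetical config section for this adapter (keys as read above, values are placeholders; the
  # tag symbols fall back to '@' and '#' respectively when left empty):
  #
  #   archimob_input_adapter:
  #     korpus_path: /data/archimob_v2
  #     transcription_pause_tag_symbol: "@"
  #     transcription_vocal_tag_symbol: "#"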
def _validateWorkdir(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("archimob_staging")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineArchimobFilesToProcess(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".wav"}))
originalFiles = self._fixOriginalDatasetFlawsIfNecessary(originalFiles)
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateWorkdir(), {".wav"}))
self.logger.debug("Got {} original archimob files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def toMetamodel(self):
self.logger.debug("Archimob V2 Korpus")
# convert chunks to mono audio
filesToProcess, filesAlreadyProcessed = self._determineArchimobFilesToProcess()
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, self._validateWorkdir(),
self.ADAPTERNAME)
filesForMediaBundle = []
filesForMediaBundle = filesForMediaBundle + filesSuccessfullyProcessed + filesAlreadyProcessed
# add chunks to media session
mediaAnnotationbundles = self.createMediaAnnotationBundles(filesForMediaBundle)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
def createMediaSession(self, bundles):
actors = self._createMediaSessionActorsFromBundles(bundles)
session = MediaSession(self.ADAPTERNAME, actors, bundles)
return session
def createMediaAnnotationBundles(self, filesForMediaBundle):
allXmlOriginalTranscriptionFiles = self._archimobOriginalTranscriptionFiles(self._validateKorpusPath())
transcriptionsPerSpeaker = self._extract(allXmlOriginalTranscriptionFiles)
mediaFilesAndTranscription = self._onlyTranscriptionsWithMediaFilesAndViceVersa(transcriptionsPerSpeaker,
filesForMediaBundle)
mediaAnnotationBundles = self._createActualMediaAnnotationBundles(mediaFilesAndTranscription)
return mediaAnnotationBundles
def _fixOriginalDatasetFlawsIfNecessary(self, originalFiles):
    # As of the Archimob V2 release there are some minor flaws in the data, which are fixed sequentially
if (self._fixForDuplicateWavs1063Necessary(originalFiles)):
originalFiles = self._fixForDuplicateWavs1063(originalFiles)
if (self._fixForWrongFilenames1082Necessary(originalFiles)):
originalFiles = self._fixForWrongFilenames1082(originalFiles)
return originalFiles
def _fixForDuplicateWavs1063Necessary(self, originalFiles):
    # This flaw: within speaker folder 1063 there is a nested folder 1063 that contains all files again
existingPathsForDoubled1063 = list(
filter(lambda file: os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file, originalFiles))
fixNecessary = len(existingPathsForDoubled1063) > 0
self.logger.info("Found {} files of speaker 1063 which are duplicates. They will be ignored".format(
len(existingPathsForDoubled1063)))
return fixNecessary
def _fixForDuplicateWavs1063(self, originalFiles):
    # The fix is simply to remove the files in question from the list
pathsWithout1063duplicates = list(
filter(lambda file: not (os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file), originalFiles))
originalFiles = pathsWithout1063duplicates
return originalFiles
def _fixForWrongFilenames1082Necessary(self, originalFiles):
    regexForFindingWrongNames = r"(^\d{4}_\d)(d\d{4}_.*\.wav)"  # like 1082_2d1082_2_TLI_3.wav
onlyFilenames = [os.path.basename(filename) for filename in originalFiles]
for filename in onlyFilenames:
m = re.search(regexForFindingWrongNames, filename)
if (not (m is None)):
return True
return False
def _fixForWrongFilenames1082(self, originalFiles):
fixedFiles = originalFiles.copy()
    regexForFindingWrongFullpaths = "(.*" + re.escape(os.path.sep) + r")(\d{4}_\d)(d\d{4}_.*\.wav)"  # like /home/somebody/files/1082/1082_2d1082_2_TLI_3.wav
for filename in originalFiles:
m = re.search(regexForFindingWrongFullpaths, filename)
if (not (m is None)):
newFilename = m.group(1) + m.group(3)
self.logger.debug(
"Fix 1082: Renaming file {} from {} to {}".format(m.group(2) + m.group(3), filename, newFilename))
try:
shutil.move(filename, newFilename)
fixedFiles.append(newFilename)
except Exception as inst:
self.logger.warn(
"Could not move file {} to {}, skipping and just removing from usable filenames".format(filename,
newFilename),
exc_info=inst)
fixedFiles.remove(filename)
return fixedFiles
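  # Worked example of the 1082 fix (placeholder directory): a file
  #   /some/path/1082_2d1082_2_TLI_3.wav   (regex groups: "1082_2" + "d1082_2_TLI_3.wav")
  # is renamed on disk to
  #   /some/path/d1082_2_TLI_3.wav
  # i.e. the duplicated leading speaker prefix is dropped.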
def _archimobOriginalTranscriptionFiles(self, path):
xmlOriginalFiles = list(Path(path).glob("**/*.xml"))
self.logger.debug("Found {} original xml files for archimob".format(len(xmlOriginalFiles)))
return xmlOriginalFiles
def _extract(self, allXmlOriginalTranscriptionFiles):
transcriptionsPerSpeaker = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(allXmlOriginalTranscriptionFiles):
futures.append(executor.submit(self._extractSingleXmlFileThread, file))
for future in as_completed(futures):
        if not future.result()[0]:
          self.logger.warning("Couldn't extract metadata for file {}, removing it from the list".format(future.result()[1]))
else:
transcriptionsPerSpeaker.append(
(future.result()[1], future.result()[2])) # tuple of original file and transcription dataframe
self.logger.debug("Extracting metadata for speaker finished {}".format(future.result()))
self.logger.debug("Finished metadata extraction for all {} xml files".format(len(allXmlOriginalTranscriptionFiles)))
return transcriptionsPerSpeaker
def _extractSingleXmlFileThread(self, xmlFile):
namespaceprefix = "{http://www.tei-c.org/ns/1.0}"
try:
tree = ET.parse(xmlFile)
root = tree.getroot()
ch_datacolumns = pd.DataFrame(columns=['Filename', 'transcript'])
transcriptionForSpeaker = pd.DataFrame(columns=ch_datacolumns.columns)
tagsToIgnore = set([namespaceprefix + tag for tag in {"gap", "incident", "kinesic", "other"}])
for utteranceTag in root.iter(namespaceprefix + 'u'):
media = utteranceTag.attrib['start']
filename = media.split('#')[1]
ch_transcript = [""]
for element in utteranceTag:
extractedWord = ""
if (namespaceprefix + "w" == element.tag):
extractedWord = self._extractWordTag(element)
if (namespaceprefix + "pause" == element.tag):
extractedWord = self._extractPauseTag(element)
if (namespaceprefix + "vocal" == element.tag):
extractedWord = self._extractVocalTag(namespaceprefix, element)
if (namespaceprefix + "del" == element.tag):
extractedWord = self._extractDeletionTag(element)
if (namespaceprefix + "unclear" == element.tag):
extractedWord = self._extractUnclearTag(namespaceprefix, element)
if (element.tag in tagsToIgnore):
self.logger.debug(
"Found tag {} which is in ignore list, ignoring the whole utterance {}".format(element.tag, filename))
break
if (extractedWord):
cleanedWord = self._cleanExtractedWord(extractedWord)
if (cleanedWord):
ch_transcript.append(cleanedWord)
try:
actualTranscript = " ".join(ch_transcript).strip()
if (not actualTranscript or (self._transcription_pause_tag_symbol() == actualTranscript)):
self.logger.debug("Skipping empty transcription for filename {}".format(filename))
continue
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': filename, 'transcript': actualTranscript}, ignore_index=True)
transcriptionForSpeaker = self._cleanSpecialCaseWhereTwoSentencesPerFileExist(transcriptionForSpeaker)
except Exception as e:
self.logger.warn("Couldn't append single utterance for filename {}".format(filename), exc_info=e)
continue
# writing is just for manual checking
transcriptionForSpeaker.to_csv(
os.path.join(self._getFullFilenameWithoutExtension(xmlFile) + "_transcript_CH.csv"),
header=True, index=False, encoding='utf-8')
return True, xmlFile, transcriptionForSpeaker
except Exception as e:
self.logger.warn("Couldn't extract metadata for xml file {}".format(xmlFile), exc_info=e)
return False, xmlFile, None
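  # The parser walks TEI utterances (<u start="...#<wav-basename>" ...>, illustrative attribute value)
  # and concatenates their child elements: <w> word tokens, <pause/> (pause symbol), <vocal>/<desc>
  # (vocal symbol plus description), <del> and <unclear>. Utterances containing <gap>, <incident>,
  # <kinesic> or <other> are skipped entirely.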
def _extractWordTag(self, element):
return element.text
def _extractPauseTag(self, element):
return self._transcription_pause_tag_symbol()
def _extractVocalTag(self, namespaceprefix, element):
desc = element.find(namespaceprefix + "desc")
if desc is not None:
return self._transcription_vocal_tag_symbol() + desc.text
return ""
def _extractDeletionTag(self, element):
truncatedTextWithPotentialSlash = element.text
if truncatedTextWithPotentialSlash:
truncatedText = truncatedTextWithPotentialSlash.replace("/", "")
return truncatedText
return ""
def _extractUnclearTag(self, namespaceprefix, element):
if element is not None:
wordsWithinUnclearTag = element.findall(namespaceprefix + 'w')
unclearText = []
for word in wordsWithinUnclearTag:
unclearText.append(word.text)
return " ".join(unclearText)
return ""
def _cleanExtractedWord(self, extractedWord):
    # Replace all accented tokens (gravis etc.) with their plain counterpart and
    # remove all characters that are not in the allowed list.
    # Note: q, x and y are not allowed, as those do not occur in the Archimob transcriptions!
allowed_chars = {
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'z',
'ä', 'ö', 'ü',
' '
}
allowed_chars.add(self._transcription_pause_tag_symbol())
allowed_chars.add(self._transcription_vocal_tag_symbol())
whitespace_regex = re.compile(r'[ \t]+')
extractedWord = extractedWord.lower()
extractedWord = extractedWord.replace('á', 'a')
extractedWord = extractedWord.replace('à', 'a')
extractedWord = extractedWord.replace('â', 'a')
extractedWord = extractedWord.replace('ç', 'c')
extractedWord = extractedWord.replace('é', 'e')
extractedWord = extractedWord.replace('è', 'e')
extractedWord = extractedWord.replace('ê', 'e')
extractedWord = extractedWord.replace('í', 'i')
extractedWord = extractedWord.replace('ì', 'i')
extractedWord = extractedWord.replace('î', 'i')
extractedWord = extractedWord.replace('ñ', 'n')
extractedWord = extractedWord.replace('ó', 'o')
extractedWord = extractedWord.replace('ò', 'o')
extractedWord = extractedWord.replace('ô', 'o')
extractedWord = extractedWord.replace('ú', 'u')
extractedWord = extractedWord.replace('ù', 'u')
extractedWord = extractedWord.replace('ǜ', 'u')
extractedWord = extractedWord.replace('û', 'u')
extractedWord = extractedWord.replace('ș', 's')
extractedWord = extractedWord.replace('ş', 's')
extractedWord = extractedWord.replace('ß', 'ss')
extractedWord = extractedWord.replace('-', ' ')
# Those should not exist anymore, however, be safe
extractedWord = extractedWord.replace('–', ' ')
extractedWord = extractedWord.replace('/', ' ')
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = ''.join([char for char in extractedWord if char in allowed_chars])
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = extractedWord.strip()
return extractedWord
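  # Worked example of the cleaning rules above (illustrative input):
  #   "Grüèzi-Wohl/"  ->  lowercased, è->e, '-' and '/' to spaces, filtered  ->  "grüezi wohl"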
def _onlyTranscriptionsWithMediaFilesAndViceVersa(self, transcriptionsPerSpeaker, filesForMediaBundle):
if not transcriptionsPerSpeaker or not filesForMediaBundle:
return []
existingMediaFilesTuples = [(self._getFilenameWithoutExtension(mediafile), mediafile) for mediafile in
filesForMediaBundle]
existingMediaFiles, existingMediaFilesFullpath = zip(*existingMediaFilesTuples)
# combine all transcriptions
allTranscriptions = pd.concat([transcription[1] for transcription in transcriptionsPerSpeaker])
if any("-" in filename for filename in allTranscriptions.Filename) \
and not any("-" in filename for filename in existingMediaFiles):
      self.logger.debug(
        "Transcription filenames contain dashes (-) while the media filenames only use underscores (_). Automatically replacing dashes with underscores...")
allTranscriptions.Filename = allTranscriptions.Filename.str.replace("-", "_")
# Find all files that exist in both sets
# TODO: Performance not good for 70k files
allMatchingTranscriptions = allTranscriptions[allTranscriptions.Filename.isin(existingMediaFiles)].copy()
allMatchingTranscriptions["FullpathFilename"] = ""
allMatchingTranscriptions.set_index("Filename", inplace=True)
for filenumber, existingFile in enumerate(existingMediaFiles):
allMatchingTranscriptions.loc[existingFile, "FullpathFilename"] = existingMediaFilesFullpath[filenumber]
return allMatchingTranscriptions[["FullpathFilename", "transcript"]].copy()
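  # The returned DataFrame has one row per chunk that exists both on disk and in the transcriptions,
  # with the columns "FullpathFilename" (wav path) and "transcript" (Dieth transcription text).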
def _createActualMediaAnnotationBundles(self, mediaFilesAndTranscription):
bundles = []
for fileAndTranscription in mediaFilesAndTranscription.itertuples(index=False):
bundle = MediaAnnotationBundle(fileAndTranscription.FullpathFilename)
speakerId = self._speakerIdFromFullpath(fileAndTranscription.FullpathFilename)
bundle.setMediaFile(MediaFile(speakerId))
written_resource = WrittenResource(fileAndTranscription.transcript, speakerId, languageCode="CH",
annotationType=WrittenResource.DIETH_WITHOUT_GRAVIS)
bundle.setWrittenResource(written_resource)
bundles.append(bundle)
self.logger.debug("Created {} mediaAnnotationBundles out of {} transcriptions".format(len(bundles), len(
mediaFilesAndTranscription)))
return bundles
def _speakerIdFromFullpath(self, fullpathFilename):
return self._getFilenameWithoutExtension(fullpathFilename).split("_")[0]
def _createMediaSessionActorsFromBundles(self, bundles):
speakerIds = set([speaker.writtenResource.actorRef for speaker in bundles])
actors = [MediaSessionActor(speakerId, Sex.UNKNOWN, None) for speakerId in speakerIds]
return MediaSessionActors(actors)
def _cleanSpecialCaseWhereTwoSentencesPerFileExist(self, transcriptionForSpeaker):
if transcriptionForSpeaker is None or len(transcriptionForSpeaker) < 2:
return transcriptionForSpeaker
lastFilename = transcriptionForSpeaker.iloc[-1]["Filename"]
filenameBefore = transcriptionForSpeaker.iloc[-2]["Filename"]
if lastFilename == filenameBefore:
lastTranscription = transcriptionForSpeaker.iloc[-1]["transcript"]
transcriptionBefore = transcriptionForSpeaker.iloc[-2]["transcript"]
newTranscript = transcriptionBefore + " " + lastTranscription
transcriptionForSpeaker.drop(transcriptionForSpeaker.tail(2).index, inplace=True)
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': lastFilename, 'transcript': newTranscript}, ignore_index=True)
self.logger.info(
"Found a case {} where two sentences '{}' and '{}' are within one audio-file, merging them together".format(
lastFilename,
transcriptionBefore, lastTranscription))
return transcriptionForSpeaker
class CommonVoiceAdapter(Adapter):
RELATIVE_PATH_TO_AUDIO = "clips"
LANGUAGECODE_DE = "de_DE"
ADAPTERNAME = "CommonVoiceDE"
mediaAnnotationBundles = []
  mediaSessionActors = set()  # using a set so we don't have duplicates
def __init__(self, config):
super(CommonVoiceAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("Created CommonVoice Adapter")
self.audiofilenames = self._readExistingAudioFiles()
self.speakermetadata = self._readExistingSpeakerMetadata()
self._persistMetamodel()
self._buildMediaSession()
return self.mediaSession
def _validateKorpusPath(self):
korpus_path = self.config['common_voice_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _existingAudioFileFullpath(self, filename):
return os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO, filename)
def _readExistingAudioFiles(self):
fullpath = os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO)
for file in os.listdir(fullpath):
if file.endswith(".mp3"):
currentfile = MediaAnnotationBundle(self._existingAudioFileFullpath(file))
self.mediaAnnotationBundles.append(currentfile)
self.logger.debug("Found {} audiofiles to process".format(len(self.mediaAnnotationBundles)))
pass
def _readExistingSpeakerMetadata(self, ):
existing_audio_identifier = self._getFilenamesFromMediaAnnotationBundles()
common_voice_valid_metadata = self._getCommonVoiceValidMetadata(
existing_audio_identifier, self._validateKorpusPath())
self._enrichWithTranscription(common_voice_valid_metadata)
self._extractMediaSessionActors(common_voice_valid_metadata)
def _enrichWithTranscription(self, common_voice_valid_metadata):
self.mediaAnnotationBundles_dictionary_withoutExtension = {self._getFilenameWithoutExtension(x.identifier): x for x
in self.mediaAnnotationBundles}
self.mediaAnnotationBundles_dictionary_withExtension = {self._getFilenameWithExtension(x.identifier): x for x in
self.mediaAnnotationBundles}
common_voice_valid_metadata.apply(self._enrichWithTranscriptionInner, axis=1)
pass
def _enrichWithTranscriptionInner(self, row):
currentMediaAnnotationBundle = self.mediaAnnotationBundles_dictionary_withoutExtension.get(row.path,
self.mediaAnnotationBundles_dictionary_withExtension.get(
row.path))
currentMediaAnnotationBundle.setWrittenResource(
WrittenResource(row.sentence, row.client_id, self.LANGUAGECODE_DE))
currentMediaAnnotationBundle.setMediaFile(MediaFile(row.client_id))
self.logger.debug(
"Found matching media-annotation bundle for identifier {} and path {}".format(row.client_id, row.path))
def _extractMediaSessionActors(self, common_voice_valid_metadata):
common_voice_valid_metadata.apply(self._createMediaSessionActorFromRow, axis=1)
self.logger.debug("Found {} Speakers".format(len(self.mediaSessionActors)))
pass
def _createMediaSessionActorFromRow(self, row):
self.mediaSessionActors.add(MediaSessionActor(row.client_id, Sex.toSexEnum(row.gender), row.age))
pass
def _getCommonVoiceValidMetadata(self, existing_audio_identifier,
korpus_path):
commonvoice_valid_metadatafilenames = ["dev.tsv", "test.tsv", "train.tsv", "validated.tsv"]
combined_csv = pd.concat(
[pd.read_csv(os.path.join(korpus_path, f), sep="\t", header=0) for f in commonvoice_valid_metadatafilenames])
common_voice_valid_metadata = combined_csv[combined_csv.path.isin(existing_audio_identifier)]
common_voice_valid_metadata = self._fixChangeInDataFormatCommonVoice(common_voice_valid_metadata, combined_csv)
return common_voice_valid_metadata
def _getFilenamesFromMediaAnnotationBundles(self):
return [os.path.splitext(os.path.basename(base.identifier))[0] for base in
self.mediaAnnotationBundles]
def _getFilenamesFromMediaAnnotationBundlesWithExtension(self):
return [os.path.basename(base.identifier) for base in self.mediaAnnotationBundles]
def _persistMetamodel(self):
# TODO actual persisting of working json
# Actual json output
# print(json.dumps(self.mediaAnnotationBundles, default=lambda o: o.__dict__, sort_keys=True, indent=4))
pass
def _buildMediaSession(self):
actors = MediaSessionActors(self.mediaSessionActors)
session = MediaSession(self.ADAPTERNAME, actors, self.mediaAnnotationBundles)
# TODO Validate
self.mediaSession = session
pass
def _fixChangeInDataFormatCommonVoice(self, common_voice_valid_metadata, combined_csv):
    if len(common_voice_valid_metadata) == 0:
      self.logger.debug(
        "CommonVoice tsv files seem to contain filenames with extension (newer file format). Retrying the match including the extension")
common_voice_valid_metadata = combined_csv[
combined_csv.path.isin(self._getFilenamesFromMediaAnnotationBundlesWithExtension())]
self.logger.debug(
"CommonVoice Valid metadata length is: {}".format(len(common_voice_valid_metadata)))
return common_voice_valid_metadata
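  # Hypothetical end-to-end usage sketch for this adapter (config keys as read above):
  #
  #   adapter = CommonVoiceAdapter(config)    # expects common_voice_input_adapter.korpus_path
  #   media_session = adapter.toMetamodel()   # bundles all clips/*.mp3 with their tsv transcripts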
| import concurrent
import os
import re
import shutil
import xml.etree.ElementTree as ET # TODO do we have this as requirement?
from concurrent.futures import as_completed
from concurrent.futures._base import as_completed
from pathlib import Path
import ffmpeg
import pandas as pd
import webrtcvad
from audio_korpora_pipeline.baseobjects import FileHandlingObject
from audio_korpora_pipeline.inputadapter.audiosplit.splitter import Splitter
from audio_korpora_pipeline.metamodel.mediasession import MediaAnnotationBundle, \
MediaAnnotationBundleWithoutTranscription, WrittenResource, MediaFile, \
MediaSessionActor, Sex, \
MediaSessionActors, MediaSession
class Adapter(FileHandlingObject):
def __init__(self, config):
super(Adapter, self).__init__()
def toMetamodel(self) -> MediaSession:
raise NotImplementedError("Please use a subclass")
def skipAlreadyProcessedFiles(self):
skip = self.config['global']['skipAlreadyProcessedFiles']
if not (skip):
self.logger.warn("No config setting for skipAlreadyProcessedFiles set. Assuming True")
return True
return skip
class UntranscribedMediaSplittingAdapter(Adapter):
AUDIO_SPLIT_AGRESSIVENESS = 3 # webrtcvad 1 (low), 3 (max)
ADAPTERNAME = "MediaSplittingAdapter"
mediaAnnotationBundles = []
mediaSessionActors = set() # using a set so we don't have duplets
def __init__(self, config):
super(UntranscribedMediaSplittingAdapter, self).__init__(config=config)
self.config = config
self.mediaSessionActors.add(MediaSessionActor("UNKNOWN", Sex.UNKNOWN, None))
def _splitMonoRawAudioToVoiceSectionsThread(self, file, outputpath):
self.logger.debug("Splitting file into chunks: {}".format(self._getFilenameWithExtension(file)))
splitter = Splitter()
vad = webrtcvad.Vad(int(self.AUDIO_SPLIT_AGRESSIVENESS))
basename = self._getFilenameWithoutExtension(file)
audiochunkPathsForThisfile = []
try:
audio, sample_rate = splitter.read_wave(file)
frames = splitter.frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = splitter.vad_collector(sample_rate, 30, 300, vad, frames)
for i, segment in enumerate(segments):
path = os.path.join(outputpath, basename + '_chunk_{:05d}.wav'.format(i))
self.logger.debug("Write chunk {} of file {}".format(i, file))
splitter.write_wave(path, segment, sample_rate)
audiochunkPathsForThisfile.append(path)
# write staging complete file
stagingPath = os.path.join(outputpath, basename + ".stagingComplete")
with open(stagingPath, 'a'):
os.utime(stagingPath, None)
self.logger.debug("Finished splitting file {}".format(file))
except Exception as excep:
self.logger.warn("Could split file into chunks {}. Skipping".format(file), exc_info=excep)
return (False, str(file), []) # returning an empty list, as no success here
return (True, str(file), audiochunkPathsForThisfile)
def _convertMediafileToMonoAudioThread(self, filenumber, totalNumberOfFiles, singleFilepathToProcess, outputPath):
self.logger.debug(
"Processing file {}/{} on path {}".format(filenumber + 1, totalNumberOfFiles, singleFilepathToProcess))
nextFilename = os.path.join(outputPath, self._getFilenameWithoutExtension(singleFilepathToProcess) + ".wav")
try:
(ffmpeg
.input(singleFilepathToProcess)
.output(nextFilename, format='wav', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run()
)
except ffmpeg.Error as ffmpgError:
self.logger.warn("Ffmpeg rose an error", exc_info=ffmpgError)
self.logger.warn("Due to error of ffmpeg skipped file {}".format(singleFilepathToProcess))
return (False, str(singleFilepathToProcess), str(nextFilename))
except Exception as e:
self.logger.warn("Got an error while using ffmpeg for file {}".format(singleFilepathToProcess), exc_info=e)
return (False, str(singleFilepathToProcess), str(nextFilename))
return (True, str(singleFilepathToProcess), str(nextFilename))
def createMediaSession(self, bundles):
session = MediaSession(self.ADAPTERNAME, self.mediaSessionActors, bundles)
return session
def createMediaAnnotationBundles(self, audiochunks):
annotationBundles = []
for index, filepath in enumerate(audiochunks):
bundle = MediaAnnotationBundleWithoutTranscription(identifier=filepath) # we do not have any written ressources
bundle.setMediaFile(filepath)
annotationBundles.append(bundle)
return annotationBundles
def splitAudioToChunks(self, filesToChunk, outputPath):
if ((filesToChunk == None) or (len(filesToChunk) == 0)):
self.logger.info("Nothing to split, received empty wav-filenamelist")
return []
successfullyChunkedFiles = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(filesToChunk):
futures.append(
executor.submit(self._splitMonoRawAudioToVoiceSectionsThread, file, outputPath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt split audiofile {}, removing from list".format(future.result()[1]))
else:
successfullyChunkedFiles.extend(future.result()[2])
self.logger.debug("Splitting Audio is done {}".format(future.result()))
self.logger.debug("Finished splitting {} wav files".format(len(filesToChunk)))
return successfullyChunkedFiles
def determineWavFilesToChunk(self, baseFilesToChunk, stagingChunkPath):
allStageIndicatorFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".stagingComplete"}))
allExistingChunkedFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".wav"}))
allStageIndicatorFilesDictionary = self._toFilenameDictionary(allStageIndicatorFilesFullpath)
allBaseFilesDictionary = self._toFilenameDictionary(baseFilesToChunk)
stagingCompleteCorrectKeys = set(allBaseFilesDictionary.keys()).intersection(
set(allStageIndicatorFilesDictionary.keys()))
stagingIncompleteCorrectKeys = set(allBaseFilesDictionary.keys()).difference(
set(allStageIndicatorFilesDictionary.keys()))
stagingComplete = []
for fullpath in allExistingChunkedFilesFullpath:
if any(self._getFilenameWithoutExtension(fullpath).startswith(cm) for cm in stagingCompleteCorrectKeys):
stagingComplete.append(fullpath)
stagingIncomplete = [allBaseFilesDictionary[key] for key in stagingIncompleteCorrectKeys]
self.logger.debug("Got {} files not yet chunked".format(len(stagingIncomplete)))
self.logger.debug("Got {} files chunked".format(len(stagingComplete)))
return stagingIncomplete, stagingComplete
def convertMediaFilesToMonoAudio(self, filesToProcess, outputpath, adapterName):
if (filesToProcess == None or len(filesToProcess) == 0):
self.logger.debug("No files to convert for {}, skipping".format(adapterName))
return []
successfulFilenames = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, currentFile in enumerate(filesToProcess):
futures.append(
executor.submit(self._convertMediafileToMonoAudioThread, filenumber, len(filesToProcess),
currentFile, outputpath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt process audiofile {}, removing from list".format(future.result()[1]))
else:
successfulFilenames.append(future.result()[2])
self.logger.debug("Processing Audio is done {} for Converter {}".format(future.result(), adapterName))
return successfulFilenames
def _toFilenameDictionary(self, list):
if (list == None or len(list) == 0):
self.logger.debug("Got nothing in list, returning empty dictionary")
return dict()
listDict = dict()
for fullpath in list:
listDict[self._getFilenameWithoutExtension(fullpath)] = fullpath
self.logger.debug("Created dictionary of files of length {}".format(len(listDict)))
return listDict
def determineFilesToConvertToMonoFromGivenLists(self, alreadyStagedFiles, originalFiles, adaptername):
dictionaryOfOriginalFilepaths = self._toFilenameDictionary(originalFiles)
dictionaryOfStagedFilepaths = self._toFilenameDictionary(alreadyStagedFiles)
notYetProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).difference(set(dictionaryOfStagedFilepaths.keys()))
alreadyProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).intersection(
set(dictionaryOfStagedFilepaths.keys()))
fullpathsToNotYetProcessed = [dictionaryOfOriginalFilepaths[key] for key in notYetProcessedKeys]
fullpathsProcessed = [dictionaryOfStagedFilepaths[key] for key in alreadyProcessedKeys]
self.logger.debug("Got {} files not yet processed for corpus {}".format(len(notYetProcessedKeys), adaptername))
self.logger.debug("Got {} files already processed for corpus {}".format(len(alreadyProcessedKeys), adaptername))
return fullpathsToNotYetProcessed, fullpathsProcessed
def _preprocess_workflow_with_splitting(self, filesAlreadyProcessed, filesToProcess, monoPath, chunkPath,
adaptername):
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, monoPath, adaptername)
baseFilesToChunk = []
baseFilesToChunk = baseFilesToChunk + filesSuccessfullyProcessed + filesAlreadyProcessed
# split mono audio to chunks
filesToChunk, filesAlreadyChunked = self.determineWavFilesToChunk(baseFilesToChunk,
chunkPath)
filesSuccessfullyChunked = self.splitAudioToChunks(filesToChunk, chunkPath)
# add chunks to media session
mediaBundleFiles = [] + filesSuccessfullyChunked + filesAlreadyChunked
mediaAnnotationbundles = self.createMediaAnnotationBundles(mediaBundleFiles)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
class UntranscribedVideoAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "UntranscribedVideoAdapter"
def __init__(self, config):
super(UntranscribedVideoAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("Untranscribed Video Korpus")
# convert video to mono audio
filesToProcess, filesAlreadyProcessed = self._determineVideoFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _validateKorpusPath(self):
korpus_path = self.config['untranscribed_videos_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineVideoFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".mp4"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original untranscribed mp4 files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
class ChJugendspracheAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "CHJugendspracheAdapter"
def __init__(self, config):
super(ChJugendspracheAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("CH-Jugendsprache Korpus")
# convert audio to mono audio
filesToProcess, filesAlreadyProcessed = self._determineChJugendspracheFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _determineChJugendspracheFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".WAV", ".wav"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original jugendsprache files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateKorpusPath(self):
korpus_path = self.config['ch_jugendsprache_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
class ArchimobAdapter(UntranscribedMediaSplittingAdapter):
"""
ArchimobAdapter
"""
ADAPTERNAME = "Archimob"
def __init__(self, config):
super(ArchimobAdapter, self).__init__(config=config)
self.config = config
def _validateKorpusPath(self):
korpus_path = self.config['archimob_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _transcription_pause_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_pause_tag_symbol']
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '@'-Symbol")
symbol = '@'
return symbol
def _transcription_vocal_tag_symbol(self):
symbol = self.config['archimob_input_adapter']['transcription_vocal_tag_symbol']
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '#'-Symbol")
symbol = '#'
return symbol
def _validateWorkdir(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("archimob_staging")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineArchimobFilesToProcess(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".wav"}))
originalFiles = self._fixOriginalDatasetFlawsIfNecessary(originalFiles)
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateWorkdir(), {".wav"}))
self.logger.debug("Got {} original archimob files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def toMetamodel(self):
self.logger.debug("Archimob V2 Korpus")
# convert chunks to mono audio
filesToProcess, filesAlreadyProcessed = self._determineArchimobFilesToProcess()
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, self._validateWorkdir(),
self.ADAPTERNAME)
filesForMediaBundle = []
filesForMediaBundle = filesForMediaBundle + filesSuccessfullyProcessed + filesAlreadyProcessed
# add chunks to media session
mediaAnnotationbundles = self.createMediaAnnotationBundles(filesForMediaBundle)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
def createMediaSession(self, bundles):
actors = self._createMediaSessionActorsFromBundles(bundles)
session = MediaSession(self.ADAPTERNAME, actors, bundles)
return session
def createMediaAnnotationBundles(self, filesForMediaBundle):
allXmlOriginalTranscriptionFiles = self._archimobOriginalTranscriptionFiles(self._validateKorpusPath())
transcriptionsPerSpeaker = self._extract(allXmlOriginalTranscriptionFiles)
mediaFilesAndTranscription = self._onlyTranscriptionsWithMediaFilesAndViceVersa(transcriptionsPerSpeaker,
filesForMediaBundle)
mediaAnnotationBundles = self._createActualMediaAnnotationBundles(mediaFilesAndTranscription)
return mediaAnnotationBundles
def _fixOriginalDatasetFlawsIfNecessary(self, originalFiles):
# As of Archimobe release V2 there are some minor flaws in the data, which are treated sequentially
if (self._fixForDuplicateWavs1063Necessary(originalFiles)):
originalFiles = self._fixForDuplicateWavs1063(originalFiles)
if (self._fixForWrongFilenames1082Necessary(originalFiles)):
originalFiles = self._fixForWrongFilenames1082(originalFiles)
return originalFiles
def _fixForDuplicateWavs1063Necessary(self, originalFiles):
# This flaw is simply, that within 1063 there exists another folder 1063 containing all files again
existingPathsForDoubled1063 = list(
filter(lambda file: os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file, originalFiles))
fixNecessary = len(existingPathsForDoubled1063) > 0
self.logger.info("Found {} files of speaker 1063 which are duplicates. They will be ignored".format(
len(existingPathsForDoubled1063)))
return fixNecessary
def _fixForDuplicateWavs1063(self, originalFiles):
# fix is simply by removing the files in question from list
pathsWithout1063duplicates = list(
filter(lambda file: not (os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file), originalFiles))
originalFiles = pathsWithout1063duplicates
return originalFiles
def _fixForWrongFilenames1082Necessary(self, originalFiles):
regexForFindingWrongNames = "(^\d{4}_\d)(d\d{4}_.*\.wav)" # like 1082_2d1082_2_TLI_3.wav
onlyFilenames = [os.path.basename(filename) for filename in originalFiles]
for filename in onlyFilenames:
m = re.search(regexForFindingWrongNames, filename)
if (not (m is None)):
return True
return False
def _fixForWrongFilenames1082(self, originalFiles):
fixedFiles = originalFiles.copy()
regexForFindingWrongFullpaths = "(.*\\" + os.path.sep + ")(\d{4}_\d)(d\d{4}_.*\.wav)" # like /home/somebody/files/1082/1082_2d1082_2_TLI_3.wav
for filename in originalFiles:
m = re.search(regexForFindingWrongFullpaths, filename)
if (not (m is None)):
newFilename = m.group(1) + m.group(3)
self.logger.debug(
"Fix 1082: Renaming file {} from {} to {}".format(m.group(2) + m.group(3), filename, newFilename))
try:
shutil.move(filename, newFilename)
fixedFiles.append(newFilename)
except Exception as inst:
self.logger.warn(
"Could not move file {} to {}, skipping and just removing from usable filenames".format(filename,
newFilename),
exc_info=inst)
fixedFiles.remove(filename)
return fixedFiles
def _archimobOriginalTranscriptionFiles(self, path):
xmlOriginalFiles = list(Path(path).glob("**/*.xml"))
self.logger.debug("Found {} original xml files for archimob".format(len(xmlOriginalFiles)))
return xmlOriginalFiles
def _extract(self, allXmlOriginalTranscriptionFiles):
transcriptionsPerSpeaker = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(allXmlOriginalTranscriptionFiles):
futures.append(executor.submit(self._extractSingleXmlFileThread, file))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt extract metadata for file {}, removing from list".format(future.result()[1]))
else:
transcriptionsPerSpeaker.append(
(future.result()[1], future.result()[2])) # tuple of original file and transcription dataframe
self.logger.debug("Extracting metadata for speaker finished {}".format(future.result()))
self.logger.debug("Finished metadata extraction for all {} xml files".format(len(allXmlOriginalTranscriptionFiles)))
return transcriptionsPerSpeaker
def _extractSingleXmlFileThread(self, xmlFile):
namespaceprefix = "{http://www.tei-c.org/ns/1.0}"
try:
tree = ET.parse(xmlFile)
root = tree.getroot()
ch_datacolumns = pd.DataFrame(columns=['Filename', 'transcript'])
transcriptionForSpeaker = pd.DataFrame(columns=ch_datacolumns.columns)
tagsToIgnore = set([namespaceprefix + tag for tag in {"gap", "incident", "kinesic", "other"}])
for utteranceTag in root.iter(namespaceprefix + 'u'):
media = utteranceTag.attrib['start']
filename = media.split('#')[1]
ch_transcript = [""]
for element in utteranceTag:
extractedWord = ""
if (namespaceprefix + "w" == element.tag):
extractedWord = self._extractWordTag(element)
if (namespaceprefix + "pause" == element.tag):
extractedWord = self._extractPauseTag(element)
if (namespaceprefix + "vocal" == element.tag):
extractedWord = self._extractVocalTag(namespaceprefix, element)
if (namespaceprefix + "del" == element.tag):
extractedWord = self._extractDeletionTag(element)
if (namespaceprefix + "unclear" == element.tag):
extractedWord = self._extractUnclearTag(namespaceprefix, element)
if (element.tag in tagsToIgnore):
self.logger.debug(
"Found tag {} which is in ignore list, ignoring the whole utterance {}".format(element.tag, filename))
break
if (extractedWord):
cleanedWord = self._cleanExtractedWord(extractedWord)
if (cleanedWord):
ch_transcript.append(cleanedWord)
try:
actualTranscript = " ".join(ch_transcript).strip()
if (not actualTranscript or (self._transcription_pause_tag_symbol() == actualTranscript)):
self.logger.debug("Skipping empty transcription for filename {}".format(filename))
continue
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': filename, 'transcript': actualTranscript}, ignore_index=True)
transcriptionForSpeaker = self._cleanSpecialCaseWhereTwoSentencesPerFileExist(transcriptionForSpeaker)
except Exception as e:
self.logger.warn("Couldn't append single utterance for filename {}".format(filename), exc_info=e)
continue
# writing is just for manual checking
transcriptionForSpeaker.to_csv(
os.path.join(self._getFullFilenameWithoutExtension(xmlFile) + "_transcript_CH.csv"),
header=True, index=False, encoding='utf-8')
return True, xmlFile, transcriptionForSpeaker
except Exception as e:
self.logger.warn("Couldn't extract metadata for xml file {}".format(xmlFile), exc_info=e)
return False, xmlFile, None
def _extractWordTag(self, element):
return element.text
def _extractPauseTag(self, element):
return self._transcription_pause_tag_symbol()
def _extractVocalTag(self, namespaceprefix, element):
desc = element.find(namespaceprefix + "desc")
if desc is not None:
return self._transcription_vocal_tag_symbol() + desc.text
return ""
def _extractDeletionTag(self, element):
truncatedTextWithPotentialSlash = element.text
if truncatedTextWithPotentialSlash:
truncatedText = truncatedTextWithPotentialSlash.replace("/", "")
return truncatedText
return ""
def _extractUnclearTag(self, namespaceprefix, element):
if element is not None:
wordsWithinUnclearTag = element.findall(namespaceprefix + 'w')
unclearText = []
for word in wordsWithinUnclearTag:
unclearText.append(word.text)
return " ".join(unclearText)
return ""
def _cleanExtractedWord(self, extractedWord):
# replace all tokens with gravis with their counterpart
# remove all chars not in allowed list
# Note: q,x and y are not allowed, as thos are not existing within transcription of archimob!
allowed_chars = {
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'z',
'ä', 'ö', 'ü',
' '
}
allowed_chars.add(self._transcription_pause_tag_symbol())
allowed_chars.add(self._transcription_vocal_tag_symbol())
whitespace_regex = re.compile(r'[ \t]+')
extractedWord = extractedWord.lower()
extractedWord = extractedWord.replace('á', 'a')
extractedWord = extractedWord.replace('à', 'a')
extractedWord = extractedWord.replace('â', 'a')
extractedWord = extractedWord.replace('ç', 'c')
extractedWord = extractedWord.replace('é', 'e')
extractedWord = extractedWord.replace('è', 'e')
extractedWord = extractedWord.replace('ê', 'e')
extractedWord = extractedWord.replace('í', 'i')
extractedWord = extractedWord.replace('ì', 'i')
extractedWord = extractedWord.replace('î', 'i')
extractedWord = extractedWord.replace('ñ', 'n')
extractedWord = extractedWord.replace('ó', 'o')
extractedWord = extractedWord.replace('ò', 'o')
extractedWord = extractedWord.replace('ô', 'o')
extractedWord = extractedWord.replace('ú', 'u')
extractedWord = extractedWord.replace('ù', 'u')
extractedWord = extractedWord.replace('ǜ', 'u')
extractedWord = extractedWord.replace('û', 'u')
extractedWord = extractedWord.replace('ș', 's')
extractedWord = extractedWord.replace('ş', 's')
extractedWord = extractedWord.replace('ß', 'ss')
extractedWord = extractedWord.replace('-', ' ')
# Those should not exist anymore, however, be safe
extractedWord = extractedWord.replace('–', ' ')
extractedWord = extractedWord.replace('/', ' ')
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = ''.join([char for char in extractedWord if char in allowed_chars])
extractedWord = whitespace_regex.sub(' ', extractedWord)
extractedWord = extractedWord.strip()
return extractedWord
def _onlyTranscriptionsWithMediaFilesAndViceVersa(self, transcriptionsPerSpeaker, filesForMediaBundle):
if not transcriptionsPerSpeaker or not filesForMediaBundle:
return []
existingMediaFilesTuples = [(self._getFilenameWithoutExtension(mediafile), mediafile) for mediafile in
filesForMediaBundle]
existingMediaFiles, existingMediaFilesFullpath = zip(*existingMediaFilesTuples)
# combine all transcriptions
allTranscriptions = pd.concat([transcription[1] for transcription in transcriptionsPerSpeaker])
if any("-" in filename for filename in allTranscriptions.Filename) \
and not any("-" in filename for filename in existingMediaFiles):
self.logger.debug(
"Found filenames with dash (-) instead of underscore (_) but only filenames with underscore. Automatically fixing this...")
allTranscriptions.Filename = allTranscriptions.Filename.str.replace("-", "_")
# Find all files that exist in both sets
# TODO: Performance not good for 70k files
allMatchingTranscriptions = allTranscriptions[allTranscriptions.Filename.isin(existingMediaFiles)].copy()
allMatchingTranscriptions["FullpathFilename"] = ""
allMatchingTranscriptions.set_index("Filename", inplace=True)
for filenumber, existingFile in enumerate(existingMediaFiles):
allMatchingTranscriptions.loc[existingFile, "FullpathFilename"] = existingMediaFilesFullpath[filenumber]
return allMatchingTranscriptions[["FullpathFilename", "transcript"]].copy()
def _createActualMediaAnnotationBundles(self, mediaFilesAndTranscription):
bundles = []
for fileAndTranscription in mediaFilesAndTranscription.itertuples(index=False):
bundle = MediaAnnotationBundle(fileAndTranscription.FullpathFilename)
speakerId = self._speakerIdFromFullpath(fileAndTranscription.FullpathFilename)
bundle.setMediaFile(MediaFile(speakerId))
written_resource = WrittenResource(fileAndTranscription.transcript, speakerId, languageCode="CH",
annotationType=WrittenResource.DIETH_WITHOUT_GRAVIS)
bundle.setWrittenResource(written_resource)
bundles.append(bundle)
self.logger.debug("Created {} mediaAnnotationBundles out of {} transcriptions".format(len(bundles), len(
mediaFilesAndTranscription)))
return bundles
def _speakerIdFromFullpath(self, fullpathFilename):
return self._getFilenameWithoutExtension(fullpathFilename).split("_")[0]
def _createMediaSessionActorsFromBundles(self, bundles):
speakerIds = set([speaker.writtenResource.actorRef for speaker in bundles])
actors = [MediaSessionActor(speakerId, Sex.UNKNOWN, None) for speakerId in speakerIds]
return MediaSessionActors(actors)
def _cleanSpecialCaseWhereTwoSentencesPerFileExist(self, transcriptionForSpeaker):
if transcriptionForSpeaker is None or len(transcriptionForSpeaker) < 2:
return transcriptionForSpeaker
lastFilename = transcriptionForSpeaker.iloc[-1]["Filename"]
filenameBefore = transcriptionForSpeaker.iloc[-2]["Filename"]
if lastFilename == filenameBefore:
lastTranscription = transcriptionForSpeaker.iloc[-1]["transcript"]
transcriptionBefore = transcriptionForSpeaker.iloc[-2]["transcript"]
newTranscript = transcriptionBefore + " " + lastTranscription
transcriptionForSpeaker.drop(transcriptionForSpeaker.tail(2).index, inplace=True)
transcriptionForSpeaker = transcriptionForSpeaker.append(
{'Filename': lastFilename, 'transcript': newTranscript}, ignore_index=True)
self.logger.info(
"Found a case {} where two sentences '{}' and '{}' are within one audio-file, merging them together".format(
lastFilename,
transcriptionBefore, lastTranscription))
return transcriptionForSpeaker
class CommonVoiceAdapter(Adapter):
RELATIVE_PATH_TO_AUDIO = "clips"
LANGUAGECODE_DE = "de_DE"
ADAPTERNAME = "CommonVoiceDE"
mediaAnnotationBundles = []
mediaSessionActors = set() # using a set so we don't have duplets
def __init__(self, config):
super(CommonVoiceAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("Created CommonVoice Adapter")
self.audiofilenames = self._readExistingAudioFiles()
self.speakermetadata = self._readExistingSpeakerMetadata()
self._persistMetamodel()
self._buildMediaSession()
return self.mediaSession
def _validateKorpusPath(self):
korpus_path = self.config['common_voice_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _existingAudioFileFullpath(self, filename):
return os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO, filename)
def _readExistingAudioFiles(self):
fullpath = os.path.join(self._validateKorpusPath(), self.RELATIVE_PATH_TO_AUDIO)
for file in os.listdir(fullpath):
if file.endswith(".mp3"):
currentfile = MediaAnnotationBundle(self._existingAudioFileFullpath(file))
self.mediaAnnotationBundles.append(currentfile)
self.logger.debug("Found {} audiofiles to process".format(len(self.mediaAnnotationBundles)))
pass
def _readExistingSpeakerMetadata(self, ):
existing_audio_identifier = self._getFilenamesFromMediaAnnotationBundles()
common_voice_valid_metadata = self._getCommonVoiceValidMetadata(
existing_audio_identifier, self._validateKorpusPath())
self._enrichWithTranscription(common_voice_valid_metadata)
self._extractMediaSessionActors(common_voice_valid_metadata)
def _enrichWithTranscription(self, common_voice_valid_metadata):
self.mediaAnnotationBundles_dictionary_withoutExtension = {self._getFilenameWithoutExtension(x.identifier): x for x
in self.mediaAnnotationBundles}
self.mediaAnnotationBundles_dictionary_withExtension = {self._getFilenameWithExtension(x.identifier): x for x in
self.mediaAnnotationBundles}
common_voice_valid_metadata.apply(self._enrichWithTranscriptionInner, axis=1)
pass
def _enrichWithTranscriptionInner(self, row):
currentMediaAnnotationBundle = self.mediaAnnotationBundles_dictionary_withoutExtension.get(row.path,
self.mediaAnnotationBundles_dictionary_withExtension.get(
row.path))
currentMediaAnnotationBundle.setWrittenResource(
WrittenResource(row.sentence, row.client_id, self.LANGUAGECODE_DE))
currentMediaAnnotationBundle.setMediaFile(MediaFile(row.client_id))
self.logger.debug(
"Found matching media-annotation bundle for identifier {} and path {}".format(row.client_id, row.path))
def _extractMediaSessionActors(self, common_voice_valid_metadata):
common_voice_valid_metadata.apply(self._createMediaSessionActorFromRow, axis=1)
self.logger.debug("Found {} Speakers".format(len(self.mediaSessionActors)))
pass
def _createMediaSessionActorFromRow(self, row):
self.mediaSessionActors.add(MediaSessionActor(row.client_id, Sex.toSexEnum(row.gender), row.age))
pass
def _getCommonVoiceValidMetadata(self, existing_audio_identifier,
korpus_path):
commonvoice_valid_metadatafilenames = ["dev.tsv", "test.tsv", "train.tsv", "validated.tsv"]
combined_csv = pd.concat(
[pd.read_csv(os.path.join(korpus_path, f), sep="\t", header=0) for f in commonvoice_valid_metadatafilenames])
common_voice_valid_metadata = combined_csv[combined_csv.path.isin(existing_audio_identifier)]
common_voice_valid_metadata = self._fixChangeInDataFormatCommonVoice(common_voice_valid_metadata, combined_csv)
return common_voice_valid_metadata
def _getFilenamesFromMediaAnnotationBundles(self):
return [os.path.splitext(os.path.basename(base.identifier))[0] for base in
self.mediaAnnotationBundles]
def _getFilenamesFromMediaAnnotationBundlesWithExtension(self):
return [os.path.basename(base.identifier) for base in self.mediaAnnotationBundles]
def _persistMetamodel(self):
# TODO actual persisting of working json
# Actual json output
# print(json.dumps(self.mediaAnnotationBundles, default=lambda o: o.__dict__, sort_keys=True, indent=4))
pass
def _buildMediaSession(self):
actors = MediaSessionActors(self.mediaSessionActors)
session = MediaSession(self.ADAPTERNAME, actors, self.mediaAnnotationBundles)
# TODO Validate
self.mediaSession = session
pass
def _fixChangeInDataFormatCommonVoice(self, common_voice_valid_metadata, combined_csv):
if (len(common_voice_valid_metadata) == 0):
self.logger.debug(
"CommonVoice tsv-files seem to have the filename extension set (new file format). Trying to match with extension")
common_voice_valid_metadata = combined_csv[
combined_csv.path.isin(self._getFilenamesFromMediaAnnotationBundlesWithExtension())]
self.logger.debug(
"CommonVoice Valid metadata length is: {}".format(len(common_voice_valid_metadata)))
return common_voice_valid_metadata
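# --- Illustrative sketch (not part of the adapter above) ---
# _fixChangeInDataFormatCommonVoice falls back from extension-less 'path'
# values (old CommonVoice tsv format) to values ending in '.mp3' (newer
# format). The standalone helper below only illustrates that matching logic;
# its name and the example values are made up for illustration.
def _illustrate_commonvoice_path_matching():
    import pandas as pd
    metadata = pd.DataFrame({'path': ['clip_a.mp3', 'clip_b.mp3']})
    without_extension = ['clip_a', 'clip_b']       # identifiers stripped of '.mp3'
    with_extension = ['clip_a.mp3', 'clip_b.mp3']  # identifiers kept as filenames
    matched = metadata[metadata.path.isin(without_extension)]
    if len(matched) == 0:  # old-format match failed -> retry with the extension
        matched = metadata[metadata.path.isin(with_extension)]
    return matched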
development/multiImage_pytorch/experiment.py | anaikawadi/svbrdf-estimation | 0 | 9544 | <reponame>anaikawadi/svbrdf-estimation
import matplotlib.pyplot as plt
import math
import shutil
import torch
from accelerate import Accelerator
from tensorboardX import SummaryWriter
from cli import parse_args
from dataset import SvbrdfDataset
from losses import MixedLoss, MixedLoss2, MixedLoss3
from models import MultiViewModel, SingleViewModel
from pathlib import Path
from persistence import Checkpoint
from renderers import LocalRenderer, RednerRenderer
import utils
import environment as env
import numpy as np
import sys
from PIL import Image
class Identity(torch.nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
args = parse_args()
clean_training = args.mode == 'train' and args.retrain
# Load the checkpoint
checkpoint_dir = Path(args.model_dir)
checkpoint = Checkpoint()
if not clean_training:
checkpoint = Checkpoint.load(checkpoint_dir)
# Immediately restore the arguments if we have a valid checkpoint
if checkpoint.is_valid():
args = checkpoint.restore_args(args)
# Make the result reproducible
utils.enable_deterministic_random_engine()
# Determine the device
accelerator = Accelerator()
device = accelerator.device
# Create the model
model = MultiViewModel(use_coords=args.use_coords).to(device)
if checkpoint.is_valid():
model = checkpoint.restore_model_state(model)
elif args.mode == 'test':
print("No model found in the model directory but it is required for testing.")
exit(1)
# TODO: Choose a random number for the used input image count if we are training and we don't request it to be fixed (see fixImageNb for reference)
data = SvbrdfDataset(data_directory=args.input_dir,
image_size=args.image_size, scale_mode=args.scale_mode, input_image_count=args.image_count, used_input_image_count=args.used_image_count,
use_augmentation=True, mix_materials=args.mode == 'train',
no_svbrdf=args.no_svbrdf_input, is_linear=args.linear_input)
epoch_start = 0
# model.generator.delete()
# model = torch.nn.Sequential(
# *list(model.children())[:-8],
# )
# print(*list(model.parameters()))
if args.mode == 'train':
validation_split = 0.01
print("Using {:.2f} % of the data for validation".format(
round(validation_split * 100.0, 2)))
training_data, validation_data = torch.utils.data.random_split(data, [int(math.ceil(
len(data) * (1.0 - validation_split))), int(math.floor(len(data) * validation_split))])
print("Training samples: {:d}.".format(len(training_data)))
print("Validation samples: {:d}.".format(len(validation_data)))
training_dataloader = torch.utils.data.DataLoader(
training_data, batch_size=8, pin_memory=True, shuffle=True)
validation_dataloader = torch.utils.data.DataLoader(
validation_data, batch_size=8, pin_memory=True, shuffle=False)
batch_count = int(math.ceil(len(training_data) /
training_dataloader.batch_size))
# Train as many epochs as specified
epoch_end = args.epochs
print("Training from epoch {:d} to {:d}".format(epoch_start, epoch_end))
# Set up the optimizer
# TODO: Use betas=(0.5, 0.999)
L = torch.FloatTensor(5, 3).uniform_(0.2, 1.0)
L = L / torch.linalg.norm(L, ord=2, dim=-1, keepdim=True)
L[:, :2] = 2.0 * L[:, :2] - 1.0
V = torch.FloatTensor(1, 3).uniform_(0.2, 1.0)
V = V / torch.linalg.norm(V, ord=2, dim=-1, keepdim=True)
V[:, :2] = 2.0 * V[:, :2] - 1.0
scenes = env.generate_specific_scenes(5, L, L)
L.requires_grad = True
VIP = [L]
# V.requires_grad = True
optimizer = torch.optim.Adam(VIP, lr=0.1)
model, optimizer, training_dataloader, validation_dataloader = accelerator.prepare(
model, optimizer, training_dataloader, validation_dataloader)
# print("scene", scene.camera)
# TODO: Use scheduler if necessary
#scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')
# Set up the loss
loss_renderer = LocalRenderer()
loss_function = MixedLoss2(loss_renderer, scenes)
# Setup statistics stuff
statistics_dir = checkpoint_dir / "logs"
if clean_training and statistics_dir.exists():
# Nuke the stats dir
shutil.rmtree(statistics_dir)
statistics_dir.mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(str(statistics_dir.absolute()))
last_batch_inputs = None
# Clear checkpoint in order to free up some memory
checkpoint.purge()
lights = []
losses = []
for epoch in range(epoch_start, epoch_end):
for i, batch in enumerate(training_dataloader):
# Unique index of this batch
print("Ldet", (L.detach().numpy())[0])
lights.append(((L.detach().numpy())[0]).tolist())
scenes = env.generate_specific_scenes(5, L, L)
print("L", L)
# if(epoch_end - epoch < 3):
loss_function = MixedLoss2(loss_renderer, scenes)
# else:
# loss_function = MixedLoss2(loss_renderer, scene[0])
batch_index = epoch * batch_count + i
# Construct inputs
batch_inputs = batch["inputs"].to(device)
batch_svbrdf = batch["svbrdf"].to(device)
# Perform a step
optimizer.zero_grad()
outputs = model(batch_inputs)
print("batch_inputs", batch_inputs.size())
print("batch_svbrdfs", batch_svbrdf.size())
print("batch_outputs", outputs.size())
loss = loss_function(outputs, batch_svbrdf)
accelerator.backward(loss)
optimizer.step()
print("Epoch {:d}, Batch {:d}, loss: {:f}".format(
epoch, i + 1, loss.item()))
losses.append((epoch, loss.item()))
# Statistics
writer.add_scalar("loss", loss.item(), batch_index)
last_batch_inputs = batch_inputs
lights.append(((L.detach().numpy())[0]).tolist())
with open('/content/experiment1/losses/loss.txt', "w") as text_file:
text_file.write(str(losses))
print("lights1", lights)
# print(len(lights))
lights2 = []
for j in range(len(lights)):
if j%10 == 0:
lights2.append(lights[j])
# print("lights2", lights)
# l=np.array(lights)
l = np.array(lights2)
renderer = LocalRenderer()
rendered_scene = env.generate_specific_scenes(1, L.detach(), L.detach())
img = renderer.render(rendered_scene[0], batch_svbrdf[0])
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/render1.png')
img = renderer.render(rendered_scene[0], outputs[0])
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/render2.png')
# print("size", batch_inputs.size())
torch.add(L, 5)  # note: the result is discarded, so L is unchanged here (likely intended: L = L + 5)
print("L", L)
rendered_scene = env.generate_specific_scenes(1, L, L)
img = renderer.render(rendered_scene[0], batch_svbrdf[0])
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img[0].detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/render3.png')
print("size", batch_inputs[0][0].size())
img = batch_inputs[0][0]
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/render4.png')
print("size", batch_inputs[0][0].size())
normals, diffuse, roughness, specular = utils.unpack_svbrdf(outputs[0])
img = normals
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/output_normal.png')
img = diffuse
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/output_diffuse.png')
img = roughness
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/output_roughness.png')
img = specular
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/output_specular.png')
print("size", batch_inputs[0][0].size())
normals, diffuse, roughness, specular = utils.unpack_svbrdf(batch_svbrdf[0])
img = normals
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/target_normal.png')
img = diffuse
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/target_diffuse.png')
img = roughness
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/target_roughness.png')
img = specular
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/target_specular.png')
images = [Image.open(x) for x in ['/content/experiment1/figures/target_normal.png', '/content/experiment1/figures/target_diffuse.png', '/content/experiment1/figures/target_roughness.png', '/content/experiment1/figures/target_specular.png']]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save('/content/experiment1/figures/target_svbrdf.png')
images = [Image.open(x) for x in ['/content/experiment1/figures/output_normal.png', '/content/experiment1/figures/output_diffuse.png', '/content/experiment1/figures/output_roughness.png', '/content/experiment1/figures/output_specular.png']]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save('/content/experiment1/figures/output_svbrdf.png')
print("size", batch_inputs[0][0].size())
normals, diffuse, roughness, specular = utils.unpack_svbrdf(outputs[0])
img = normals
fig = plt.figure(frameon=False)
# fig.set_size_inches(w,h)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# print("shape", img.size())
ax.imshow(img.detach().permute(1,2,0), aspect='auto')
fig.savefig('/content/experiment1/figures/output_normal.png')
print("lights3", l)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter([0.0], [0.0], [0.0], marker='o', c='r')
# v = V.detach().numpy()
ax.scatter(l[:,0], l[:,1], l[:,2], marker='.', c='g')
# ax.scatter(v[:,0], v[:,1], v[:,2], marker='^', c='b')
ax.set_xlim(-8, 8)
ax.set_ylim(-8, 8)
ax.set_zlim(-8., 8.)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# plt.show()
plt.savefig('/content/experiment1/figures/light.png')
plt.show()
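# --- Illustrative sketch (not part of the original script) ---
# The figure-saving blocks above all repeat the same frameless-axes pattern.
# A helper such as the one below (its name and the plt.close() call are
# additions for illustration) could replace each of those blocks. It assumes
# a CPU-resident CHW tensor, as produced by the renderer / unpack_svbrdf
# calls above.
def save_image_tensor(image, path):
    """Save a CHW image tensor to 'path' as a borderless matplotlib figure."""
    fig = plt.figure(frameon=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.imshow(image.detach().permute(1, 2, 0), aspect='auto')
    fig.savefig(path)
    plt.close(fig)  # avoid accumulating open figures

# Example usage (hypothetical): save_image_tensor(normals, '/content/experiment1/figures/output_normal.png')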
# if epoch % args.save_frequency == 0:
# Checkpoint.save(checkpoint_dir, args, model, optimizer, epoch)
# if epoch % args.validation_frequency == 0 and len(validation_data) > 0:
# model.eval()
# val_loss = 0.0
# batch_count_val = 0
# for batch in validation_dataloader:
# # Construct inputs
# batch_inputs = batch["inputs"].to(device)
# batch_svbrdf = batch["svbrdf"].to(device)
# outputs = model(batch_inputs)
# val_loss += loss_function(outputs, batch_svbrdf).item()
# batch_count_val += 1
# val_loss /= batch_count_val
# print("Epoch {:d}, validation loss: {:f}".format(epoch, val_loss))
# writer.add_scalar("val_loss", val_loss, epoch * batch_count)
# model.train()
src/rekognition_online_action_detection/engines/__init__.py | amazon-research/long-short-term-transformer | 52 | 9545 | <reponame>amazon-research/long-short-term-transformer
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from .engines import do_train, do_inference
from .lstr.lstr_trainer import *
from .lstr.lstr_inference import *
nuage_tempest_plugin/tests/api/test_nuage_ports.py | nuagenetworks/nuage-tempest-plugin | 1 | 9546 | <reponame>nuagenetworks/nuage-tempest-plugin
# Copyright 2017 NOKIA
# All Rights Reserved.
from netaddr import IPNetwork
import testtools
from tempest.common import waiters
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest.test import decorators
from nuage_tempest_plugin.lib.test.nuage_test import NuageAdminNetworksTest
from nuage_tempest_plugin.lib.test.nuage_test import NuageBaseTest
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()
LOG = Topology.get_logger(__name__)
class PortsTest(NuageBaseTest, NuageAdminNetworksTest,
manager.NetworkScenarioTest):
@classmethod
def setup_clients(cls):
super(PortsTest, cls).setup_clients()
cls.vsd_client = NuageRestClient()
def show_port(self, port_id):
"""Wrapper utility that shows a given port."""
body = self.ports_client.show_port(port_id)
return body['port']
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
network = {'uuid': network['id']}
if port_id is not None:
network['port'] = port_id
return self.create_server(
name=name,
networks=[network],
key_name=keypair['name'],
wait_until='ACTIVE')
def _delete_server(self, server_id, clients=None):
if clients is None:
clients = self.os_primary
clients.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(clients.servers_client, server_id)
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_create_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_with_router_detach_check_status(self):
network = self.create_network()
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"],
cleanup=False)
self.routers_client.remove_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_show_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
self.assertEqual('DOWN', port['status'])
port = self.show_port(port['id'])
# state has to remain DOWN as long as port is not bound
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_server_create_delete_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
server = self._create_server('s1', network, port['id'])
port = self.show_port(port['id'])
self.assertEqual('ACTIVE', port['status'])
self._delete_server(server['id'])
port = self.show_port(port['id'])
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_negative(self):
# Set up resources
# Base resources
if self.is_dhcp_agent_present():
raise self.skipException(
'Cannot run this test case when DHCP agent is enabled')
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.11/24"),
mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "172.16.31.10",
"subnet_id": subnet2["id"]
}
]
# Fail
msg = "Port can't have multiple IPv4 IPs of different subnets"
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network, fixed_ips=fixed_ips)
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_create_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network,
nuage_policy_groups=['Random_value'])
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_update_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network)
self.assertIsNotNone(port, "Unable to create port")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.update_port,
port=port,
nuage_policy_groups=['Random_value'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_negative(self):
if self.is_dhcp_agent_present():
raise self.skipException(
'Multiple subnets in a network not supported when DHCP agent '
'is enabled.')
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.11/24"),
mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
self.create_router_interface(router_id=router["id"],
subnet_id=subnet2["id"])
# Create port
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
self.assertEqual(port["fixed_ips"][0]["ip_address"], "10.0.0.5",
message="The port did not update properly.")
# Update to subnet2 should fail
fixed_ips = [
{
"ip_address": "172.16.31.10",
"subnet_id": subnet2["id"]
}
]
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if "Updating fixed ip of port" in e._error_string:
pass
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
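# --- Illustrative sketch (not part of the original test class) ---
# The VIP/MAC verification loop above is repeated almost verbatim in the
# tests that follow; a helper along these lines could centralise it. The
# method name and its placement are assumptions for illustration only.
def _verify_vport_vips(self, nuage_vport_vips, valid_vips, expected_mac=None):
    """Assert that every virtual IP reported by VSD is expected and,
    optionally, that its MAC matches the port's MAC address."""
    if valid_vips:
        self.assertTrue(nuage_vport_vips,
                        "Expected virtual IPs but the vport has none")
    for nuage_vport_vip in nuage_vport_vips:
        self.assertIn(nuage_vport_vip['virtualIP'], valid_vips)
        if expected_mac is not None:
            self.assertEqual(expected_mac, nuage_vport_vip['MAC'])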
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips,
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=[],
security_groups=[],
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap_outside_cidr(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '1.1.1.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
    def test_nuage_port_update_aap_to_fixed_ips_l3_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ip_with_vm_and_conflict_with_aap_neg(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
fixed_ips = [
{
"ip_address": "10.0.0.8",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
# below update will fail with proper roll back
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if ('Bad request: The IP Address 10.0.0.6 is'
' currently in use by subnet' in e._error_string):
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ip_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_with_aap_router_attach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_port_update_fixed_ips_same_subnet_with_aap_router_detach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"], cleanup=False)
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self.admin_routers_client.remove_router_interface(
router['id'],
subnet_id=subnet['id'])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED if Topology.from_nuage('5.4')
else constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_delete_unbound_port_with_hanging_vminterface(self):
# OPENSTACK-2797
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network, cleanup=False)
self.addCleanup(self._try_delete,
self.manager.ports_client.delete_port,
port['id'])
# Find vport
l2domain = self.vsd.get_l2domain(by_subnet_id=subnet['id'])
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
# Create "Fake" VM interface to simulate following behavior:
# -> Port is being bound -> VM created -> port deleted ->
# Port not bound but leftover VM on VSD
vminterface = self.vsd.vspk.NUVMInterface(
name='test-fip-vm', vport_id=vport.id,
external_id=self.vsd.external_id(port['id']),
mac='E6:04:AA:7A:AA:86', ip_address='10.0.0.10')
vm = self.vsd.vspk.NUVM(name='test-port-delete-vm',
uuid='1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8',
external_id=self.vsd.external_id(
'1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8'),
interfaces=[vminterface])
# Impersonate tenant user for appropriate permissions on VM
self.vsd.session().impersonate(port['tenant_id'],
self.default_netpartition_name)
self.vsd.session().user.create_child(vm)
self.vsd.session().stop_impersonate()
# Delete port, VM should be deleted in this request
self.delete_port(port)
# Verify that vport is deleted
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
self.assertIsNone(vport, 'Vport not deleted by Port delete statement')
| # Copyright 2017 NOKIA
# All Rights Reserved.
from netaddr import IPNetwork
import testtools
from tempest.common import waiters
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest.test import decorators
from nuage_tempest_plugin.lib.test.nuage_test import NuageAdminNetworksTest
from nuage_tempest_plugin.lib.test.nuage_test import NuageBaseTest
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()
LOG = Topology.get_logger(__name__)
class PortsTest(NuageBaseTest, NuageAdminNetworksTest,
manager.NetworkScenarioTest):
@classmethod
def setup_clients(cls):
super(PortsTest, cls).setup_clients()
cls.vsd_client = NuageRestClient()
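    # NOTE: throughout these tests Neutron objects are correlated with their
    # VSD counterparts via the externalID field: the subnet maps to an
    # L2_DOMAIN or SUBNETWORK parent and the port maps to a vport, both
    # looked up with filters='externalID', filter_value=<neutron uuid>.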
def show_port(self, port_id):
"""Wrapper utility that shows a given port."""
body = self.ports_client.show_port(port_id)
return body['port']
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
network = {'uuid': network['id']}
if port_id is not None:
network['port'] = port_id
return self.create_server(
name=name,
networks=[network],
key_name=keypair['name'],
wait_until='ACTIVE')
def _delete_server(self, server_id, clients=None):
if clients is None:
clients = self.os_primary
clients.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(clients.servers_client, server_id)
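    # The two helpers above wrap the nova server lifecycle so the port tests
    # can focus on Neutron/VSD state.  Illustrative usage (a sketch only,
    # mirroring test_nuage_port_create_server_create_delete_check_status):
    #
    #     server = self._create_server('s1', network, port['id'])
    #     self.assertEqual('ACTIVE', self.show_port(port['id'])['status'])
    #     self._delete_server(server['id'])
    #     self.assertEqual('DOWN', self.show_port(port['id'])['status'])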
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_create_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_with_router_detach_check_status(self):
network = self.create_network()
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"],
cleanup=False)
self.routers_client.remove_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_show_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
self.assertEqual('DOWN', port['status'])
port = self.show_port(port['id'])
# state has to remain DOWN as long as port is not bound
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_server_create_delete_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
server = self._create_server('s1', network, port['id'])
port = self.show_port(port['id'])
self.assertEqual('ACTIVE', port['status'])
self._delete_server(server['id'])
port = self.show_port(port['id'])
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_negative(self):
# Set up resources
# Base resources
if self.is_dhcp_agent_present():
raise self.skipException(
'Cannot run this test case when DHCP agent is enabled')
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
        subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.0/24"),
                                     mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "172.16.31.10",
"subnet_id": subnet2["id"]
}
]
# Fail
msg = "Port can't have multiple IPv4 IPs of different subnets"
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network, fixed_ips=fixed_ips)
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_create_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network,
nuage_policy_groups=['Random_value'])
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_update_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network)
self.assertIsNotNone(port, "Unable to create port")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.update_port,
port=port,
nuage_policy_groups=['Random_value'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_negative(self):
if self.is_dhcp_agent_present():
raise self.skipException(
'Multiple subnets in a network not supported when DHCP agent '
'is enabled.')
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
        subnet2 = self.create_subnet(network, cidr=IPNetwork("192.168.3.0/24"),
                                     mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
self.create_router_interface(router_id=router["id"],
subnet_id=subnet2["id"])
# Create port
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
self.assertEqual(port["fixed_ips"][0]["ip_address"], "10.0.0.5",
message="The port did not update properly.")
# Update to subnet2 should fail
fixed_ips = [
{
"ip_address": "172.16.31.10",
"subnet_id": subnet2["id"]
}
]
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if "Updating fixed ip of port" in e._error_string:
pass
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
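    # NOTE: while a subnet is not attached to a router, the VSD parent of its
    # vports is an L2_DOMAIN; once it is attached, the vports live under a
    # SUBNETWORK of an L3 domain.  That is why the L2 variants of these tests
    # resolve the parent with constants.L2_DOMAIN and the L3 variants with
    # constants.SUBNETWORK (see also the router attach/detach tests in this
    # class).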
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
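    # NOTE: in the assertions of these tests addressSpoofing is INHERITED for
    # ports that only carry fixed IPs (and in-subnet allowed address pairs on
    # L3), while the *_no_security, *_l2_with_aap and *_outside_cidr variants
    # expect ENABLED, reflecting that spoofing checks are relaxed when port
    # security is disabled or when an allowed address pair falls outside the
    # subnet and thus cannot be represented as a VIP.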
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips,
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=[],
security_groups=[],
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap_outside_cidr(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '1.1.1.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
    def test_nuage_port_update_aap_to_fixed_ips_l3_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ip_with_vm_and_conflict_with_aap_neg(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
fixed_ips = [
{
"ip_address": "10.0.0.8",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
# below update will fail with proper roll back
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if ('Bad request: The IP Address 10.0.0.6 is'
' currently in use by subnet' in e._error_string):
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ip_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_with_aap_router_attach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_port_update_fixed_ips_same_subnet_with_aap_router_detach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"], cleanup=False)
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self.admin_routers_client.remove_router_interface(
router['id'],
subnet_id=subnet['id'])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED if Topology.from_nuage('5.4')
else constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_delete_unbound_port_with_hanging_vminterface(self):
# OPENSTACK-2797
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network, cleanup=False)
self.addCleanup(self._try_delete,
self.manager.ports_client.delete_port,
port['id'])
# Find vport
l2domain = self.vsd.get_l2domain(by_subnet_id=subnet['id'])
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
# Create "Fake" VM interface to simulate following behavior:
# -> Port is being bound -> VM created -> port deleted ->
# Port not bound but leftover VM on VSD
vminterface = self.vsd.vspk.NUVMInterface(
name='test-fip-vm', vport_id=vport.id,
external_id=self.vsd.external_id(port['id']),
mac='E6:04:AA:7A:AA:86', ip_address='10.0.0.10')
vm = self.vsd.vspk.NUVM(name='test-port-delete-vm',
uuid='1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8',
external_id=self.vsd.external_id(
'1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8'),
interfaces=[vminterface])
# Impersonate tenant user for appropriate permissions on VM
self.vsd.session().impersonate(port['tenant_id'],
self.default_netpartition_name)
self.vsd.session().user.create_child(vm)
self.vsd.session().stop_impersonate()
# Delete port, VM should be deleted in this request
self.delete_port(port)
# Verify that vport is deleted
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
self.assertIsNone(vport, 'Vport not deleted by Port delete statement') | en | 0.811049 | # Copyright 2017 NOKIA # All Rights Reserved. Wrapper utility that shows a given port. # state has to remain DOWN as long as port is not bound # Set up resources # Base resources # Fail # Set up resources # Base resources # Attach subnet # Create port # update within subnet should succeed # Update to subnet2 should fail # Differentiate between VSD failure and update failure # Set up resources # Base resources # Set up resources # Base resources # update within subnet should succeed # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # update within subnet should succeed # Set up resources # Base resources # Attach subnet # update within subnet should succeed # Set up resources # Base resources # Set up resources # Base resources # update within subnet should succeed # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # update within subnet should succeed # Set up resources # Base resources # Attach subnet # update within subnet should succeed # Set up resources # Base resources # Attach subnet # update within subnet should succeed # Set up resources # Base resources # Attach subnet # update within subnet should succeed # below update will fail with proper roll back # Differentiate between VSD failure and update failure # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # Set up resources # Base resources # Attach subnet # update within subnet should succeed # OPENSTACK-2797 # Find vport # Create "Fake" VM interface to simulate following behavior: # -> Port is being bound -> VM created -> port deleted -> # Port not bound but leftover VM on VSD # Impersonate tenant user for appropriate permissions on VM # Delete port, VM should be deleted in this request # Verify that vport is deleted | 1.907408 | 2 |
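The VIP-consistency check exercised throughout the tests above (collect valid_vips, walk nuage_vport_vips, flag vip_mismatch and mac_mismatch, assert both are False) is repeated almost verbatim in every test method. A small helper along the following lines would express it once; this is only a sketch, and vips_match with its parameters is an invented name, not part of the nuage tempest plugin.

def vips_match(nuage_vport_vips, valid_vips, expected_mac=None):
    # Expecting VIPs on VSD but none reported -> mismatch.
    if valid_vips and not nuage_vport_vips:
        return False
    for vip in nuage_vport_vips:
        if vip['virtualIP'] not in valid_vips:
            return False
        # Optionally verify the MAC carried by each virtual IP entry.
        if expected_mac is not None and vip.get('MAC') != expected_mac:
            return False
    return True

# Inside a test the repeated block would then collapse to one assertion, e.g.:
# self.assertTrue(vips_match(nuage_vport_vips, valid_vips, port['mac_address']))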
advent_of_code/2019/11_space_police/aoc_2019_11.py | thanosa/coding-challenges | 0 | 9547 | ''' Advent of code 2019 Day 11 - Space police '''
from typing import Iterator, NamedTuple
from enum import Enum
INPUT_FILE=__file__.replace('.py', '.dat')
def to_number(digits: list) -> int:
return int(''.join(map(str, digits)))
def to_list(number: int) -> list:
return [int(i) for i in str(number)]
def get_modes(instruction: int, parameter_count: int = 3) -> list:
params = instruction // 100
string = str(params).zfill(parameter_count)
return list(reversed(to_list(string)))
def get_dict(lst: list):
return {k: v for k,v in enumerate(lst)}
def get_value(code: dict, key: int):
if key in code:
return code[key]
else:
return 0
def run_program(code: dict, inputs: list) -> Iterator[int]:
code = code.copy()
output = 0
pos = 0
base = 0
counter = 0
while (code[pos] % 100) != 99:
instruction = code[pos + 0]
params = []
for i in range(3):
try:
param = code[pos + 1 + i]
except:
param = None
params.append(param)
operation = instruction % 100
modes = get_modes(instruction)
values = [0] * 2
# Addition
if operation == 1:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if modes[2] == 0:
code[params[2]] = values[0] + values[1]
else:
code[params[2] + base] = values[0] + values[1]
pos += 4
# Multiplication
elif operation == 2:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if modes[2] == 0:
code[params[2]] = values[0] * values[1]
else:
code[params[2] + base] = values[0] * values[1]
pos += 4
# Store input
elif operation == 3:
if modes[0] == 0:
code[params[0]] = inputs.pop(0)
elif modes[0] == 2:
code[params[0] + base] = inputs.pop(0)
else:
raise RuntimeError("fail")
pos += 2
# Get output
elif operation == 4:
if modes[0] == 0:
values[0] = get_value(code, params[0])
elif modes[0] == 1:
values[0] = params[0]
elif modes[0] == 2:
values[0] = get_value(code, params[0] + base)
yield values[0]
pos += 2
# Jump if true
elif operation == 5:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] != 0:
pos = values[1]
else:
pos += 3
# Jump if false
elif operation == 6:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] == 0:
pos = values[1]
else:
pos += 3
# Less than
elif operation == 7:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] < values[1]:
if modes[2] == 0:
code[params[2]] = 1
else:
code[params[2] + base] = 1
else:
if modes[2] == 0:
code[params[2]] = 0
else:
code[params[2] + base] = 0
pos += 4
# Equals
elif operation == 8:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] == values[1]:
if modes[2] == 0:
code[params[2]] = 1
else:
code[params[2] + base] = 1
else:
if modes[2] == 0:
code[params[2]] = 0
else:
code[params[2] + base] = 0
pos += 4
# Relative base shift
elif operation == 9:
i = 0
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
base += values[i]
pos += 2
else:
raise RuntimeError(f"error in operation: {pos}")
class Point(NamedTuple):
X: int
Y: int
class Direction(Enum):
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
def run_robot(code: dict, start_on_white: bool = False) -> dict:
DIRECTIONS_COUNT = 4
direction = Direction.UP
panels = {}
seen = set()
color = []
position = Point(0, 0)
if start_on_white:
panels[position] = 1
finished = False
brain = run_program(code, color)
while True:
try:
# Sense the color on the point. Default is black (0).
if position in panels:
color.append(panels[position])
else:
color.append(0)
paint = next(brain)
rotation = next(brain)
if paint == "" or rotation == "":
raise RuntimeError(f"Failed to read paint: {paint}, rotation: {rotation}")
# Paints the panel.
panels[position] = paint
# Keeps track of all visited points.
seen.add(position)
# Turn left (0) or right (1).
if rotation == 0:
direction = Direction((direction.value + 1) % DIRECTIONS_COUNT)
elif rotation == 1:
direction = Direction((direction.value - 1) % DIRECTIONS_COUNT)
# Move a step forward.
if direction == Direction.UP:
position = Point(position.X, position.Y - 1)
elif direction == Direction.LEFT:
position = Point(position.X - 1, position.Y)
elif direction == Direction.DOWN:
position = Point(position.X, position.Y + 1)
elif direction == Direction.RIGHT:
position = Point(position.X + 1, position.Y)
else:
raise RuntimeError(f"Wrong direction: {direction}")
except StopIteration:
return panels
def print_panels(panels: dict):
min_x = min(panels, key=lambda panel: panel.X).X
max_x = max(panels, key=lambda panel: panel.X).X
min_y = min(panels, key=lambda panel: panel.Y).Y
max_y = max(panels, key=lambda panel: panel.Y).Y
print(f"{min_x} {max_x} {min_y} {max_y}")
for y in range(min_y, max_y + 1):
row = []
for x in range(min_x, max_x + 1):
point = Point(x, y)
if point in panels:
if panels[Point(x, y)] == 1:
row.append("#")
else:
row.append(" ")
else:
row.append(" ")
print(''.join(row))
# Read the input
with open(INPUT_FILE) as f:
input_dict = get_dict(list(map(int, f.read().strip().split(','))))
# Part 1 solution
panels_count = len(run_robot(input_dict))
print(f"Part 1: {panels_count}")
# Part 2 solution
panels = run_robot(input_dict, True)
print(f"Part 2:")
print_panels(panels)
| ''' Advent of code 2019 Day 11 - Space police '''
from typing import Iterator, NamedTuple
from enum import Enum
INPUT_FILE=__file__.replace('.py', '.dat')
def to_number(digits: list) -> int:
return int(''.join(map(str, digits)))
def to_list(number: int) -> list:
return [int(i) for i in str(number)]
def get_modes(instruction: int, parameter_count: int = 3) -> list:
params = instruction // 100
string = str(params).zfill(parameter_count)
return list(reversed(to_list(string)))
def get_dict(lst: list):
return {k: v for k,v in enumerate(lst)}
def get_value(code: dict, key: int):
if key in code:
return code[key]
else:
return 0
def run_program(code: dict, inputs: list) -> Iterator[int]:
code = code.copy()
output = 0
pos = 0
base = 0
counter = 0
while (code[pos] % 100) != 99:
instruction = code[pos + 0]
params = []
for i in range(3):
try:
param = code[pos + 1 + i]
except:
param = None
params.append(param)
operation = instruction % 100
modes = get_modes(instruction)
values = [0] * 2
# Addition
if operation == 1:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if modes[2] == 0:
code[params[2]] = values[0] + values[1]
else:
code[params[2] + base] = values[0] + values[1]
pos += 4
# Multiplication
elif operation == 2:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if modes[2] == 0:
code[params[2]] = values[0] * values[1]
else:
code[params[2] + base] = values[0] * values[1]
pos += 4
# Store input
elif operation == 3:
if modes[0] == 0:
code[params[0]] = inputs.pop(0)
elif modes[0] == 2:
code[params[0] + base] = inputs.pop(0)
else:
raise RuntimeError("fail")
pos += 2
# Get output
elif operation == 4:
if modes[0] == 0:
values[0] = get_value(code, params[0])
elif modes[0] == 1:
values[0] = params[0]
elif modes[0] == 2:
values[0] = get_value(code, params[0] + base)
yield values[0]
pos += 2
# Jump if true
elif operation == 5:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] != 0:
pos = values[1]
else:
pos += 3
# Jump if false
elif operation == 6:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] == 0:
pos = values[1]
else:
pos += 3
# Less than
elif operation == 7:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] < values[1]:
if modes[2] == 0:
code[params[2]] = 1
else:
code[params[2] + base] = 1
else:
if modes[2] == 0:
code[params[2]] = 0
else:
code[params[2] + base] = 0
pos += 4
# Equals
elif operation == 8:
for i in range(2):
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
if values[0] == values[1]:
if modes[2] == 0:
code[params[2]] = 1
else:
code[params[2] + base] = 1
else:
if modes[2] == 0:
code[params[2]] = 0
else:
code[params[2] + base] = 0
pos += 4
# Relative base shift
elif operation == 9:
i = 0
if modes[i] == 0:
values[i] = get_value(code, params[i])
elif modes[i] == 1:
values[i] = params[i]
elif modes[i] == 2:
values[i] = get_value(code, params[i] + base)
base += values[i]
pos += 2
else:
raise RuntimeError(f"error in operation: {pos}")
class Point(NamedTuple):
X: int
Y: int
class Direction(Enum):
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
def run_robot(code: dict, start_on_white: bool = False) -> dict:
DIRECTIONS_COUNT = 4
direction = Direction.UP
panels = {}
seen = set()
color = []
position = Point(0, 0)
if start_on_white:
panels[position] = 1
finished = False
brain = run_program(code, color)
while True:
try:
# Sense the color on the point. Default is black (0).
if position in panels:
color.append(panels[position])
else:
color.append(0)
paint = next(brain)
rotation = next(brain)
if paint == "" or rotation == "":
raise RuntimeError(f"Failed to read paint: {paint}, rotation: {rotation}")
# Paints the panel.
panels[position] = paint
# Keeps track of all visited points.
seen.add(position)
# Turn left (0) or right (1).
if rotation == 0:
direction = Direction((direction.value + 1) % DIRECTIONS_COUNT)
elif rotation == 1:
direction = Direction((direction.value - 1) % DIRECTIONS_COUNT)
# Move a step forward.
if direction == Direction.UP:
position = Point(position.X, position.Y - 1)
elif direction == Direction.LEFT:
position = Point(position.X - 1, position.Y)
elif direction == Direction.DOWN:
position = Point(position.X, position.Y + 1)
elif direction == Direction.RIGHT:
position = Point(position.X + 1, position.Y)
else:
raise RuntimeError(f"Wrong direction: {direction}")
except StopIteration:
return panels
def print_panels(panels: dict):
min_x = min(panels, key=lambda panel: panel.X).X
max_x = max(panels, key=lambda panel: panel.X).X
min_y = min(panels, key=lambda panel: panel.Y).Y
max_y = max(panels, key=lambda panel: panel.Y).Y
print(f"{min_x} {max_x} {min_y} {max_y}")
for y in range(min_y, max_y + 1):
row = []
for x in range(min_x, max_x + 1):
point = Point(x, y)
if point in panels:
if panels[Point(x, y)] == 1:
row.append("#")
else:
row.append(" ")
else:
row.append(" ")
print(''.join(row))
# Read the input
with open(INPUT_FILE) as f:
input_dict = get_dict(list(map(int, f.read().strip().split(','))))
# Part 1 solution
panels_count = len(run_robot(input_dict))
print(f"Part 1: {panels_count}")
# Part 2 solution
panels = run_robot(input_dict, True)
print(f"Part 2:")
print_panels(panels)
| en | 0.538181 | Advent of code 2019 Day 11 - Space police # Addition # Multiplication # Store input # Get output # Jump if true # Jump if false # Less than # Equals # Relative base shift # Sense the color on the point. Default is black (0). # Paints the panel. # Keeps track of all visited points. # Turn left (0) or right (1). # Move a step forward. # Read the input # Part 1 solution # Part 2 solution | 3.399508 | 3 |
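The get_modes helper above peels the parameter modes off an intcode instruction: the two lowest decimal digits are the opcode, and each higher digit selects position (0), immediate (1) or relative (2) addressing for one parameter. The following standalone re-derivation is for illustration only; decode is a hypothetical name and is not part of the solution file.

def decode(instruction: int, parameter_count: int = 3):
    opcode = instruction % 100
    modes = []
    remainder = instruction // 100
    for _ in range(parameter_count):
        modes.append(remainder % 10)   # least-significant mode digit first
        remainder //= 10
    return opcode, modes

assert decode(104) == (4, [1, 0, 0])     # output, immediate operand
assert decode(1002) == (2, [0, 1, 0])    # multiply: position, immediate, position
assert decode(21101) == (1, [1, 1, 2])   # add: two immediates, relative target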
nicos_mlz/mira/setups/mezeiflip.py | jkrueger1/nicos | 12 | 9548 | description = 'Mezei spin flipper using TTI power supply'
group = 'optional'
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
dct1 = device('nicos.devices.entangle.PowerSupply',
description = 'current in first channel of supply (flipper current)',
tangodevice = tango_base + 'tti1/out1',
timeout = 1,
precision = 0.01,
),
dct2 = device('nicos.devices.entangle.PowerSupply',
description = 'current in second channel of supply (compensation current)',
tangodevice = tango_base + 'tti1/out2',
timeout = 1,
precision = 0.01,
),
flip = device('nicos.devices.polarized.MezeiFlipper',
description = 'Mezei flipper before sample (in shielding table)',
flip = 'dct1',
corr = 'dct2',
),
)
| description = 'Mezei spin flipper using TTI power supply'
group = 'optional'
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
dct1 = device('nicos.devices.entangle.PowerSupply',
description = 'current in first channel of supply (flipper current)',
tangodevice = tango_base + 'tti1/out1',
timeout = 1,
precision = 0.01,
),
dct2 = device('nicos.devices.entangle.PowerSupply',
description = 'current in second channel of supply (compensation current)',
tangodevice = tango_base + 'tti1/out2',
timeout = 1,
precision = 0.01,
),
flip = device('nicos.devices.polarized.MezeiFlipper',
description = 'Mezei flipper before sample (in shielding table)',
flip = 'dct1',
corr = 'dct2',
),
)
| none | 1 | 2.048685 | 2 |
|
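The setup above only declares the wiring: two TTI power-supply channels (dct1 for the flipping coil, dct2 for the compensation coil) combined into one logical flipper device. The sketch below is a rough, purely illustrative picture of what such a wrapper does when switched; the class name, method names and current setpoints are invented and this is not the NICOS MezeiFlipper implementation.

class SimpleFlipper:
    def __init__(self, flip_supply, corr_supply, flip_current=1.0, corr_current=0.5):
        self.flip_supply = flip_supply      # e.g. the 'dct1' channel
        self.corr_supply = corr_supply      # e.g. the 'dct2' channel
        self.flip_current = flip_current    # assumed setpoint in A
        self.corr_current = corr_current    # assumed setpoint in A

    def switch(self, state):
        # 'on' drives both coils to their setpoints, 'off' ramps them to zero.
        if state == 'on':
            self.flip_supply.move(self.flip_current)
            self.corr_supply.move(self.corr_current)
        elif state == 'off':
            self.flip_supply.move(0.0)
            self.corr_supply.move(0.0)
        else:
            raise ValueError('unknown flipper state: %r' % (state,))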
mars/learn/cluster/_k_means_init.py | hxri/mars | 2,413 | 9549 | <filename>mars/learn/cluster/_k_means_init.py<gh_stars>1000+
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes
from ... import tensor as mt
from ...core import OutputType, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import KeyField, Int32Field
from ...tensor.array_utils import as_same_device, device
from ...tensor.core import TensorOrder
from ...tensor.random import RandomStateField
from ...utils import has_unknown_shape
from ..metrics import euclidean_distances
from ..operands import LearnOperand, LearnOperandMixin
def _kmeans_plus_plus_init(X,
x_squared_norms,
random_state,
n_clusters: int,
n_local_trials: int = None):
n_samples, n_features = X.shape
centers = mt.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if X.issparse(): # pragma: no cover
centers[0] = X[center_id].todense()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
distance_to_candidates = mt.minimum(closest_dist_sq, distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = mt.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if X.issparse(): # pragma: no cover
c_center = X[best_candidate].todense()
else:
c_center = X[best_candidate]
centers[c] = c_center
return centers
class KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT
_x = KeyField('x')
_n_clusters = Int32Field('n_clusters')
_x_squared_norms = KeyField('x_squared_norms')
_state = RandomStateField('state')
_n_local_trials = Int32Field('n_local_trials')
def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
state=None, n_local_trials=None, output_types=None, **kw):
super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
_state=state, _n_local_trials=n_local_trials,
_output_types=output_types, **kw)
if self._output_types is None:
self._output_types = [OutputType.tensor]
@property
def x(self):
return self._x
@property
def n_clusters(self):
return self._n_clusters
@property
def x_squared_norms(self):
return self._x_squared_norms
@property
def state(self):
return self._state
@property
def n_local_trials(self):
return self._n_local_trials
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._x = self._inputs[0]
self._x_squared_norms = self._inputs[-1]
def __call__(self):
inputs = [self._x, self._x_squared_norms]
kw = {
'shape': (self._n_clusters, self._x.shape[1]),
'dtype': self._x.dtype,
'order': TensorOrder.C_ORDER
}
return self.new_tileable(inputs, kws=[kw])
@classmethod
def _tile_one_chunk(cls, op: "KMeansPlusPlusInit"):
out = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_kw = out.params.copy()
chunk_kw['index'] = (0, 0)
chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]
chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])
kw = out.params
kw['chunks'] = [chunk]
kw['nsplits'] = tuple((s,) for s in out.shape)
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[kw])
@classmethod
def tile(cls, op: "KMeansPlusPlusInit"):
if len(op.x.chunks) == 1:
assert len(op.x_squared_norms.chunks) == 1
return cls._tile_one_chunk(op)
else:
return (yield from cls._tile_k_init(op))
@classmethod
def _tile_k_init(cls, op: "KMeansPlusPlusInit"):
X = op.x
n_clusters = op.n_clusters
x_squared_norms = op.x_squared_norms
random_state = op.state
n_local_trials = op.n_local_trials
centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,
n_clusters, n_local_trials)
return (yield from recursive_tile(centers))
@classmethod
def execute(cls, ctx, op: "KMeansPlusPlusInit"):
try:
from sklearn.cluster._kmeans import _kmeans_plusplus
except ImportError: # pragma: no cover
try:
from sklearn.cluster._kmeans import _k_init
except ImportError:
from sklearn.cluster.k_means_ import _k_init
def _kmeans_plusplus(*args, **kwargs):
return _k_init(*args, **kwargs), None
(x, x_squared_norms), device_id, _ = as_same_device(
[ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = _kmeans_plusplus(
x, op.n_clusters, x_squared_norms=x_squared_norms, random_state=op.state,
n_local_trials=op.n_local_trials)[0]
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : int, RandomState instance
The generator used to initialize the centers. Use an int to make the
randomness deterministic.
See :term:`Glossary <random_state>`.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
op = KMeansPlusPlusInit(x=X, n_clusters=n_clusters, x_squared_norms=x_squared_norms,
state=random_state, n_local_trials=n_local_trials)
return op()
class KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT
_x = KeyField('x')
_n_clusters = Int32Field('n_clusters')
_x_squared_norms = KeyField('x_squared_norms')
_state = RandomStateField('state')
_init_iter = Int32Field('init_iter')
_oversampling_factor = Int32Field('oversampling_factor')
def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
state=None, init_iter=None, oversampling_factor=None,
output_types=None, **kw):
super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
_state=state, _init_iter=init_iter,
_oversampling_factor=oversampling_factor,
_output_types=output_types, **kw)
if self._output_types is None:
self._output_types = [OutputType.tensor]
@property
def x(self):
return self._x
@property
def n_clusters(self):
return self._n_clusters
@property
def x_squared_norms(self):
return self._x_squared_norms
@property
def state(self):
return self._state
@property
def init_iter(self):
return self._init_iter
@property
def oversampling_factor(self):
return self._oversampling_factor
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
if self._x is not None:
self._x = self._inputs[0]
if self._x_squared_norms is not None:
self._x_squared_norms = self._inputs[-1]
def __call__(self):
inputs = [self._x, self._x_squared_norms]
kw = {
'shape': (self._n_clusters, self._x.shape[1]),
'dtype': self._x.dtype,
'order': TensorOrder.C_ORDER
}
return self.new_tileable(inputs, kws=[kw])
@classmethod
def tile(cls, op: "KMeansScalablePlusPlusInit"):
if has_unknown_shape(*op.inputs):
yield
x = mt.tensor(op.x)
x_squared_norms = mt.atleast_2d(op.x_squared_norms)
out = op.outputs[0]
random_state = op.state
rs = mt.random.RandomState.from_numpy(random_state)
n_samples, n_features = x.shape
n_clusters = op.n_clusters
# step 1, sample a centroid
centers = x[random_state.randint(n_samples, size=1)]
for _ in range(op.init_iter):
distances = euclidean_distances(
x, centers, X_norm_squared=x_squared_norms, squared=True)
# calculate the cost of data with respect to current centers
cost = mt.sum(mt.min(distances, axis=1))
# calculate the distribution to sample new centers
distribution = mt.full(len(distances), 1 / len(distances))
mt.true_divide(mt.min(distances, axis=1), cost,
where=cost != 0, out=distribution)
# pick new centers
new_centers_size = op.oversampling_factor * n_clusters
new_centers = x[rs.choice(n_samples, new_centers_size, p=distribution)]
centers = mt.concatenate([centers, new_centers])
# rechunk centers into one chunk
centers = (yield from recursive_tile(centers)).rechunk(centers.shape)
distances = yield from recursive_tile(euclidean_distances(
x, centers, X_norm_squared=x_squared_norms, squared=True))
map_index_to_chunks = {}
# calculate weight for each chunk
for c in distances.chunks:
map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)
map_chunk_kw = {
'shape': (len(centers),),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'index': c.index
}
map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])
map_index_to_chunks[c.index] = map_chunk
combine_chunks = []
for i in range(distances.chunk_shape[0]):
map_chunks = [map_index_to_chunks[i, j]
for j in range(distances.chunk_shape[1])]
combine_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.combine)
combine_chunk_kw = {
'shape': (len(centers),),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'index': (i,)
}
combine_chunk = combine_chunk_op.new_chunk(
map_chunks, kws=[combine_chunk_kw])
combine_chunks.append(combine_chunk)
reduce_chunk_op = KMeansScalablePlusPlusInit(n_clusters=op.n_clusters,
state=random_state,
stage=OperandStage.reduce)
reduce_chunk_kw = out.params
reduce_chunk_kw['index'] = (0, 0)
reduce_chunk = reduce_chunk_op.new_chunk([centers.chunks[0]] + combine_chunks,
kws=[reduce_chunk_kw])
new_op = op.copy()
kw = out.params
kw['chunks'] = [reduce_chunk]
kw['nsplits'] = tuple((s,) for s in out.shape)
return new_op.new_tileables(op.inputs, kws=[kw])
@classmethod
def _execute_map(cls, ctx, op: "KMeansScalablePlusPlusInit"):
distances = ctx[op.inputs[0].key]
min_distance_ids = np.argmin(distances, axis=1)
min_distances = distances[range(len(distances)), min_distance_ids]
ctx[op.outputs[0].key] = (min_distances, min_distance_ids)
@classmethod
def _execute_combine(cls, ctx, op: "KMeansScalablePlusPlusInit"):
out = op.outputs[0]
all_distances, all_min_distance_ids = tuple(zip(*(ctx[inp.key] for inp in op.inputs)))
distances = np.stack(all_distances).T
min_distance_ids = np.stack(all_min_distance_ids).T
combined_min_distance_id = np.argmin(distances, axis=1)
min_distance_ids = min_distance_ids[range(len(distances)), combined_min_distance_id]
count = np.bincount(min_distance_ids)
result = np.zeros(out.shape[0], dtype=np.int64)
result[:len(count)] = count
ctx[out.key] = result
@classmethod
def _execute_reduce(cls, ctx, op: "KMeansScalablePlusPlusInit"):
from sklearn.cluster import KMeans
inputs = [ctx[inp.key] for inp in op.inputs]
count = np.zeros(inputs[1].shape[0], dtype=np.int64)
for inp in inputs[1:]:
count += inp
weight = count / count.sum()
centers = inputs[0]
kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,
random_state=op.state)
kmeans.fit(centers, sample_weight=weight)
ctx[op.outputs[0].key] = kmeans.cluster_centers_
@classmethod
def execute(cls, ctx, op: "KMeansScalablePlusPlusInit"):
if op.stage == OperandStage.map:
return cls._execute_map(ctx, op)
elif op.stage == OperandStage.combine:
return cls._execute_combine(ctx, op)
else:
return cls._execute_reduce(ctx, op)
def _scalable_k_init(X, n_clusters, x_squared_norms, random_state,
oversampling_factor=2, init_iter=5):
op = KMeansScalablePlusPlusInit(x=X, n_clusters=n_clusters,
x_squared_norms=x_squared_norms,
state=random_state, init_iter=init_iter,
oversampling_factor=oversampling_factor)
return op()
| <filename>mars/learn/cluster/_k_means_init.py<gh_stars>1000+
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes
from ... import tensor as mt
from ...core import OutputType, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import KeyField, Int32Field
from ...tensor.array_utils import as_same_device, device
from ...tensor.core import TensorOrder
from ...tensor.random import RandomStateField
from ...utils import has_unknown_shape
from ..metrics import euclidean_distances
from ..operands import LearnOperand, LearnOperandMixin
def _kmeans_plus_plus_init(X,
x_squared_norms,
random_state,
n_clusters: int,
n_local_trials: int = None):
n_samples, n_features = X.shape
centers = mt.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if X.issparse(): # pragma: no cover
centers[0] = X[center_id].todense()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
distance_to_candidates = mt.minimum(closest_dist_sq, distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = mt.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if X.issparse(): # pragma: no cover
c_center = X[best_candidate].todense()
else:
c_center = X[best_candidate]
centers[c] = c_center
return centers
class KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT
_x = KeyField('x')
_n_clusters = Int32Field('n_clusters')
_x_squared_norms = KeyField('x_squared_norms')
_state = RandomStateField('state')
_n_local_trials = Int32Field('n_local_trials')
def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
state=None, n_local_trials=None, output_types=None, **kw):
super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
_state=state, _n_local_trials=n_local_trials,
_output_types=output_types, **kw)
if self._output_types is None:
self._output_types = [OutputType.tensor]
@property
def x(self):
return self._x
@property
def n_clusters(self):
return self._n_clusters
@property
def x_squared_norms(self):
return self._x_squared_norms
@property
def state(self):
return self._state
@property
def n_local_trials(self):
return self._n_local_trials
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._x = self._inputs[0]
self._x_squared_norms = self._inputs[-1]
def __call__(self):
inputs = [self._x, self._x_squared_norms]
kw = {
'shape': (self._n_clusters, self._x.shape[1]),
'dtype': self._x.dtype,
'order': TensorOrder.C_ORDER
}
return self.new_tileable(inputs, kws=[kw])
@classmethod
def _tile_one_chunk(cls, op: "KMeansPlusPlusInit"):
out = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_kw = out.params.copy()
chunk_kw['index'] = (0, 0)
chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]
chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])
kw = out.params
kw['chunks'] = [chunk]
kw['nsplits'] = tuple((s,) for s in out.shape)
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[kw])
@classmethod
def tile(cls, op: "KMeansPlusPlusInit"):
if len(op.x.chunks) == 1:
assert len(op.x_squared_norms.chunks) == 1
return cls._tile_one_chunk(op)
else:
return (yield from cls._tile_k_init(op))
@classmethod
def _tile_k_init(cls, op: "KMeansPlusPlusInit"):
X = op.x
n_clusters = op.n_clusters
x_squared_norms = op.x_squared_norms
random_state = op.state
n_local_trials = op.n_local_trials
centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,
n_clusters, n_local_trials)
return (yield from recursive_tile(centers))
@classmethod
def execute(cls, ctx, op: "KMeansPlusPlusInit"):
try:
from sklearn.cluster._kmeans import _kmeans_plusplus
except ImportError: # pragma: no cover
try:
from sklearn.cluster._kmeans import _k_init
except ImportError:
from sklearn.cluster.k_means_ import _k_init
def _kmeans_plusplus(*args, **kwargs):
return _k_init(*args, **kwargs), None
(x, x_squared_norms), device_id, _ = as_same_device(
[ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = _kmeans_plusplus(
x, op.n_clusters, x_squared_norms=x_squared_norms, random_state=op.state,
n_local_trials=op.n_local_trials)[0]
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : int, RandomState instance
The generator used to initialize the centers. Use an int to make the
randomness deterministic.
See :term:`Glossary <random_state>`.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
op = KMeansPlusPlusInit(x=X, n_clusters=n_clusters, x_squared_norms=x_squared_norms,
state=random_state, n_local_trials=n_local_trials)
return op()
class KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):
_op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT
_x = KeyField('x')
_n_clusters = Int32Field('n_clusters')
_x_squared_norms = KeyField('x_squared_norms')
_state = RandomStateField('state')
_init_iter = Int32Field('init_iter')
_oversampling_factor = Int32Field('oversampling_factor')
def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
state=None, init_iter=None, oversampling_factor=None,
output_types=None, **kw):
super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
_state=state, _init_iter=init_iter,
_oversampling_factor=oversampling_factor,
_output_types=output_types, **kw)
if self._output_types is None:
self._output_types = [OutputType.tensor]
@property
def x(self):
return self._x
@property
def n_clusters(self):
return self._n_clusters
@property
def x_squared_norms(self):
return self._x_squared_norms
@property
def state(self):
return self._state
@property
def init_iter(self):
return self._init_iter
@property
def oversampling_factor(self):
return self._oversampling_factor
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
if self._x is not None:
self._x = self._inputs[0]
if self._x_squared_norms is not None:
self._x_squared_norms = self._inputs[-1]
def __call__(self):
inputs = [self._x, self._x_squared_norms]
kw = {
'shape': (self._n_clusters, self._x.shape[1]),
'dtype': self._x.dtype,
'order': TensorOrder.C_ORDER
}
return self.new_tileable(inputs, kws=[kw])
@classmethod
def tile(cls, op: "KMeansScalablePlusPlusInit"):
if has_unknown_shape(*op.inputs):
yield
x = mt.tensor(op.x)
x_squared_norms = mt.atleast_2d(op.x_squared_norms)
out = op.outputs[0]
random_state = op.state
rs = mt.random.RandomState.from_numpy(random_state)
n_samples, n_features = x.shape
n_clusters = op.n_clusters
# step 1, sample a centroid
centers = x[random_state.randint(n_samples, size=1)]
for _ in range(op.init_iter):
distances = euclidean_distances(
x, centers, X_norm_squared=x_squared_norms, squared=True)
# calculate the cost of data with respect to current centers
cost = mt.sum(mt.min(distances, axis=1))
# calculate the distribution to sample new centers
distribution = mt.full(len(distances), 1 / len(distances))
mt.true_divide(mt.min(distances, axis=1), cost,
where=cost != 0, out=distribution)
# pick new centers
new_centers_size = op.oversampling_factor * n_clusters
new_centers = x[rs.choice(n_samples, new_centers_size, p=distribution)]
centers = mt.concatenate([centers, new_centers])
# rechunk centers into one chunk
centers = (yield from recursive_tile(centers)).rechunk(centers.shape)
distances = yield from recursive_tile(euclidean_distances(
x, centers, X_norm_squared=x_squared_norms, squared=True))
map_index_to_chunks = {}
# calculate weight for each chunk
for c in distances.chunks:
map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)
map_chunk_kw = {
'shape': (len(centers),),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'index': c.index
}
map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])
map_index_to_chunks[c.index] = map_chunk
combine_chunks = []
for i in range(distances.chunk_shape[0]):
map_chunks = [map_index_to_chunks[i, j]
for j in range(distances.chunk_shape[1])]
combine_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.combine)
combine_chunk_kw = {
'shape': (len(centers),),
'dtype': np.dtype(np.int64),
'order': TensorOrder.C_ORDER,
'index': (i,)
}
combine_chunk = combine_chunk_op.new_chunk(
map_chunks, kws=[combine_chunk_kw])
combine_chunks.append(combine_chunk)
reduce_chunk_op = KMeansScalablePlusPlusInit(n_clusters=op.n_clusters,
state=random_state,
stage=OperandStage.reduce)
reduce_chunk_kw = out.params
reduce_chunk_kw['index'] = (0, 0)
reduce_chunk = reduce_chunk_op.new_chunk([centers.chunks[0]] + combine_chunks,
kws=[reduce_chunk_kw])
new_op = op.copy()
kw = out.params
kw['chunks'] = [reduce_chunk]
kw['nsplits'] = tuple((s,) for s in out.shape)
return new_op.new_tileables(op.inputs, kws=[kw])
@classmethod
def _execute_map(cls, ctx, op: "KMeansScalablePlusPlusInit"):
distances = ctx[op.inputs[0].key]
min_distance_ids = np.argmin(distances, axis=1)
min_distances = distances[range(len(distances)), min_distance_ids]
ctx[op.outputs[0].key] = (min_distances, min_distance_ids)
@classmethod
def _execute_combine(cls, ctx, op: "KMeansScalablePlusPlusInit"):
out = op.outputs[0]
all_distances, all_min_distance_ids = tuple(zip(*(ctx[inp.key] for inp in op.inputs)))
distances = np.stack(all_distances).T
min_distance_ids = np.stack(all_min_distance_ids).T
combined_min_distance_id = np.argmin(distances, axis=1)
min_distance_ids = min_distance_ids[range(len(distances)), combined_min_distance_id]
count = np.bincount(min_distance_ids)
result = np.zeros(out.shape[0], dtype=np.int64)
result[:len(count)] = count
ctx[out.key] = result
@classmethod
def _execute_reduce(cls, ctx, op: "KMeansScalablePlusPlusInit"):
from sklearn.cluster import KMeans
inputs = [ctx[inp.key] for inp in op.inputs]
count = np.zeros(inputs[1].shape[0], dtype=np.int64)
for inp in inputs[1:]:
count += inp
weight = count / count.sum()
centers = inputs[0]
kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,
random_state=op.state)
kmeans.fit(centers, sample_weight=weight)
ctx[op.outputs[0].key] = kmeans.cluster_centers_
@classmethod
def execute(cls, ctx, op: "KMeansScalablePlusPlusInit"):
if op.stage == OperandStage.map:
return cls._execute_map(ctx, op)
elif op.stage == OperandStage.combine:
return cls._execute_combine(ctx, op)
else:
return cls._execute_reduce(ctx, op)
def _scalable_k_init(X, n_clusters, x_squared_norms, random_state,
oversampling_factor=2, init_iter=5):
op = KMeansScalablePlusPlusInit(x=X, n_clusters=n_clusters,
x_squared_norms=x_squared_norms,
state=random_state, init_iter=init_iter,
oversampling_factor=oversampling_factor)
return op()
| en | 0.789357 | # Copyright 1999-2021 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Set the number of local seeding trials if none is given # This is what Arthur/Vassilvitskii tried, but did not report # specific results for other than mentioning in the conclusion # that it helped. # Pick first center randomly # pragma: no cover # Initialize list of closest distances and calculate current potential # Pick the remaining n_clusters-1 points # Choose center candidates by sampling with probability proportional # to the squared distance to the closest existing center # XXX: numerical imprecision can result in a candidate_id out of range # Compute distances to center candidates # update closest distances squared and potential for each candidate # Decide which candidate is the best # Permanently add best center candidate found in local tries # pragma: no cover # pragma: no cover ############################################################################### # Initialization heuristic Init n_clusters seeds according to k-means++ Parameters ---------- X : array or sparse matrix, shape (n_samples, n_features) The data to pick seeds for. To avoid memory copy, the input data should be double precision (dtype=np.float64). n_clusters : integer The number of seeds to choose x_squared_norms : array, shape (n_samples,) Squared Euclidean norm of each data point. random_state : int, RandomState instance The generator used to initialize the centers. Use an int to make the randomness deterministic. See :term:`Glossary <random_state>`. n_local_trials : integer, optional The number of seeding trials for each center (except the first), of which the one reducing inertia the most is greedily chosen. Set to None to make the number of trials depend logarithmically on the number of seeds (2+log(k)); this is the default. Notes ----- Selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. see: <NAME>. and <NAME>. "k-means++: the advantages of careful seeding". ACM-SIAM symposium on Discrete algorithms. 2007 Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip, which is the implementation used in the aforementioned paper. # step 1, sample a centroid # calculate the cost of data with respect to current centers # calculate the distribution to sample new centers # pick new centers # rechunk centers into one chunk # calculate weight for each chunk | 1.815391 | 2 |
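The _k_init docstring above describes D-squared weighted ("k-means++") seeding; _kmeans_plus_plus_init implements it on Mars tensors with the extra n_local_trials greedy step, and KMeansScalablePlusPlusInit adds the oversampled distributed variant. The core sampling rule fits in a few lines of plain NumPy; the sketch below is an illustration only and skips both refinements.

import numpy as np

def kmeanspp_seed(X, n_clusters, rng):
    n_samples = X.shape[0]
    centers = [X[rng.randint(n_samples)]]                 # first center: uniform
    for _ in range(1, n_clusters):
        diff = X[:, None, :] - np.asarray(centers)[None, :, :]
        d2 = np.min((diff ** 2).sum(axis=-1), axis=1)     # squared distance to nearest chosen center
        centers.append(X[rng.choice(n_samples, p=d2 / d2.sum())])
    return np.asarray(centers)

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc, 0.1, size=(50, 2)) for loc in (0.0, 5.0, 10.0)])
print(kmeanspp_seed(X, 3, rng))   # typically one seed per well-separated blob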
wikipedia_parser/infobox/wikitext_parser.py | ojones/wikipedia_parser | 9 | 9550 | <gh_stars>1-10
import re
from wikipedia_parser.infobox import clean_text as clean_help
from wikipedia_parser.infobox import wikitext_helpers as wtext_help
from wikipedia_parser.third_party_adapters import parserfromhell_adapter as adapter
__author__ = 'oswaldjones'
def get_simple_text(wtext, key, clean=True):
text = None
keys = key if type(key) is list else [key]
template_dict = adapter.template_dict(wtext)
wtext_lines = wtext_help.get_wtext_lines(wtext)
if keys:
for possible_key in keys:
# try getting from parserfromhell
if not text and template_dict:
text = template_dict.get(possible_key)
# final attempt if still no text
if not text and wtext_lines:
matched_line = wtext_help.find_key_val_line(wtext, possible_key)
if matched_line:
key_val = matched_line.strip(' \t\n\r').split("=", 1)
if len(key_val) == 2:
text = key_val[1].strip()
if text and clean:
text = clean_help.clean_text(text)
return text
def extract_page_links(wtext, key):
links = []
keys = key if type(key) is list else [key]
template_dict = adapter.template_dict(wtext)
wtext_lines = wtext_help.get_wtext_lines(wtext)
if keys:
for possible_key in keys:
# try parserfromhell
if not links and template_dict:
if template_dict.get(possible_key):
matches = re.findall("\[\[(.*?)\]\]", template_dict.get(possible_key))
links = [link.split("|", 1)[0] for link in matches]
# final attempt if still no links
if not links and wtext_lines:
matched_line = wtext_help.find_key_val_line(wtext_lines, possible_key)
if matched_line:
key_val = matched_line.strip(' \t\n\r').split("=")
if len(key_val) == 2:
                        matches = re.findall(r"\[\[(.*?)\]\]", key_val[1].strip())
links = [link.split("|", 1)[0] for link in matches]
return links
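# Added usage sketch, not part of the original module: it assumes the helper
# modules imported above (clean_text, wikitext_helpers, the parserfromhell
# adapter) behave as the calls in this file suggest; the infobox text is made up.
if __name__ == '__main__':
    sample = "{{Infobox person | name = Ada Lovelace | birth_place = [[London]], England}}"
    print(get_simple_text(sample, ['name', 'birth_name']))  # expected: a cleaned 'Ada Lovelace'
    print(extract_page_links(sample, 'birth_place'))        # expected: ['London']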
sandbox/lib/jumpscale/JumpscaleLibs/clients/graphql/GraphQLFactory.py | threefoldtech/threebot_prebuilt | 0 | 9551 | from .GraphQLClient import GraphQLClient
from Jumpscale import j
JSConfigs = j.baseclasses.object_config_collection
class GraphQLFactory(JSConfigs):
__jslocation__ = "j.clients.graphql"
_CHILDCLASS = GraphQLClient
src/video_transcoding/defaults.py | tumb1er/django-video-transcoding | 21 | 9552 |
from os import getenv as e
from kombu import Queue
CELERY_APP_NAME = 'video_transcoding'
VIDEO_TRANSCODING_CELERY_CONF = {
'broker_url': e('VIDEO_TRANSCODING_CELERY_BROKER_URL',
'amqp://guest:guest@rabbitmq:5672/'),
'result_backend': e('VIDEO_TRANSCODING_CELERY_RESULT_BACKEND', None),
'task_default_exchange': CELERY_APP_NAME,
'task_default_exchange_type': 'topic',
'task_default_queue': CELERY_APP_NAME,
'worker_prefetch_multiplier': 1,
'worker_concurrency': e('VIDEO_TRANSCODING_CELERY_CONCURRENCY'),
'task_acks_late': True,
'task_reject_on_worker_lost': True,
'task_queues': [
Queue(CELERY_APP_NAME, routing_key=CELERY_APP_NAME),
]
}
# Directory for large output files
VIDEO_TEMP_DIR = '/tmp'
# Download source before processing
VIDEO_DOWNLOAD_SOURCE = bool(int(e('VIDEO_DOWNLOAD_SOURCE', 0)))
# A list of WebDAV endpoints for storing video results
VIDEO_ORIGINS = e('VIDEO_ORIGINS',
'http://storage.localhost:8080/videos/').split(',')
# Video streamer public urls (comma-separated)
VIDEO_EDGES = e('VIDEO_EDGES', 'http://storage.localhost:8080/').split(',')
# Edge video manifest url template
VIDEO_URL = '{edge}/hls/{filename}1080p.mp4/index.m3u8'
# Output source files checksum
CHECKSUM_SOURCE = bool(int(e('CHECKSUM_SOURCE', 0)))
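# Standalone usage sketch (not part of defaults.py): the settings above are read
# from os.environ only at import time, so overrides must be set before the first
# import; the import path video_transcoding.defaults is assumed from the layout.
import os
os.environ['VIDEO_TRANSCODING_CELERY_CONCURRENCY'] = '4'
os.environ['VIDEO_ORIGINS'] = 'http://origin-1/videos/,http://origin-2/videos/'
from video_transcoding import defaults as vt_defaults
assert vt_defaults.VIDEO_TRANSCODING_CELERY_CONF['worker_concurrency'] == '4'
assert len(vt_defaults.VIDEO_ORIGINS) == 2
manifest = vt_defaults.VIDEO_URL.format(edge=vt_defaults.VIDEO_EDGES[0], filename='clip_')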
wordSenseByContext.py | jmboettcher/fall2019_sentiment_in_alternative_words | 0 | 9553 |
from collections import defaultdict
from nltk.tokenize import sent_tokenize
from nltk.corpus import wordnet as wn
from nltk.corpus import semcor as sc
from nltk.corpus import stopwords
import mywordtokenizer
class SenseContextWordDict:
def __init__(self):
self.dictionary = self._create_dictionary()
def _create_dictionary(self):
dictionary = defaultdict(lambda: defaultdict(int))
myStopWords = stopwords.words('english')
for sentence in sc.tagged_sents(tag='sem'):
plainWordSent = []
taggedWordSent = []
self._make_word_lists(plainWordSent, taggedWordSent, sentence)
for taggedItemTuple in taggedWordSent:
self._update_tagged_item_entry(myStopWords, dictionary, plainWordSent, taggedItemTuple[0],taggedItemTuple[1])
return dictionary
def _make_word_lists(self, plainWordSent, taggedWordSent, sentence):
for i in range(0,len(sentence)):
item = sentence[i]
if(type(item)) == list:
plainWordSent.append(item[0])
else:
if type(item.label()) == str:
plainWordSent.append(item.leaves()[0])
else:
plainWordSent.append(item.label().name())
taggedWordSent.append([item, i])
def _update_tagged_item_entry(self, myStopWords,dictionary,plainWordSent,taggedItem,taggedItemPosition):
for j in range(0,len(plainWordSent)):
word = plainWordSent[j]
if taggedItem.label().name() != word:
taggedSynset = taggedItem.label().synset()
splitUp = word.split("_")
for thisword in splitUp:
wordTokened = mywordtokenizer.simple(thisword)
if len(wordTokened) > 0:
word = wordTokened[0]
if word not in myStopWords:
dictionary[taggedSynset][word]+=1
dictionary[taggedSynset][".total."]+=1
dictionary[taggedSynset][".totalNoStops."]+=1
elif abs(j - taggedItemPosition) == 1:
dictionary[taggedSynset][word]+=1
dictionary[taggedSynset][".total."]+=1
def getMostLikelySynset(self, word, sentence):
"""Find the set of a word's synonyms.
Parameters
----------
word : str
The string representing a given word.
Returns
-------
a set pf the given word's synonyms.
"""
myStopWords = stopwords.words('english')
highestCoverageSyn = self._synset_search(".totalNoStops.", myStopWords, word, sentence)
if highestCoverageSyn is None:
highestCoverageSyn = self._synset_search(".total.", [], word, sentence)
return highestCoverageSyn
def _synset_search(self, totalToUse, exclusionSet, word, sentence):
"""Find the set of a word's synonyms.
Parameters
----------
word : str
The string representing a given word.
Returns
-------
a set pf the given word's synonyms.
"""
myMap = self.dictionary
highestCoverage = 0
highestCoverageSyn = None
for syn in wn.synsets(word):
totalContextWordMatches = 0
totalSet = myMap[syn][totalToUse]
if totalSet > 0:
for contextWord in sentence:
if contextWord != word and contextWord not in exclusionSet:
totalContextWordMatches += myMap[syn][contextWord]
coverage = totalContextWordMatches / totalSet
if coverage > highestCoverage:
highestCoverage = coverage
highestCoverageSyn = syn
return highestCoverageSyn
def listAlternatives(self, word, sentence):
synonyms = set([])
mostLikelySynset = self.getMostLikelySynset(word, sentence)
if not mostLikelySynset is None:
for synonym in mostLikelySynset.lemmas():
synonyms.add(synonym.name())
return synonyms
def mostFrequentAlternative(self, word, sentence):
mostLikelySynset = self.getMostLikelySynset(word, sentence)
highestCount = 0
mostFrequentAlternative = None
if not mostLikelySynset is None:
for synonym in mostLikelySynset.lemmas():
count = synonym.count()
if count > highestCount:
mostFrequentAlternative = synonym.name()
highestCount = count
return mostFrequentAlternative
"""===================================================================
Place all function calls below the following conditional so that they
are called only if this module is called with
`python wordSenseByContext.py`
No functions should execute if it is instead imported with
import wordSenseByContext
in the interactive shell.
"""
if __name__ == '__main__':
pass
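# Added usage sketch, not in the original assignment file: the NLTK corpora
# wordnet, semcor and stopwords must already be downloaded, and building the
# dictionary walks every sem-tagged SemCor sentence, so construction is slow.
def _demo():
    sense_dict = SenseContextWordDict()
    sentence = ['i', 'deposited', 'the', 'check', 'at', 'the', 'bank']
    print(sense_dict.getMostLikelySynset('bank', sentence))      # e.g. Synset('bank.n.01') if matched
    print(sense_dict.listAlternatives('bank', sentence))         # lemma names of that synset
    print(sense_dict.mostFrequentAlternative('bank', sentence))  # single most frequent lemma, or None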
paymentmethods/stripejs/tests.py | tjwalch/django-restshop | 0 | 9554 | import decimal
from unittest import mock
from django.conf import settings
from django.test import modify_settings
from rest_framework import test
from rest_framework.reverse import reverse
import stripe
from restshop import serializers
from restshop.models import Order
from paymentmethods.stripejs.models import StripeInvoice
import restshop.exceptions
from restshop.tests.test_product import products_and_price
@modify_settings(INSTALLED_APPS={
'append': 'restshop.paymentmethods.stripejs'
})
class StripeTest(test.APITestCase):
def setUp(self):
stripe.api_key = settings.STRIPE_API_KEY
self.order = Order.objects.create(
email='<EMAIL>',
)
self.order.items.create(
description='test purchase',
price='1000',
vat='250',
quantity=3,
product=products_and_price(1000).skus.all()[0]
)
session = self.client.session
session['order_id'] = self.order.pk
session.save()
def get_token(self):
return stripe.Token.create(card={
"number": '4242424242424242',
"exp_month": 12,
"exp_year": 2016,
"cvc": '123'
}).id
def test_pay(self):
response = self.client.post(
reverse(
'order-pay',
args=['stripejs.stripeinvoice']
),
{
'stripeToken': self.get_token(),
'order': serializers.OrderSerializer(instance=self.order).data
}
)
self.assertEqual(201, response.status_code, response.data)
self.assertEqual(0,
decimal.Decimal(response.data['owed']) -
decimal.Decimal(response.data['paid']))
order = Order.objects.get()
self.assertEqual(
Order.STATUS.completed,
order.status
)
self.assertEqual(
decimal.Decimal('3750.00'),
order.invoices.all()[0].paid
)
@mock.patch('stripe.Charge.create')
def test_card_error(self, create_mock):
create_mock.side_effect = stripe.CardError('fail!', '', '402')
si = StripeInvoice.objects.create(
order=self.order,
owed=self.order.amount,
stripeToken=self.get_token(),
)
try:
si.authorize()
except restshop.exceptions.PaymentFailed as e:
self.assertEqual('fail!', e.detail)
else:
self.assertRaises(restshop.exceptions.PaymentFailed, lambda: None)
def test_cancel_auth(self):
si = StripeInvoice.objects.create(
order=self.order,
owed=self.order.amount,
stripeToken=self.get_token(),
)
self.assertRaises(
restshop.exceptions.InvalidOperation,
si.cancel_auth
)
self.assertTrue(si.authorize())
self.assertTrue(si.cancel_auth())
si.refresh_from_db()
self.assertEqual(2, si.events.all().count())
self.assertEqual(StripeInvoice.STATUS.canceled, si.status)
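    # Added sketch, not part of the original test module: get_token() above hits
    # the live Stripe test API; stripe.Token.create can be stubbed the same way
    # test_card_error stubs stripe.Charge.create when network calls are unwanted.
    def _offline_token_sketch(self):
        with mock.patch('stripe.Token.create') as create_token:
            create_token.return_value = mock.Mock(id='tok_test_123')
            self.assertEqual('tok_test_123',
                             stripe.Token.create(card={'number': '4242424242424242'}).id)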
tnnt/uniqdeaths.py | tnnt-devteam/python-backend | 0 | 9555 | from tnnt.settings import UNIQUE_DEATH_REJECTIONS, UNIQUE_DEATH_NORMALIZATIONS
import re
def normalize(death):
# Given a death string, apply normalizations from settings.
for regtuple in UNIQUE_DEATH_NORMALIZATIONS:
death = re.sub(regtuple[0], regtuple[1], death)
return death
def reject(death):
# Given a death string, return True if it should be excluded as a
# unique death and False if not.
for regex in UNIQUE_DEATH_REJECTIONS:
if re.search(regex, death) is not None:
return True
return False
def compile_unique_deaths(gameQS):
# Given a QuerySet of Game objects, return a set containing strings of all
# the unique deaths from those games after rejections and normalizations are
# applied.
# This is primarily for aggregation, and runs somewhat faster than it would
# if we wanted to return the players who got a death and when. This is a
# post 2021 TODO.
# First, get all unique, un-normalized deaths.
raw_uniq_deaths = \
gameQS.values_list('death', flat=True).distinct()
# Then apply normalizations and rejections, and turn it into a set
# to automatically remove any duplicates produced by normalization.
return set(normalize(d) for d in raw_uniq_deaths if not reject(d))
# post 2021 TODO: showing unique deaths of a player or clan:
# 1. list(Game.objects.values_list('death', 'player__name', 'endtime'))
# 2. iterate through list, filtering any death for which reject is True, and
# normalizing all death strings.
# 3. sort by first death, then endtime.
# 4. filter again by taking only the first player/endtime for each death and
# ignoring later ones.
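# Added illustration, not part of this module: the real regex lists live in
# tnnt.settings and are not shown here, so the patterns below are invented
# stand-ins showing how rejection and normalization combine as in
# compile_unique_deaths above.
def _illustrate():
    normalizations = [(r'a (soldier|guardian) naga', 'a naga')]
    rejections = [r'^ascended']
    deaths = ['killed by a soldier naga', 'killed by a naga', 'ascended']
    unique = set()
    for d in deaths:
        if any(re.search(rx, d) for rx in rejections):
            continue
        for pattern, repl in normalizations:
            d = re.sub(pattern, repl, d)
        unique.add(d)
    return unique  # {'killed by a naga'}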
qutip/graph.py | anubhavvardhan/qutip | 0 | 9556 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""
__all__ = ['graph_degree', 'column_permutation', 'breadth_first_search',
'reverse_cuthill_mckee', 'maximum_bipartite_matching',
'weighted_bipartite_matching']
import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
_breadth_first_search, _node_degrees,
_reverse_cuthill_mckee, _maximum_bipartite_matching,
_weighted_bipartite_matching)
def graph_degree(A):
"""
    Returns the degree for the nodes (rows) of a symmetric
    graph in sparse CSR or CSC format.
    Parameters
    ----------
    A : csr_matrix, csc_matrix
        Input matrix in CSR or CSC sparse format.
Returns
-------
degree : array
Array of integers giving the degree for each node (row).
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
return _node_degrees(A.indices, A.indptr, A.shape[0])
def breadth_first_search(A, start):
"""
Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting
    from a given node (row). Takes CSR or CSC matrices as inputs.
This function requires a matrix with symmetric structure.
Use A+trans(A) if original matrix is not symmetric or not sure.
Parameters
----------
A : csc_matrix, csr_matrix
Input graph in CSC or CSR matrix format
start : int
        Starting node for BFS traversal.
Returns
-------
order : array
Order in which nodes are traversed from starting node.
levels : array
Level of the nodes in the order that they are traversed.
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
num_rows = A.shape[0]
start = int(start)
order, levels = _breadth_first_search(A.indices, A.indptr, num_rows, start)
    # since maybe not all nodes are in search, check for unused entries in
# arrays
return order[order != -1], levels[levels != -1]
def column_permutation(A):
"""
Finds the non-symmetric column permutation of A such that the columns
are given in ascending order according to the number of nonzero entries.
This is sometimes useful for decreasing the fill-in of sparse LU
factorization.
Parameters
----------
A : csc_matrix
Input sparse CSC sparse matrix.
Returns
-------
perm : array
Array of permuted row and column indices.
"""
if not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
count = np.diff(A.indptr)
perm = np.argsort(count)
return perm
def reverse_cuthill_mckee(A, sym=False):
"""
Returns the permutation array that orders a sparse CSR or CSC matrix
in Reverse-Cuthill McKee ordering. Since the input matrix must be
symmetric, this routine works on the matrix A+Trans(A) if the sym flag is
set to False (Default).
It is assumed by default (*sym=False*) that the input matrix is not
symmetric. This is because it is faster to do A+Trans(A) than it is to
check for symmetry for a generic matrix. If you are guaranteed that the
matrix is symmetric in structure (values of matrix element do not matter)
then set *sym=True*
Parameters
----------
A : csc_matrix, csr_matrix
Input sparse CSC or CSR sparse matrix format.
sym : bool {False, True}
Flag to set whether input matrix is symmetric.
Returns
-------
perm : array
Array of permuted row and column indices.
Notes
-----
This routine is used primarily for internal reordering of Lindblad
superoperators for use in iterative solver routines.
References
----------
<NAME> and <NAME>, "Reducing the Bandwidth of Sparse Symmetric
Matrices", ACM '69 Proceedings of the 1969 24th national conference,
(1969).
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
nrows = A.shape[0]
if not sym:
A = A + A.transpose()
return _reverse_cuthill_mckee(A.indices, A.indptr, nrows)
def maximum_bipartite_matching(A, perm_type='row'):
"""
Returns an array of row or column permutations that removes nonzero
elements from the diagonal of a nonsingular square CSC sparse matrix. Such
a permutation is always possible provided that the matrix is nonsingular.
This function looks at the structure of the matrix only.
The input matrix will be converted to CSC matrix format if
necessary.
Parameters
----------
A : sparse matrix
Input matrix
perm_type : str {'row', 'column'}
Type of permutation to generate.
Returns
-------
perm : array
Array of row or column permutations.
Notes
-----
This function relies on a maximum cardinality bipartite matching algorithm
based on a breadth-first search (BFS) of the underlying graph[1]_.
References
----------
<NAME>, <NAME>, and <NAME>, "Design, Implementation, and
Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.
38, no. 2, (2011).
"""
nrows = A.shape[0]
if A.shape[0] != A.shape[1]:
raise ValueError(
'Maximum bipartite matching requires a square matrix.')
if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
A = A.tocsc()
elif not sp.isspmatrix_csc(A):
raise TypeError("matrix must be in CSC, CSR, or COO format.")
if perm_type == 'column':
A = A.transpose().tocsc()
perm = _maximum_bipartite_matching(A.indices, A.indptr, nrows)
if np.any(perm == -1):
raise Exception('Possibly singular input matrix.')
return perm
def weighted_bipartite_matching(A, perm_type='row'):
"""
Returns an array of row permutations that attempts to maximize
the product of the ABS values of the diagonal elements in
a nonsingular square CSC sparse matrix. Such a permutation is
always possible provided that the matrix is nonsingular.
This function looks at both the structure and ABS values of the
underlying matrix.
Parameters
----------
A : csc_matrix
Input matrix
perm_type : str {'row', 'column'}
Type of permutation to generate.
Returns
-------
perm : array
Array of row or column permutations.
Notes
-----
This function uses a weighted maximum cardinality bipartite matching
algorithm based on breadth-first search (BFS). The columns are weighted
according to the element of max ABS value in the associated rows and
are traversed in descending order by weight. When performing the BFS
traversal, the row associated to a given column is the one with maximum
weight. Unlike other techniques[1]_, this algorithm does not guarantee the
product of the diagonal is maximized. However, this limitation is offset
by the substantially faster runtime of this method.
References
----------
<NAME> and <NAME>, "The design and use of algorithms for
permuting large entries to the diagonal of sparse matrices", SIAM J.
Matrix Anal. and Applics. 20, no. 4, 889 (1997).
"""
nrows = A.shape[0]
if A.shape[0] != A.shape[1]:
        raise ValueError('weighted_bipartite_matching requires a square matrix.')
if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
A = A.tocsc()
elif not sp.isspmatrix_csc(A):
raise TypeError("matrix must be in CSC, CSR, or COO format.")
if perm_type == 'column':
A = A.transpose().tocsc()
perm = _weighted_bipartite_matching(
np.asarray(np.abs(A.data), dtype=float),
A.indices, A.indptr, nrows)
if np.any(perm == -1):
raise Exception('Possibly singular input matrix.')
return perm
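# Added usage sketch, not part of QuTiP: exercises the routines above on a small
# random sparse matrix; only numpy/scipy are needed beyond this module.
if __name__ == '__main__':
    _A = sp.rand(8, 8, density=0.3, format='csr')
    _A = (_A + _A.transpose()).tocsr()       # make the structure symmetric for BFS/RCM
    print(graph_degree(_A))
    _order, _levels = breadth_first_search(_A, 0)
    _perm = reverse_cuthill_mckee(_A, sym=True)
    _B = _A[_perm, :][:, _perm]              # reordered matrix with reduced bandwidth
    print(_B.nnz, _B.shape)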
test/source_dir/comments_blank_lines_code.py | Pierre-Thibault/neo-insert-imports | 1 | 9557 | # comments------------------
def a(x):
print x
if True:
    a(10)
locan/data/hulls/__init__.py | super-resolution/Locan | 8 | 9558 |
"""
Hull objects of localization data.
Submodules:
-----------
.. autosummary::
:toctree: ./
hull
alpha_shape
"""
from locan.data.hulls.alpha_shape import *
from locan.data.hulls.hull import *
__all__ = []
__all__.extend(hull.__all__)
__all__.extend(alpha_shape.__all__)
tests/test_workflow_build_combinations.py | tschoonj/cgat-daisy | 1 | 9559 | import pytest
from daisy.workflow import build_combinations
def test_one_option():
assert build_combinations(
{"option1": ["value1", "value2"]}) == \
[{'option1': 'value1'},
{'option1': 'value2'}]
def test_two_options_one_constant():
assert build_combinations(
{'option1': ["value1", "value2"],
'option2': 'valueA'}) == \
[{'option2': 'valueA', 'option1': 'value1'},
{'option2': 'valueA', 'option1': 'value2'}]
def test_two_options():
assert build_combinations(
{'option1': ["value1", "value2"],
'option2': ["valueA", "valueB"]}) == \
[{'option1': 'value1', 'option2': 'valueA'},
{'option1': 'value1', 'option2': 'valueB'},
{'option1': 'value2', 'option2': 'valueA'},
{'option1': 'value2', 'option2': 'valueB'}]
def test_complex_values():
assert build_combinations(
{'option1': [{"value1": [1, 2, 3]},
{"value2": [4, 5, 6]}]}) == \
[{'option1': {'value1': [1, 2, 3]}},
{'option1': {'value2': [4, 5, 6]}}]
def test_groupby_design(tmp_path):
design_file = tmp_path / "design.tsv"
with open(design_file, "w") as outf:
outf.write("label\tc_option1\tc_option2\n")
outf.write("label1\tvalue1\tvalueA\n")
outf.write("label2\tvalue1\tvalueB\n")
outf.write("label3\tvalue2\tvalueA\n")
outf.write("label4\tvalue2\tvalueB\n")
assert build_combinations(
{"groupby": "file",
"label": "label",
"input": design_file,
"option1": "c_option1",
"option2": "c_option2"}) == \
[{'option1': 'value1', 'option2': 'valueA', "name": "label1"},
{'option1': 'value1', 'option2': 'valueB', "name": "label2"},
{'option1': 'value2', 'option2': 'valueA', "name": "label3"},
{'option1': 'value2', 'option2': 'valueB', "name": "label4"}]
def test_groupby_design_with_constant_option(tmp_path):
design_file = tmp_path / "design.tsv"
with open(design_file, "w") as outf:
outf.write("label\tc_option1\tc_option2\n")
outf.write("label1\tvalue1\tvalueA\n")
outf.write("label2\tvalue1\tvalueB\n")
outf.write("label3\tvalue2\tvalueA\n")
outf.write("label4\tvalue2\tvalueB\n")
assert build_combinations(
{"groupby": "file",
"label": "label",
"input": design_file,
"option1": "c_option1",
"option2": "c_option2",
"option3": "valueX"}) == \
[{'option1': 'value1', 'option2': 'valueA', "name": "label1", "option3": "valueX"},
{'option1': 'value1', 'option2': 'valueB', "name": "label2", "option3": "valueX"},
{'option1': 'value2', 'option2': 'valueA', "name": "label3", "option3": "valueX"},
{'option1': 'value2', 'option2': 'valueB', "name": "label4", "option3": "valueX"}]
def test_groupby_design_with_combinatorial_option(tmp_path):
design_file = tmp_path / "design.tsv"
with open(design_file, "w") as outf:
outf.write("label\tc_option1\tc_option2\n")
outf.write("label1\tvalue1\tvalueA\n")
outf.write("label2\tvalue1\tvalueB\n")
outf.write("label3\tvalue2\tvalueA\n")
outf.write("label4\tvalue2\tvalueB\n")
assert build_combinations(
{"groupby": "file",
"label": "label",
"input": design_file,
"option1": "c_option1",
"option2": "c_option2",
"option3": ["valueX", "valueY"]}) == \
[{'option1': 'value1', 'option2': 'valueA', "name": "label1", "option3": "valueX"},
{'option1': 'value1', 'option2': 'valueA', "name": "label1", "option3": "valueY"},
{'option1': 'value1', 'option2': 'valueB', "name": "label2", "option3": "valueX"},
{'option1': 'value1', 'option2': 'valueB', "name": "label2", "option3": "valueY"},
{'option1': 'value2', 'option2': 'valueA', "name": "label3", "option3": "valueX"},
{'option1': 'value2', 'option2': 'valueA', "name": "label3", "option3": "valueY"},
{'option1': 'value2', 'option2': 'valueB', "name": "label4", "option3": "valueX"},
{'option1': 'value2', 'option2': 'valueB', "name": "label4", "option3": "valueY"}]
def test_groupby_regex(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1"}]
def test_groupby_regex_filters_when_data_point_missing(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0"}]
def test_groupby_regex_with_constant(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_x": "x.y",
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "x.y",
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "x.y",
'name': "1"},
]
def test_groupby_regex_with_combinatorial_option(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_x": ["y.x", "z.x"],
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "y.x",
'name': "0"},
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "z.x",
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "y.x",
'name': "1"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "z.x",
'name': "1"},
]
def test_groupby_named_regex(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(?P<key1>\d+).a",
"files_b_regex": r"data_(?P<key1>\d+).b"}) == \
[{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1"}]
def test_groupby_named_regex_paired(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0_2.a".format(tmp_path),
"{}/data_0_3.a".format(tmp_path),
"{}/data_1_2.a".format(tmp_path),
"{}/data_1_3.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(?P<key1>\d+)_(?P<key2>\d+).a",
"files_b_regex": r"data_(?P<key1>\d+).b"}) == \
[{'files_a': "{}/data_0_2.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0_2"},
{'files_a': "{}/data_0_3.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0_3"},
{'files_a': "{}/data_1_2.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1_2"},
{'files_a': "{}/data_1_3.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1_3"}]
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "x.y",
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "x.y",
'name': "1"},
]
def test_groupby_regex_with_combinatorial_option(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_x": ["y.x", "z.x"],
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(\d+).a",
"files_b_regex": r"data_(\d+).b"}) == \
[
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "y.x",
'name': "0"},
{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'files_x': "z.x",
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "y.x",
'name': "1"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'files_x': "z.x",
'name': "1"},
]
def test_groupby_named_regex(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0.a".format(tmp_path),
"{}/data_1.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(?P<key1>\d+).a",
"files_b_regex": r"data_(?P<key1>\d+).b"}) == \
[{'files_a': "{}/data_0.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0"},
{'files_a': "{}/data_1.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1"}]
def test_groupby_named_regex_paired(tmp_path):
assert build_combinations(
{"groupby": "regex",
"files_a": ["{}/data_0_2.a".format(tmp_path),
"{}/data_0_3.a".format(tmp_path),
"{}/data_1_2.a".format(tmp_path),
"{}/data_1_3.a".format(tmp_path)],
"files_b": ["{}/data_0.b".format(tmp_path),
"{}/data_1.b".format(tmp_path)],
"files_a_regex": r"data_(?P<key1>\d+)_(?P<key2>\d+).a",
"files_b_regex": r"data_(?P<key1>\d+).b"}) == \
[{'files_a': "{}/data_0_2.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0_2"},
{'files_a': "{}/data_0_3.a".format(tmp_path),
'files_b': "{}/data_0.b".format(tmp_path),
'name': "0_3"},
{'files_a': "{}/data_1_2.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1_2"},
{'files_a': "{}/data_1_3.a".format(tmp_path),
'files_b': "{}/data_1.b".format(tmp_path),
'name': "1_3"}]
| none | 1 | 2.541732 | 3 |
|
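The tests above pin down how a regex-based grouping builder should pair inputs. As a hedged illustration only, not the actual daisy.workflow.build_combinations implementation, the pairing step for a single capture group can be sketched as follows; the function name and the assumption that every file matches its regex are mine.
# Sketch only: pair files that share the value of their first regex capture
# group, the way the "groupby: regex" tests expect. Not the daisy code.
import re

def sketch_regex_combinations(files_a, files_b, regex_a, regex_b):
    by_key_a = {re.search(regex_a, f).group(1): f for f in files_a}
    by_key_b = {re.search(regex_b, f).group(1): f for f in files_b}
    shared_keys = sorted(set(by_key_a) & set(by_key_b))
    return [{"files_a": by_key_a[k], "files_b": by_key_b[k], "name": k}
            for k in shared_keys]

# Keys missing on either side are dropped, matching
# test_groupby_regex_filters_when_data_point_missing.
print(sketch_regex_combinations(
    ["/tmp/data_0.a"],
    ["/tmp/data_0.b", "/tmp/data_1.b"],
    r"data_(\d+).a", r"data_(\d+).b"))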
src/train_vae.py | katnoria/world-models | 0 | 9560 | <reponame>katnoria/world-models<gh_stars>0
# class Encoder:
# pass
# class Decoder:
# pass
# class VariationAutoEncoder:
# pass
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import pickle
import logging
from glob import glob
import numpy as np
from time import time
from datetime import datetime
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
if not os.path.exists("logs"):
os.makedirs("logs")
today = datetime.now().strftime('%Y%m%d')
logger = logging.getLogger('worldmodels')
logger.setLevel(logging.DEBUG)
# Create logger
logger = logging.getLogger("worldmodels")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger.setLevel(logging.DEBUG)
# Uncomment to enable console logger
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
streamhandler.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
filehandler = logging.FileHandler(filename='logs/dataset.{}.log'.format(today))
filehandler.setFormatter(formatter)
filehandler.setLevel(logging.DEBUG)
logger.addHandler(filehandler)
AUTOTUNE = tf.data.experimental.AUTOTUNE
def load_preprocess_image(fname, resize_to=[64,64]):
image = tf.io.read_file(fname)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.image.resize(image, [64, 64])
image = tf.image.resize(image, resize_to)
image /= 255.0
return image
INPUT_SHAPE = (64,64,3)
# INPUT_SHAPE = (128,128,3)
LATENT_DIM = 32
encoder_input = keras.Input(shape=(INPUT_SHAPE), name='encoder_input_image')
x = keras.layers.Conv2D(32, 4, strides=(2,2), activation='relu', name='conv-1')(encoder_input)
x = keras.layers.Conv2D(64, 4, strides=(2,2), activation='relu', name='conv-2')(x)
x = keras.layers.Conv2D(128, 4, strides=(2,2), activation='relu', name='conv-3')(x)
x = keras.layers.Conv2D(256, 4, strides=(2,2), activation='relu', name='conv-4')(x)
# x = keras.layers.Conv2D(512, 4, strides=(2,2), activation='relu', name='conv-5')(x)
encoder_last_conv_shape = K.int_shape(x)[1:]
logger.info("encoder_last_conv_shape: {}".format(encoder_last_conv_shape))
x = keras.layers.Flatten()(x)
mu = keras.layers.Dense(LATENT_DIM, activation='linear', name="mean")(x)
logvar = keras.layers.Dense(LATENT_DIM, activation='linear', name="variance")(x)
encoder = keras.Model(encoder_input, [mu, logvar], name='encoder')
encoder.summary()
def sample(args):
mean, logvar = args
# reparameterization trick: allows gradients to pass through the sample
# 1. sample from unit gaussian, then
# 2. multiply it with standard deviation and add mean
e = tf.random.normal(shape=(K.shape(mean)[0], LATENT_DIM))
return e * tf.math.exp(logvar) + mean
sampled_latent_vector = keras.layers.Lambda(sample)([mu, logvar])
decoder_input = keras.layers.Input(shape=K.int_shape(sampled_latent_vector)[1:], name='decoder_input')
x = keras.layers.Dense(np.prod(encoder_last_conv_shape))(decoder_input)
x = keras.layers.Reshape((1,1,np.prod(encoder_last_conv_shape)))(x)
x = keras.layers.Conv2DTranspose(128, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(64, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, kernel_size=6, strides=(2,2), activation='relu')(x)
# x = keras.layers.Conv2DTranspose(32, kernel_size=4, strides=(2,2), activation='relu')(x)
decoder_output = keras.layers.Conv2DTranspose(3, kernel_size=6, strides=(2,2))(x)
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()
# Taken from tensorflow VAE example
def log_normal_pdf(sample, mean, logvar):
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi), axis=1)
@tf.function
def calculate_loss(mean, logvar, labels, decoded_logits):
xent_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=decoded_logits)
z = sample([mean, logvar])
logpx_z = -tf.reduce_sum(xent_loss, axis=[1,2,3])
logpz = log_normal_pdf(z, 0., 0.)
logqz_x = log_normal_pdf(z, mean, logvar)
loss = -tf.reduce_mean(logpx_z + logpz - logqz_x)
return loss
class VAE(keras.Model):
def __init__(self, encoder, decoder):
super(VAE, self).__init__()
self.encoder = encoder
self.decoder = decoder
def train_vars(self):
return self.encoder.trainable_variables + self.decoder.trainable_variables
def encode(self, x):
encoded = self.encoder(x)
return encoded
def decode(self, z, apply_sigmoid=False):
logits = self.decoder(z)
if apply_sigmoid:
return tf.sigmoid(logits)
return logits
@tf.function
def train_step(train_x, model, optimizer):
with tf.GradientTape() as tape:
# use training inputs to approximate the posterior
mean, logvar = model.encode(train_x)
# sample latent vector from the learned mean and variance
latent_z = sample([mean, logvar])
# decode z
decoded_logits = model.decode(latent_z)
# calculate loss
loss = calculate_loss(mean, logvar, labels=train_x, decoded_logits=decoded_logits)
# calculate gradients
gradients = tape.gradient(loss, model.trainable_variables)
# apply gradients
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
def train(fnames, output_dirname="output", epochs=600, save_every_pct=0.3, print_every_pct=0.05):
logger.info('Total files: {}'.format(len(fnames)))
path_ds = tf.data.Dataset.from_tensor_slices(fnames)
image_ds = path_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE)
# Dataset
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = len(fnames)
train_dataset = image_ds \
.shuffle(SHUFFLE_BUFFER_SIZE) \
.repeat() \
.batch(BATCH_SIZE) \
.prefetch(buffer_size=AUTOTUNE)
if not os.path.exists(output_dirname):
os.makedirs('{}/ckpt'.format(output_dirname))
os.makedirs('{}/imgs'.format(output_dirname))
# Number of training epochs
# EPOCHS = 600
logger.info('Training epochs: {}'.format(epochs))
# Initialize the Variational Autoencoder model
model = VAE(encoder, decoder)
# Define optimizer
optimizer = keras.optimizers.Adam(1e-4)
# keep track of losses
losses = []
# How often to print the loss
print_every = max(int(print_every_pct * epochs), 1)
# Model Checkpoint
# Save model and optimizer
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
# Set save path and how many checkpoints to save
checkpoint_path = '{}/ckpt/'.format(output_dirname)
logger.info('Checkpoints will be stored at {}'.format(checkpoint_path))
manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=2)
# Load the latest checkpoint and restore
latest_ckpt = manager.latest_checkpoint
ckpt.restore(latest_ckpt)
if latest_ckpt:
logger.info('Restored from {}'.format(latest_ckpt))
else:
logger.info('Training from scratch')
# How often to save the checkpoint
save_every = max(int(save_every_pct * epochs), 1)
# We are now ready to start the training loop
elapsed_loop_time = time()
for epoch in range(0, epochs):
for train_x in train_dataset:
loss = train_step(train_x, model, optimizer)
losses.append(loss)
if epoch % print_every == 0:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logger.info('{}:Epoch {}/{}: train loss {} in {} seconds'.format(now, epoch, epochs, losses[-1], time()-elapsed_loop_time))
elapsed_loop_time = time()
if epoch % save_every == 0:
save_path = manager.save()
logger.info('Saved checkpoint for step {}:{}'.format(epoch, save_path))
# Final Save
save_path = manager.save()
logger.info('Saved checkpoint for step {}'.format(save_path))
if __name__ == "__main__":
# Toons
# fnames = glob('{}/*.png'.format("/mnt/bigdrive/datasets/cartoonset/cartoonset10k/"))
# train(fnames, output_dirname="toons128")
# Car racing
fnames = glob('{}/*.png'.format("/mnt/bigdrive/projects/public_repos/world-models/src/imgs/"))
train(fnames, output_dirname="car_racing")
| # class Encoder:
# pass
# class Decoder:
# pass
# class VariationAutoEncoder:
# pass
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import pickle
import logging
from glob import glob
import numpy as np
from time import time
from datetime import datetime
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
if not os.path.exists("logs"):
os.makedirs("logs")
today = datetime.now().strftime('%Y%m%d')
logger = logging.getLogger('worldmodels')
logger.setLevel(logging.DEBUG)
# Create logger
logger = logging.getLogger("worldmodels")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logger.setLevel(logging.DEBUG)
# Uncomment to enable console logger
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
streamhandler.setLevel(logging.DEBUG)
logger.addHandler(streamhandler)
filehandler = logging.FileHandler(filename='logs/dataset.{}.log'.format(today))
filehandler.setFormatter(formatter)
filehandler.setLevel(logging.DEBUG)
logger.addHandler(filehandler)
AUTOTUNE = tf.data.experimental.AUTOTUNE
def load_preprocess_image(fname, resize_to=[64,64]):
image = tf.io.read_file(fname)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.image.resize(image, [64, 64])
image = tf.image.resize(image, resize_to)
image /= 255.0
return image
INPUT_SHAPE = (64,64,3)
# INPUT_SHAPE = (128,128,3)
LATENT_DIM = 32
encoder_input = keras.Input(shape=(INPUT_SHAPE), name='encoder_input_image')
x = keras.layers.Conv2D(32, 4, strides=(2,2), activation='relu', name='conv-1')(encoder_input)
x = keras.layers.Conv2D(64, 4, strides=(2,2), activation='relu', name='conv-2')(x)
x = keras.layers.Conv2D(128, 4, strides=(2,2), activation='relu', name='conv-3')(x)
x = keras.layers.Conv2D(256, 4, strides=(2,2), activation='relu', name='conv-4')(x)
# x = keras.layers.Conv2D(512, 4, strides=(2,2), activation='relu', name='conv-5')(x)
encoder_last_conv_shape = K.int_shape(x)[1:]
logger.info("encoder_last_conv_shape: {}".format(encoder_last_conv_shape))
x = keras.layers.Flatten()(x)
mu = keras.layers.Dense(LATENT_DIM, activation='linear', name="mean")(x)
logvar = keras.layers.Dense(LATENT_DIM, activation='linear', name="variance")(x)
encoder = keras.Model(encoder_input, [mu, logvar], name='encoder')
encoder.summary()
def sample(args):
mean, logvar = args
# reparameterization trick: allows gradients to pass through the sample
# 1. sample from unit gaussian, then
# 2. multiply it with standard deviation and add mean
e = tf.random.normal(shape=(K.shape(mean)[0], LATENT_DIM))
return e * tf.math.exp(logvar) + mean
sampled_latent_vector = keras.layers.Lambda(sample)([mu, logvar])
decoder_input = keras.layers.Input(shape=K.int_shape(sampled_latent_vector)[1:], name='decoder_input')
x = keras.layers.Dense(np.prod(encoder_last_conv_shape))(decoder_input)
x = keras.layers.Reshape((1,1,np.prod(encoder_last_conv_shape)))(x)
x = keras.layers.Conv2DTranspose(128, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(64, kernel_size=5, strides=(2,2), activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, kernel_size=6, strides=(2,2), activation='relu')(x)
# x = keras.layers.Conv2DTranspose(32, kernel_size=4, strides=(2,2), activation='relu')(x)
decoder_output = keras.layers.Conv2DTranspose(3, kernel_size=6, strides=(2,2))(x)
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()
# Taken from tensorflow VAE example
def log_normal_pdf(sample, mean, logvar):
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi), axis=1)
@tf.function
def calculate_loss(mean, logvar, labels, decoded_logits):
xent_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=decoded_logits)
z = sample([mean, logvar])
logpx_z = -tf.reduce_sum(xent_loss, axis=[1,2,3])
logpz = log_normal_pdf(z, 0., 0.)
logqz_x = log_normal_pdf(z, mean, logvar)
loss = -tf.reduce_mean(logpx_z + logpz - logqz_x)
return loss
class VAE(keras.Model):
def __init__(self, encoder, decoder):
super(VAE, self).__init__()
self.encoder = encoder
self.decoder = decoder
def train_vars(self):
return self.encoder.trainable_variables + self.decoder.trainable_variables
def encode(self, x):
encoded = self.encoder(x)
return encoded
def decode(self, z, apply_sigmoid=False):
logits = self.decoder(z)
if apply_sigmoid:
return tf.sigmoid(logits)
return logits
@tf.function
def train_step(train_x, model, optimizer):
with tf.GradientTape() as tape:
# use training inputs to approximate the posterior
mean, logvar = model.encode(train_x)
# sample latent vector from the learned mean and variance
latent_z = sample([mean, logvar])
# decode z
decoded_logits = model.decode(latent_z)
# calculate loss
loss = calculate_loss(mean, logvar, labels=train_x, decoded_logits=decoded_logits)
# calculate gradients
gradients = tape.gradient(loss, model.trainable_variables)
# apply gradients
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
def train(fnames, output_dirname="output", epochs=600, save_every_pct=0.3, print_every_pct=0.05):
logger.info('Total files: {}'.format(len(fnames)))
path_ds = tf.data.Dataset.from_tensor_slices(fnames)
image_ds = path_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE)
# Dataset
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = len(fnames)
train_dataset = image_ds \
.shuffle(SHUFFLE_BUFFER_SIZE) \
.repeat() \
.batch(BATCH_SIZE) \
.prefetch(buffer_size=AUTOTUNE)
if not os.path.exists(output_dirname):
os.makedirs('{}/ckpt'.format(output_dirname))
os.makedirs('{}/imgs'.format(output_dirname))
# Number of training epochs
# EPOCHS = 600
logger.info('Training epochs: {}'.format(epochs))
# Initialize the Variational Autoencoder model
model = VAE(encoder, decoder)
# Define optimizer
optimizer = keras.optimizers.Adam(1e-4)
# keep track of losses
losses = []
# How often to print the loss
print_every = max(int(print_every_pct * epochs), 1)
# Model Checkpoint
# Save model and optimizer
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
# Set save path and how many checkpoints to save
checkpoint_path = '{}/ckpt/'.format(output_dirname)
logger.info('Checkpoints will be stored at {}'.format(checkpoint_path))
manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=2)
# Load the latest checkpoint and restore
latest_ckpt = manager.latest_checkpoint
ckpt.restore(latest_ckpt)
if latest_ckpt:
logger.info('Restored from {}'.format(latest_ckpt))
else:
logger.info('Training from scratch')
# How often to save the checkpoint
save_every = max(int(save_every_pct * epochs), 1)
# We are now ready to start the training loop
elapsed_loop_time = time()
for epoch in range(0, epochs):
for train_x in train_dataset:
loss = train_step(train_x, model, optimizer)
losses.append(loss)
if epoch % print_every == 0:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logger.info('{}:Epoch {}/{}: train loss {} in {} seconds'.format(now, epoch, epochs, losses[-1], time()-elapsed_loop_time))
elapsed_loop_time = time()
if epoch % save_every == 0:
save_path = manager.save()
logger.info('Saved checkpoint for step {}:{}'.format(epoch, save_path))
# Final Save
save_path = manager.save()
logger.info('Saved checkpoint for step {}'.format(save_path))
if __name__ == "__main__":
# Toons
# fnames = glob('{}/*.png'.format("/mnt/bigdrive/datasets/cartoonset/cartoonset10k/"))
# train(fnames, output_dirname="toons128")
# Car racing
fnames = glob('{}/*.png'.format("/mnt/bigdrive/projects/public_repos/world-models/src/imgs/"))
train(fnames, output_dirname="car_racing") | en | 0.712866 | # class Encoder: # pass # class Decoder: # pass # class VariationAutoEncoder: # pass # Create logger # Uncomment to enable console logger # image = tf.image.resize(image, [64, 64]) # INPUT_SHAPE = (128,128,3) # x = keras.layers.Conv2D(512, 4, strides=(2,2), activation='relu', name='conv-5')(x) # reparameterizaton trick: allows gradients to pass through the sample # 1. sample from unit gaussian, then # 2. multiply it with standard deviation and add mean # x = keras.layers.Conv2DTranspose(32, kernel_size=4, strides=(2,2), activation='relu')(x) # Taken from tensorflow VAE example # use training inputs to approximate the posterior # sample latent vector from the learned mean and variance # decode z # calculate loss # calculate gradients # apply gradients # Dataset # Number of training epochs # EPOCHS = 600 # Initialize the Variational Autoencoder model # Define optimizer # keep track of losses # How often to print the loss # Model Checkpoint # Save model and optimizer # Set save path and how many checkpoints to save # Load the latest checkpoint and restore # How often to save the checkpoint # We are now ready to start the training loop # Final Save # Toons # fnames = glob('{}/*.png'.format("/mnt/bigdrive/datasets/cartoonset/cartoonset10k/")) # train(fnames, output_dirname="toons128") # Car racing | 2.096383 | 2 |
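One detail worth flagging in the VAE script above: sample() scales the noise by tf.math.exp(logvar), which is the standard deviation only if the dense head is read as predicting log-std. Under the more common log-variance convention the factor is exp(0.5 * logvar); a minimal, self-contained sketch of that convention (not a drop-in patch to the script) follows.
# Hedged sketch of the reparameterization trick under the log-variance
# convention: z = mean + exp(0.5 * logvar) * eps. Shapes are illustrative.
import tensorflow as tf

def reparameterize(mean, logvar):
    eps = tf.random.normal(shape=tf.shape(mean))
    return mean + tf.exp(0.5 * logvar) * eps

mean = tf.zeros((4, 32))
logvar = tf.zeros((4, 32))   # log-variance 0 gives standard deviation 1
z = reparameterize(mean, logvar)
print(z.shape)               # (4, 32)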
login.py | harryzcy/canvas-file-syncer | 0 | 9561 | import time
from config import get_password, get_username
from playwright.sync_api import Page
def login(page: Page, url: str, landing_url: str):
raise RuntimeError("default login not supported")
def login_kenan_flagler(page: Page, url: str, landing_url: str) -> None:
page.goto(url)
page.wait_for_load_state('load')
if page.url.startswith(landing_url):
return
with page.expect_navigation():
page.locator("text=ONYEN Login").click()
time.sleep(0.5)
page.locator("input[type=email]").fill(get_username())
with page.expect_navigation():
page.locator("input[type=submit]").click()
time.sleep(1)
page.locator("input[type=password]").fill(get_password())
with page.expect_navigation():
page.click('input[type=submit]')
if page.url.endswith("/login"):
# 2-factor auth
page.locator("div[role=\"button\"]:has-text(\"Text\")").click()
print("Enter code: ", end="")
code = input()
code = code.strip()
page.locator("[aria-label=\"Code\"]").fill(code)
with page.expect_navigation():
page.locator("text=Verify").click()
page.locator("[aria-label=\"Don\\'t\\ show\\ this\\ again\"]").check()
page.locator("text=Yes").click()
time.sleep(0.5)
assert page.url.startswith(landing_url)
| import time
from config import get_password, get_username
from playwright.sync_api import Page
def login(page: Page, url: str, landing_url: str):
raise RuntimeError("default login not supported")
def login_kenan_flagler(page: Page, url: str, landing_url: str) -> None:
page.goto(url)
page.wait_for_load_state('load')
if page.url.startswith(landing_url):
return
with page.expect_navigation():
page.locator("text=ONYEN Login").click()
time.sleep(0.5)
page.locator("input[type=email]").fill(get_username())
with page.expect_navigation():
page.locator("input[type=submit]").click()
time.sleep(1)
page.locator("input[type=password]").fill(get_password())
with page.expect_navigation():
page.click('input[type=submit]')
if page.url.endswith("/login"):
# 2-factor auth
page.locator("div[role=\"button\"]:has-text(\"Text\")").click()
print("Enter code: ", end="")
code = input()
code = code.strip()
page.locator("[aria-label=\"Code\"]").fill(code)
with page.expect_navigation():
page.locator("text=Verify").click()
page.locator("[aria-label=\"Don\\'t\\ show\\ this\\ again\"]").check()
page.locator("text=Yes").click()
time.sleep(0.5)
assert page.url.startswith(landing_url)
| en | 0.825903 | # 2-factor auth | 2.666284 | 3 |
multitidal/client_lib.py | xa4a/multitidal | 2 | 9562 | import asyncio
import json
import os
import pty
import shutil
import sys
import tty
import termios
import time
import threading
import tornado.iostream
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect
ioloop = tornado.ioloop.IOLoop.instance()
SSH_LOGIN = "root"
SSH_PASSWORD = "<PASSWORD>"
SCREEN_TO_SCREEN_0_SEQ = b"ls -l\r\x1bOC" + b"\x010" # ^A 0
async def send_stdin_to_ws_task(ws, on_finish_cb):
print("mangling terminal")
try:
fn = os.dup(sys.stdin.fileno())
inp = tornado.iostream.PipeIOStream(fn)
mode = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(fn)
while True:
try:
print("reading stdin", end="\r\n")
content = await inp.read_bytes(100, partial=True)
print("read stdin", end="\r\n")
# content = await self.inp.read_bytes(100, partial=True)
except tornado.iostream.StreamClosedError:
print("Stdin closed", end="\r\n")
# await self.finish()
ioloop.add_callback(on_finish_cb)
break
print(f"stdin: {content}", end="\r\n")
if content[0] == 3 or not content: # CTRL-C
print("Got a ^C", end="\r\n")
ioloop.add_callback(on_finish_cb)
break
ioloop.add_callback(
ws.write_message,
json.dumps(
{
"client_command": "keystrokes",
"keystrokes": [int(x) for x in content],
}
),
)
print("no exc", end="\r\n")
except asyncio.CancelledError:
print("stdin read task cancelled", end="\r\n")
except Exception as e: # pylint: disable=broad-except
print(f"Exception: {e}")
finally:
inp.close()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, mode)
print("finally")
async def run_ssh(host, port, login=SSH_LOGIN, password=SSH_PASSWORD):
os.environ["SSHPASS"] = password
ssh_cmd = [
"ssh",
"-o",
"PreferredAuthentications=password",
"-o",
"PubkeyAuthentication=no",
"-o",
"StrictHostKeyChecking=no", # Skip fingerpint warning.
f"{login}@{host}",
"-p",
str(port),
]
sshpass_cmd = [shutil.which("sshpass"), "-e"] + ssh_cmd
args = sshpass_cmd
print(" ".join(args))
e = threading.Event()
def stdin_read(fd):
if not e.is_set():
e.set()
return SCREEN_TO_SCREEN_0_SEQ + os.read(fd, 1024)
b = os.read(fd, 1024)
return b
def master_read(fd):
b = os.read(fd, 1024)
return b
# Let Web UI connect to screen 0 first.
time.sleep(3)
res = pty.spawn(args, master_read=master_read, stdin_read=stdin_read)
print(f"ssh returned {res}")
class Client:
mode: str
def __init__(self, url, timeout):
self.url = url
self.timeout = timeout
self.ioloop = IOLoop.instance()
self.ws = None
self.send_stdin_task = None
async def connect(self):
print("trying to connect")
try:
self.ws = await websocket_connect(self.url)
except Exception as e: # pylint: disable=broad-except
print(f"connection error: {str(e)}")
else:
print("connected")
# await self.ws.write_message({'client': self.i})
self.mode = "idle"
self.ioloop.spawn_callback(self.run_idle)
self.ioloop.spawn_callback(self.run)
def finish_ws(self):
if self.ws:
self.ws.close()
self.ws = None
async def finish(self):
if self.send_stdin_task:
await self.stop_idle()
self.finish_ws()
self.ioloop.stop()
async def run_idle(self):
assert not self.send_stdin_task
print("running idle, spawning task")
self.send_stdin_task = asyncio.create_task(
send_stdin_to_ws_task(self.ws, self.finish)
)
async def stop_idle(self):
assert self.send_stdin_task
self.send_stdin_task.cancel()
await self.send_stdin_task
self.send_stdin_task = None
@staticmethod
async def run_ssh(host, port):
# Blocks ioloop
await run_ssh(host, port)
async def run(self):
while True:
msg = await self.ws.read_message()
if msg is None:
print("server left, terminating", end="\r\n")
self.ioloop.add_callback(self.finish)
return
msg = json.loads(msg)
print(f"got msg: {msg}", end="\r\n")
if "mode" not in msg:
continue
if msg["mode"] == "ssh":
host, port = msg["ssh"]["host"], msg["ssh"]["port"]
print(f"Connecting to ssh {host}:{port}...", end="\r\n")
await self.stop_idle()
await self.run_ssh(host, port)
print("restarting idle task")
self.finish_ws()
await self.connect()
break
| import asyncio
import json
import os
import pty
import shutil
import sys
import tty
import termios
import time
import threading
import tornado.iostream
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect
ioloop = tornado.ioloop.IOLoop.instance()
SSH_LOGIN = "root"
SSH_PASSWORD = "<PASSWORD>"
SCREEN_TO_SCREEN_0_SEQ = b"ls -l\r\x1bOC" + b"\x010" # ^A 0
async def send_stdin_to_ws_task(ws, on_finish_cb):
print("mangling terminal")
try:
fn = os.dup(sys.stdin.fileno())
inp = tornado.iostream.PipeIOStream(fn)
mode = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(fn)
while True:
try:
print("reading stdin", end="\r\n")
content = await inp.read_bytes(100, partial=True)
print("read stdin", end="\r\n")
# content = await self.inp.read_bytes(100, partial=True)
except tornado.iostream.StreamClosedError:
print("Stdin closed", end="\r\n")
# await self.finish()
ioloop.add_callback(on_finish_cb)
break
print(f"stdin: {content}", end="\r\n")
if content[0] == 3 or not content: # CTRL-C
print("Got a ^C", end="\r\n")
ioloop.add_callback(on_finish_cb)
break
ioloop.add_callback(
ws.write_message,
json.dumps(
{
"client_command": "keystrokes",
"keystrokes": [int(x) for x in content],
}
),
)
print("no exc", end="\r\n")
except asyncio.CancelledError:
print("stdin read task cancelled", end="\r\n")
except Exception as e: # pylint: disable=broad-except
print(f"Exception: {e}")
finally:
inp.close()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, mode)
print("finally")
async def run_ssh(host, port, login=SSH_LOGIN, password=SSH_PASSWORD):
os.environ["SSHPASS"] = password
ssh_cmd = [
"ssh",
"-o",
"PreferredAuthentications=password",
"-o",
"PubkeyAuthentication=no",
"-o",
"StrictHostKeyChecking=no", # Skip fingerpint warning.
f"{login}@{host}",
"-p",
str(port),
]
sshpass_cmd = [shutil.which("sshpass"), "-e"] + ssh_cmd
args = sshpass_cmd
print(" ".join(args))
e = threading.Event()
def stdin_read(fd):
if not e.is_set():
e.set()
return SCREEN_TO_SCREEN_0_SEQ + os.read(fd, 1024)
b = os.read(fd, 1024)
return b
def master_read(fd):
b = os.read(fd, 1024)
return b
# Let Web UI connect to screen 0 first.
time.sleep(3)
res = pty.spawn(args, master_read=master_read, stdin_read=stdin_read)
print(f"ssh returned {res}")
class Client:
mode: str
def __init__(self, url, timeout):
self.url = url
self.timeout = timeout
self.ioloop = IOLoop.instance()
self.ws = None
self.send_stdin_task = None
async def connect(self):
print("trying to connect")
try:
self.ws = await websocket_connect(self.url)
except Exception as e: # pylint: disable=broad-except
print(f"connection error: {str(e)}")
else:
print("connected")
# await self.ws.write_message({'client': self.i})
self.mode = "idle"
self.ioloop.spawn_callback(self.run_idle)
self.ioloop.spawn_callback(self.run)
def finish_ws(self):
if self.ws:
self.ws.close()
self.ws = None
async def finish(self):
if self.send_stdin_task:
await self.stop_idle()
self.finish_ws()
self.ioloop.stop()
async def run_idle(self):
assert not self.send_stdin_task
print("running idle, spawning task")
self.send_stdin_task = asyncio.create_task(
send_stdin_to_ws_task(self.ws, self.finish)
)
async def stop_idle(self):
assert self.send_stdin_task
self.send_stdin_task.cancel()
await self.send_stdin_task
self.send_stdin_task = None
@staticmethod
async def run_ssh(host, port):
# Blocks ioloop
await run_ssh(host, port)
async def run(self):
while True:
msg = await self.ws.read_message()
if msg is None:
print("server left, terminating", end="\r\n")
self.ioloop.add_callback(self.finish)
return
msg = json.loads(msg)
print(f"got msg: {msg}", end="\r\n")
if "mode" not in msg:
continue
if msg["mode"] == "ssh":
host, port = msg["ssh"]["host"], msg["ssh"]["port"]
print(f"Connecting to ssh {host}:{port}...", end="\r\n")
await self.stop_idle()
await self.run_ssh(host, port)
print("restarting idle task")
self.finish_ws()
await self.connect()
break
| en | 0.471431 | # ^A 0 # content = await self.inp.read_bytes(100, partial=True) # await self.finish() # CTRL-C # pylint: disable=broad-except # Skip fingerpint warning. # Let Web UI connect to screen 0 first. # pylint: disable=broad-except # await self.ws.write_message({'client': self.i}) # Blocks ioloop | 2.327109 | 2 |
custom_components/ge_home/entities/common/ge_water_heater.py | olds/ha_gehome | 41 | 9563 | import abc
import logging
from typing import Any, Dict, List, Optional
from homeassistant.components.water_heater import WaterHeaterEntity
from homeassistant.const import (
TEMP_FAHRENHEIT,
TEMP_CELSIUS
)
from gehomesdk import ErdCode, ErdMeasurementUnits
from ...const import DOMAIN
from .ge_erd_entity import GeEntity
_LOGGER = logging.getLogger(__name__)
class GeWaterHeater(GeEntity, WaterHeaterEntity, metaclass=abc.ABCMeta):
"""Mock temperature/operation mode supporting device as a water heater"""
@property
def heater_type(self) -> str:
raise NotImplementedError
@property
def operation_list(self) -> List[str]:
raise NotImplementedError
@property
def unique_id(self) -> str:
return f"{DOMAIN}_{self.serial_or_mac}_{self.heater_type}"
@property
def name(self) -> Optional[str]:
return f"{self.serial_or_mac} {self.heater_type.title()}"
@property
def temperature_unit(self):
measurement_system = self.appliance.get_erd_value(ErdCode.TEMPERATURE_UNIT)
if measurement_system == ErdMeasurementUnits.METRIC:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def supported_features(self):
raise NotImplementedError
| import abc
import logging
from typing import Any, Dict, List, Optional
from homeassistant.components.water_heater import WaterHeaterEntity
from homeassistant.const import (
TEMP_FAHRENHEIT,
TEMP_CELSIUS
)
from gehomesdk import ErdCode, ErdMeasurementUnits
from ...const import DOMAIN
from .ge_erd_entity import GeEntity
_LOGGER = logging.getLogger(__name__)
class GeWaterHeater(GeEntity, WaterHeaterEntity, metaclass=abc.ABCMeta):
"""Mock temperature/operation mode supporting device as a water heater"""
@property
def heater_type(self) -> str:
raise NotImplementedError
@property
def operation_list(self) -> List[str]:
raise NotImplementedError
@property
def unique_id(self) -> str:
return f"{DOMAIN}_{self.serial_or_mac}_{self.heater_type}"
@property
def name(self) -> Optional[str]:
return f"{self.serial_or_mac} {self.heater_type.title()}"
@property
def temperature_unit(self):
measurement_system = self.appliance.get_erd_value(ErdCode.TEMPERATURE_UNIT)
if measurement_system == ErdMeasurementUnits.METRIC:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def supported_features(self):
raise NotImplementedError
| en | 0.93109 | Mock temperature/operation mode supporting device as a water heater | 2.469595 | 2 |
scrapy/contracts/default.py | zyuchuan/scrapy | 0 | 9564 | import json
from scrapy.item import BaseItem
from scrapy.http import Request
from scrapy.exceptions import ContractFail
from scrapy.contracts import Contract
# contracts
class UrlContract(Contract):
""" Contract to set the url of the request (mandatory)
@url http://scrapy.org
"""
name = 'url'
def adjust_request_args(self, args):
args['url'] = self.args[0]
return args
class CallbackKeywordArgumentsContract(Contract):
""" Contract to set the keyword arguments for the request.
The value should be a JSON-encoded dictionary, e.g.:
@cb_kwargs {"arg1": "some value"}
"""
name = 'cb_kwargs'
def adjust_request_args(self, args):
args['cb_kwargs'] = json.loads(' '.join(self.args))
return args
class ReturnsContract(Contract):
""" Contract to check the output of a callback
general form:
@returns request(s)/item(s) [min=1 [max]]
e.g.:
@returns request
@returns request 2
@returns request 2 10
@returns request 0 10
"""
name = 'returns'
objects = {
'request': Request,
'requests': Request,
'item': (BaseItem, dict),
'items': (BaseItem, dict),
}
def __init__(self, *args, **kwargs):
super(ReturnsContract, self).__init__(*args, **kwargs)
assert len(self.args) in [1, 2, 3]
self.obj_name = self.args[0] or None
self.obj_type = self.objects[self.obj_name]
try:
self.min_bound = int(self.args[1])
except IndexError:
self.min_bound = 1
try:
self.max_bound = int(self.args[2])
except IndexError:
self.max_bound = float('inf')
def post_process(self, output):
occurrences = 0
for x in output:
if isinstance(x, self.obj_type):
occurrences += 1
assertion = (self.min_bound <= occurrences <= self.max_bound)
if not assertion:
if self.min_bound == self.max_bound:
expected = self.min_bound
else:
expected = '%s..%s' % (self.min_bound, self.max_bound)
raise ContractFail("Returned %s %s, expected %s" % \
(occurrences, self.obj_name, expected))
class ScrapesContract(Contract):
""" Contract to check presence of fields in scraped items
@scrapes page_name page_body
"""
name = 'scrapes'
def post_process(self, output):
for x in output:
if isinstance(x, (BaseItem, dict)):
missing = [arg for arg in self.args if arg not in x]
if missing:
raise ContractFail(
"Missing fields: %s" % ", ".join(missing))
| import json
from scrapy.item import BaseItem
from scrapy.http import Request
from scrapy.exceptions import ContractFail
from scrapy.contracts import Contract
# contracts
class UrlContract(Contract):
""" Contract to set the url of the request (mandatory)
@url http://scrapy.org
"""
name = 'url'
def adjust_request_args(self, args):
args['url'] = self.args[0]
return args
class CallbackKeywordArgumentsContract(Contract):
""" Contract to set the keyword arguments for the request.
The value should be a JSON-encoded dictionary, e.g.:
@cb_kwargs {"arg1": "some value"}
"""
name = 'cb_kwargs'
def adjust_request_args(self, args):
args['cb_kwargs'] = json.loads(' '.join(self.args))
return args
class ReturnsContract(Contract):
""" Contract to check the output of a callback
general form:
@returns request(s)/item(s) [min=1 [max]]
e.g.:
@returns request
@returns request 2
@returns request 2 10
@returns request 0 10
"""
name = 'returns'
objects = {
'request': Request,
'requests': Request,
'item': (BaseItem, dict),
'items': (BaseItem, dict),
}
def __init__(self, *args, **kwargs):
super(ReturnsContract, self).__init__(*args, **kwargs)
assert len(self.args) in [1, 2, 3]
self.obj_name = self.args[0] or None
self.obj_type = self.objects[self.obj_name]
try:
self.min_bound = int(self.args[1])
except IndexError:
self.min_bound = 1
try:
self.max_bound = int(self.args[2])
except IndexError:
self.max_bound = float('inf')
def post_process(self, output):
occurrences = 0
for x in output:
if isinstance(x, self.obj_type):
occurrences += 1
assertion = (self.min_bound <= occurrences <= self.max_bound)
if not assertion:
if self.min_bound == self.max_bound:
expected = self.min_bound
else:
expected = '%s..%s' % (self.min_bound, self.max_bound)
raise ContractFail("Returned %s %s, expected %s" % \
(occurrences, self.obj_name, expected))
class ScrapesContract(Contract):
""" Contract to check presence of fields in scraped items
@scrapes page_name page_body
"""
name = 'scrapes'
def post_process(self, output):
for x in output:
if isinstance(x, (BaseItem, dict)):
missing = [arg for arg in self.args if arg not in x]
if missing:
raise ContractFail(
"Missing fields: %s" % ", ".join(missing))
| en | 0.562235 | # contracts Contract to set the url of the request (mandatory) @url http://scrapy.org Contract to set the keyword arguments for the request. The value should be a JSON-encoded dictionary, e.g.: @cb_kwargs {"arg1": "some value"} Contract to check the output of a callback general form: @returns request(s)/item(s) [min=1 [max]] e.g.: @returns request @returns request 2 @returns request 2 10 @returns request 0 10 Contract to check presence of fields in scraped items @scrapes page_name page_body | 2.683488 | 3 |
networks/metrics.py | pabloduque0/cnn_deconv_viz | 0 | 9565 | <filename>networks/metrics.py
from keras import backend as K
import tensorflow as tf
import numpy as np
def custom_dice_coefficient(y_true, y_pred, recall_weight=0.3):
recall_weight = tf.Variable(recall_weight, dtype=tf.float32)
regular_dice = dice_coefficient(y_true, y_pred)
recall = lession_recall(y_true, y_pred)
recall = tf.cast(recall, dtype=tf.float32)
recall_addition = recall * regular_dice * recall_weight
return regular_dice + recall_addition
def lession_recall(y_true, y_pred):
conn_comp_true = tf.contrib.image.connected_components(tf.cast(tf.squeeze(y_true, axis=[-1]), tf.bool))
conn_comp_pred = conn_comp_true * tf.cast(tf.squeeze(y_pred, axis=[-1]), tf.int32)
n_conn_comp_true, _ = tf.unique(K.flatten(conn_comp_true))
n_conn_comp_pred, _ = tf.unique(K.flatten(conn_comp_pred))
n_conn_comp_true = tf.size(input=n_conn_comp_true) - 1
n_conn_comp_pred = tf.size(input=n_conn_comp_pred) - 1
recall = tf.cond(pred=tf.equal(n_conn_comp_pred, tf.Variable(0)),
true_fn=lambda: tf.Variable(1.0, dtype=tf.float64), false_fn=lambda: n_conn_comp_pred / n_conn_comp_true)
return recall
def thresholded_dice(y_true, y_pred):
y_true = tf.math.floor(y_true + 0.6)
return dice_coefficient(y_true, y_pred)
def thresholded_dice_loss(y_true, y_pred):
return -thresholded_dice(y_true, y_pred)
def custom_dice_coefficient_loss(y_true, y_pred):
return -custom_dice_coefficient(y_true, y_pred)
def dice_coefficient(y_true, y_pred, smooth=0.1):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_pred_f * y_true_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coefficient_loss(y_true, y_pred):
return -dice_coefficient(y_true, y_pred)
def sigmoid(x):
return 1. / (1. + K.exp(-x))
def segmentation_recall(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
recall = K.sum(y_pred_f * y_true_f) / tf.cast(K.sum(y_true_f), tf.float32)
return recall
def weighted_crossentropy_pixelwise(y_true, y_pred):
y_pred = tf.clip_by_value(y_pred, 1e-7, 1 - 1e-7)
y_pred = K.log(y_pred / (1 - y_pred))
wmh_indexes = np.where(y_true == 1.0)
weights = np.repeat(1.0, 240 * 240)
weights = np.reshape(weights, (1, 240, 240, 1))
weights[wmh_indexes] = 5000.0
crossentropy = (y_true * weights * -K.log(sigmoid(y_pred)) + (1 - y_true * weights) * -K.log(1 - sigmoid(y_pred)))
return crossentropy
def prediction_count(y_true, y_pred):
return tf.math.count_nonzero(y_pred)
def label_count(y_true, y_pred):
return tf.math.count_nonzero(y_true)
def prediction_sum(y_true, y_pred):
return tf.reduce_sum(input_tensor=y_pred)
def label_sum(y_true, y_pred):
return tf.reduce_sum(input_tensor=y_true)
custom_dice_coef = custom_dice_coefficient
custom_dice_loss = custom_dice_coefficient_loss
dice_coef = dice_coefficient
dice_coef_loss = dice_coefficient_loss
weighted_crossentropy = weighted_crossentropy_pixelwise
predicted_count = prediction_count
predicted_sum = prediction_sum
ground_truth_count = label_count
ground_truth_sum = label_sum
pixel_recall = segmentation_recall
obj_recall = lession_recall | <filename>networks/metrics.py
from keras import backend as K
import tensorflow as tf
import numpy as np
def custom_dice_coefficient(y_true, y_pred, recall_weight=0.3):
recall_weight = tf.Variable(recall_weight, dtype=tf.float32)
regular_dice = dice_coefficient(y_true, y_pred)
recall = lession_recall(y_true, y_pred)
recall = tf.cast(recall, dtype=tf.float32)
recall_addition = recall * regular_dice * recall_weight
return regular_dice + recall_addition
def lession_recall(y_true, y_pred):
conn_comp_true = tf.contrib.image.connected_components(tf.cast(tf.squeeze(y_true, axis=[-1]), tf.bool))
conn_comp_pred = conn_comp_true * tf.cast(tf.squeeze(y_pred, axis=[-1]), tf.int32)
n_conn_comp_true, _ = tf.unique(K.flatten(conn_comp_true))
n_conn_comp_pred, _ = tf.unique(K.flatten(conn_comp_pred))
n_conn_comp_true = tf.size(input=n_conn_comp_true) - 1
n_conn_comp_pred = tf.size(input=n_conn_comp_pred) - 1
recall = tf.cond(pred=tf.equal(n_conn_comp_pred, tf.Variable(0)),
true_fn=lambda: tf.Variable(1.0, dtype=tf.float64), false_fn=lambda: n_conn_comp_pred / n_conn_comp_true)
return recall
def thresholded_dice(y_true, y_pred):
y_true = tf.math.floor(y_true + 0.6)
return dice_coefficient(y_true, y_pred)
def thresholded_dice_loss(y_true, y_pred):
return -thresholded_dice(y_true, y_pred)
def custom_dice_coefficient_loss(y_true, y_pred):
return -custom_dice_coefficient(y_true, y_pred)
def dice_coefficient(y_true, y_pred, smooth=0.1):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_pred_f * y_true_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coefficient_loss(y_true, y_pred):
return -dice_coefficient(y_true, y_pred)
def sigmoid(x):
return 1. / (1. + K.exp(-x))
def segmentation_recall(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
recall = K.sum(y_pred_f * y_true_f) / tf.cast(K.sum(y_true_f), tf.float32)
return recall
def weighted_crossentropy_pixelwise(y_true, y_pred):
y_pred = tf.clip_by_value(y_pred, 1e-7, 1 - 1e-7)
y_pred = K.log(y_pred / (1 - y_pred))
wmh_indexes = np.where(y_true == 1.0)
weights = np.repeat(1.0, 240 * 240)
weights = np.reshape(weights, (1, 240, 240, 1))
weights[wmh_indexes] = 5000.0
crossentropy = (y_true * weights * -K.log(sigmoid(y_pred)) + (1 - y_true * weights) * -K.log(1 - sigmoid(y_pred)))
return crossentropy
def prediction_count(y_true, y_pred):
return tf.math.count_nonzero(y_pred)
def label_count(y_true, y_pred):
return tf.math.count_nonzero(y_true)
def prediction_sum(y_true, y_pred):
return tf.reduce_sum(input_tensor=y_pred)
def label_sum(y_true, y_pred):
return tf.reduce_sum(input_tensor=y_true)
custom_dice_coef = custom_dice_coefficient
custom_dice_loss = custom_dice_coefficient_loss
dice_coef = dice_coefficient
dice_coef_loss = dice_coefficient_loss
weighted_crossentropy = weighted_crossentropy_pixelwise
predicted_count = prediction_count
predicted_sum = prediction_sum
ground_truth_count = label_count
ground_truth_sum = label_sum
pixel_recall = segmentation_recall
obj_recall = lession_recall | none | 1 | 2.293882 | 2 |
|
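A quick NumPy check makes the Dice formula used above concrete; the toy masks are arbitrary.
# Toy verification of dice_coefficient's formula with smooth=0.1, pure NumPy.
import numpy as np

y_true = np.array([1., 1., 0., 0.])
y_pred = np.array([1., 0., 0., 0.])
intersection = (y_true * y_pred).sum()   # 1.0
dice = (2 * intersection + 0.1) / (y_true.sum() + y_pred.sum() + 0.1)
print(round(dice, 3))   # 0.677: one of the two positive pixels recovered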
scrapy_template/scrapy_template/pipelines.py | kk0501/spider | 0 | 9566 | <gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from hashlib import md5
from scrapy import log
from twisted.enterprise import adbapi
from scrapy_template.items import ScrapyTemplateItem
class ScrapyTemplatePipeline(object):
def __init__(self, dbpool):
self.urls_seen = set()
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['<PASSWORD>'],
charset='utf8',
use_unicode=True,
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
if isinstance(item, ScrapyTemplateItem):
if item['url'] in self.urls_seen:
raise DropItem("Duplicate item found: %s" % item['url'])
else:
self.urls_seen.add(item['url'])
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
else:
return item
def _do_upsert(self, conn, item, spider):
guid = self._get_id(item)
conn.execute("""SELECT EXISTS(
SELECT 1 FROM example WHERE guid = %s
)""", (guid, ))
ret = conn.fetchone()[0]
if not ret:
conn.execute("""
INSERT INTO example (category, name, color, images, price, url, guid)
VALUES (%s, %s, %s, %s, %s, %s, %s)
""", (item['category'], item['name'], item['color'],
item['images'], item['price'], item['url'], guid))
spider.log("Item stored in db: %s %r" % (guid, item))
def _handle_error(self, failure, item, spider):
log.err(failure)
def _get_id(self, item):
return md5(item['url']).hexdigest() | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from hashlib import md5
from scrapy import log
from twisted.enterprise import adbapi
from scrapy_template.items import ScrapyTemplateItem
class ScrapyTemplatePipeline(object):
def __init__(self, dbpool):
self.urls_seen = set()
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['<PASSWORD>'],
charset='utf8',
use_unicode=True,
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
if isinstance(item, ScrapyTemplateItem):
if item['url'] in self.urls_seen:
raise DropItem("Duplicate item found: %s" % item['url'])
else:
self.urls_seen.add(item['url'])
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
d.addBoth(lambda _: item)
return d
else:
return item
def _do_upsert(self, conn, item, spider):
guid = self._get_id(item)
conn.execute("""SELECT EXISTS(
SELECT 1 FROM example WHERE guid = %s
)""", (guid, ))
ret = conn.fetchone()[0]
if not ret:
conn.execute("""
INSERT INTO example (category, name, color, images, price, url, guid)
VALUES (%s, %s, %s, %s, %s, %s, %s)
""", (item['category'], item['name'], item['color'],
item['images'], item['price'], item['url'], guid))
spider.log("Item stored in db: %s %r" % (guid, item))
def _handle_error(self, failure, item, spider):
log.err(failure)
def _get_id(self, item):
return md5(item['url']).hexdigest() | en | 0.511113 | # -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html SELECT EXISTS( SELECT 1 FROM example WHERE guid = %s ) INSERT INTO example (category, name, color, images, price, url, guid) VALUES (%s, %s, %s, %s, %s, %s, %s) | 2.158814 | 2 |
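The pipeline above only runs once it is registered and its MySQL settings exist; a hedged settings.py fragment follows, with placeholder values, and the password key deliberately left out because it is redacted in the source.
# Placeholder Scrapy settings for ScrapyTemplatePipeline; values are examples.
ITEM_PIPELINES = {
    "scrapy_template.pipelines.ScrapyTemplatePipeline": 300,
}
MYSQL_HOST = "localhost"
MYSQL_DBNAME = "scrapy"
MYSQL_USER = "scrapy_user"
# The password setting key is redacted as '<PASSWORD>' in the code above,
# so it is intentionally not filled in here.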
run_training_size_bootstrap.py | willferreira/multilabel-stance-detection | 0 | 9567 | import click
import pickle
import numpy as np
from collections import defaultdict
from utils import reset_seeds, get_dataset, load_embeddings
from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper
from mlp_utils import CrossLabelDependencyLoss
def get_random_sample(dataset_name='bbc', train_frac=0.25):
# get model runner specific dataset
_, _, y_train, y_test = get_dataset(dataset_name)
X_train, X_test = load_embeddings(dataset_name)
grps = y_train.apply(lambda v: ''.join(map(str, v)), axis=1).to_frame(0).groupby(0)[0]
train_idx = grps.apply(lambda g: g.sample(frac=train_frac)).index.get_level_values(1)
X_train_sample = X_train.loc[train_idx, :]
y_train_sample = y_train.loc[train_idx, :]
return X_train_sample, X_test, y_train_sample, y_test
def _get_label_set(y):
return set(y.apply(lambda v: ''.join(map(str, v)), axis=1).values)
@click.command()
@click.option('--n-samples', default=10)
@click.option('--dataset-name', default='moral-dataset-MeToo')
def run(n_samples, dataset_name):
mlp_cld_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_powerset_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_labels_bootstrap_results = defaultdict(lambda: defaultdict(list))
reset_seeds()
for i in range(n_samples):
print('Running bootstrap sample: {}'.format(i + 1))
for f in np.arange(0.1, 1.1, 0.1):
X_train, X_test, y_train, y_test = get_random_sample(dataset_name, train_frac=f)
print('Training set size: {}'.format(X_train.shape))
print('Test set size: {}'.format(X_test.shape))
mlp_powerset_model = PowersetKerasWrapper(columns=y_train.columns)
mlp_powerset_model.fit(X_train.values, y_train.values)
y_pred_mlp = mlp_powerset_model.predict(X_test.values)
mlp_powerset_bootstrap_results[i][f].append(y_pred_mlp)
cld_loss = CrossLabelDependencyLoss(alpha=0.2)
mlp_cld_model = MultiOutputKerasWrapper(columns=y_train.columns, loss=cld_loss)
mlp_cld_model.fit(X_train.values, y_train.values)
y_pred_cld = mlp_cld_model.predict(X_test.values)
mlp_cld_bootstrap_results[i][f].append(y_pred_cld)
mlp_labels_bootstrap_results[i][f].append((_get_label_set(y_train), _get_label_set(y_test)))
with open('training_size_bootstrap_{}.pkl'.format(dataset_name), 'wb') as f:
pickle.dump({'cld': dict(mlp_cld_bootstrap_results),
'powerset': dict(mlp_powerset_bootstrap_results),
'labels': dict(mlp_labels_bootstrap_results)}, f)
if __name__ == '__main__':
run()
| import click
import pickle
import numpy as np
from collections import defaultdict
from utils import reset_seeds, get_dataset, load_embeddings
from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper
from mlp_utils import CrossLabelDependencyLoss
def get_random_sample(dataset_name='bbc', train_frac=0.25):
# get model runner specific dataset
_, _, y_train, y_test = get_dataset(dataset_name)
X_train, X_test = load_embeddings(dataset_name)
grps = y_train.apply(lambda v: ''.join(map(str, v)), axis=1).to_frame(0).groupby(0)[0]
train_idx = grps.apply(lambda g: g.sample(frac=train_frac)).index.get_level_values(1)
X_train_sample = X_train.loc[train_idx, :]
y_train_sample = y_train.loc[train_idx, :]
return X_train_sample, X_test, y_train_sample, y_test
def _get_label_set(y):
return set(y.apply(lambda v: ''.join(map(str, v)), axis=1).values)
@click.command()
@click.option('--n-samples', default=10)
@click.option('--dataset-name', default='moral-dataset-MeToo')
def run(n_samples, dataset_name):
mlp_cld_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_powerset_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_labels_bootstrap_results = defaultdict(lambda: defaultdict(list))
reset_seeds()
for i in range(n_samples):
print('Running bootstrap sample: {}'.format(i + 1))
for f in np.arange(0.1, 1.1, 0.1):
X_train, X_test, y_train, y_test = get_random_sample(dataset_name, train_frac=f)
print('Training set size: {}'.format(X_train.shape))
print('Test set size: {}'.format(X_test.shape))
mlp_powerset_model = PowersetKerasWrapper(columns=y_train.columns)
mlp_powerset_model.fit(X_train.values, y_train.values)
y_pred_mlp = mlp_powerset_model.predict(X_test.values)
mlp_powerset_bootstrap_results[i][f].append(y_pred_mlp)
cld_loss = CrossLabelDependencyLoss(alpha=0.2)
mlp_cld_model = MultiOutputKerasWrapper(columns=y_train.columns, loss=cld_loss)
mlp_cld_model.fit(X_train.values, y_train.values)
y_pred_cld = mlp_cld_model.predict(X_test.values)
mlp_cld_bootstrap_results[i][f].append(y_pred_cld)
mlp_labels_bootstrap_results[i][f].append((_get_label_set(y_train), _get_label_set(y_test)))
with open('training_size_bootstrap_{}.pkl'.format(dataset_name), 'wb') as f:
pickle.dump({'cld': dict(mlp_cld_bootstrap_results),
'powerset': dict(mlp_powerset_bootstrap_results),
'labels': dict(mlp_labels_bootstrap_results)}, f)
if __name__ == '__main__':
run()
| en | 0.848842 | # get model runner specific dataset | 2.271262 | 2 |
code/evaluate.py | Shuailong/CCGSupertagging | 3 | 9568 | #!/usr/bin/env python
# encoding: utf-8
"""
evaluate.py
Created by Shuailong on 2016-12-2.
Evaluate model accuracy on test set.
"""
from __future__ import print_function
from time import time
from keras.models import load_model
import os
from utils import true_accuracy
from dataset import get_data
from train import MODEL_FILE, MODEL_DIR
from train import data_generator
def main():
start_time = time()
print('\nGetting data...')
data = get_data(force=False)
X_test = data['X_test']
X_test_feats = data['X_test_feats']
y_test = data['y_test']
tag_size = len(data['tag_index'])
print('\nLoading models...')
model = load_model(os.path.join(MODEL_DIR, MODEL_FILE), custom_objects={'true_accuracy': true_accuracy})
print('\nEvaluating...')
_, true_acc = model.evaluate_generator(data_generator(X_test, X_test_feats, y_test, tag_size),
val_samples=len(X_test))
print('Test accuracy: {}.'.format(true_acc))
seconds = time() - start_time
minutes = seconds / 60
print('[Finished in {} seconds ({} minutes)]'.format(str(round(seconds, 1)),
str(round(minutes, 1))))
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# encoding: utf-8
"""
evaluate.py
Created by Shuailong on 2016-12-2.
Evaluate model accuracy on test set.
"""
from __future__ import print_function
from time import time
from keras.models import load_model
import os
from utils import true_accuracy
from dataset import get_data
from train import MODEL_FILE, MODEL_DIR
from train import data_generator
def main():
start_time = time()
print('\nGetting data...')
data = get_data(force=False)
X_test = data['X_test']
X_test_feats = data['X_test_feats']
y_test = data['y_test']
tag_size = len(data['tag_index'])
print('\nLoading models...')
model = load_model(os.path.join(MODEL_DIR, MODEL_FILE), custom_objects={'true_accuracy': true_accuracy})
print('\nEvaluating...')
_, true_acc = model.evaluate_generator(data_generator(X_test, X_test_feats, y_test, tag_size),
val_samples=len(X_test))
print('Test accuracy: {}.'.format(true_acc))
seconds = time() - start_time
minutes = seconds / 60
print('[Finished in {} seconds ({} minutes)]'.format(str(round(seconds, 1)),
str(round(minutes, 1))))
if __name__ == '__main__':
main()
| en | 0.681129 | #!/usr/bin/env python # encoding: utf-8 evaluate.py Created by Shuailong on 2016-12-2. Evaluate model accuracy on test set. | 2.576705 | 3 |
setup.py | Cloudlock/bravado | 0 | 9569 | #!/usr/bin/env python
# Copyright (c) 2013, Digium, Inc.
# Copyright (c) 2014-2016, Yelp, Inc.
import os
from setuptools import setup
import bravado
setup(
name="bravado",
# cloudlock version, no twisted dependency
version=bravado.version + "cl",
license="BSD 3-Clause License",
description="Library for accessing Swagger-enabled API's",
long_description=open(os.path.join(os.path.dirname(__file__),
"README.rst")).read(),
author="Digium, Inc. and Yelp, Inc.",
author_email="<EMAIL>",
url="https://github.com/Yelp/bravado",
packages=["bravado"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
],
install_requires=[
"bravado-core >= 4.2.2",
"yelp_bytes",
"python-dateutil",
"pyyaml",
"requests",
"six",
],
extras_require={
},
)
| #!/usr/bin/env python
# Copyright (c) 2013, Digium, Inc.
# Copyright (c) 2014-2016, Yelp, Inc.
import os
from setuptools import setup
import bravado
setup(
name="bravado",
# cloudlock version, no twisted dependency
version=bravado.version + "cl",
license="BSD 3-Clause License",
description="Library for accessing Swagger-enabled API's",
long_description=open(os.path.join(os.path.dirname(__file__),
"README.rst")).read(),
author="Digium, Inc. and Yelp, Inc.",
author_email="<EMAIL>",
url="https://github.com/Yelp/bravado",
packages=["bravado"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
],
install_requires=[
"bravado-core >= 4.2.2",
"yelp_bytes",
"python-dateutil",
"pyyaml",
"requests",
"six",
],
extras_require={
},
)
| en | 0.64223 | #!/usr/bin/env python # Copyright (c) 2013, Digium, Inc. # Copyright (c) 2014-2016, Yelp, Inc. # cloudlock version, no twisted dependency | 1.248593 | 1 |
solum/api/controllers/v1/assembly.py | devdattakulkarni/test-solum | 0 | 9570 | <gh_stars>0
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import wsme
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import assembly
import solum.api.controllers.v1.userlog as userlog_controller
from solum.api.handlers import assembly_handler
from solum.common import exception
from solum.common import request
from solum import objects
from solum.openstack.common.gettextutils import _
class AssemblyController(rest.RestController):
"""Manages operations on a single assembly."""
def __init__(self, assembly_id):
super(AssemblyController, self).__init__()
self._id = assembly_id
@pecan.expose()
def _lookup(self, primary_key, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
if primary_key == 'logs':
logs = userlog_controller.UserlogsController(self._id)
return logs, remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly)
def get(self):
"""Return this assembly."""
request.check_request_for_https()
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return assembly.Assembly.from_db_model(handler.get(self._id),
pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly)
def put(self, data):
"""Modify this assembly."""
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
res = handler.update(self._id,
data.as_dict(objects.registry.Assembly))
return assembly.Assembly.from_db_model(res, pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(status_code=204)
def delete(self):
"""Delete this assembly."""
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return handler.delete(self._id)
class AssembliesController(rest.RestController):
"""Manages operations on the assemblies collection."""
@pecan.expose()
def _lookup(self, assembly_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return AssemblyController(assembly_id), remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly,
status_code=201)
def post(self, data):
"""Create a new assembly."""
js_data = data.as_dict(objects.registry.Assembly)
if data.plan_uri is not wsme.Unset:
plan_uri = data.plan_uri
if plan_uri.startswith(pecan.request.host_url):
pl_uuid = plan_uri.split('/')[-1]
pl = objects.registry.Plan.get_by_uuid(
pecan.request.security_context, pl_uuid)
js_data['plan_id'] = pl.id
else:
# TODO(asalkeld) we are not hosting the plan so
# download the plan and insert it into our db.
raise exception.BadRequest(reason=_(
'The plan was not hosted in solum'))
if js_data.get('plan_id') is None:
raise exception.BadRequest(reason=_(
'The plan was not given or could not be found'))
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return assembly.Assembly.from_db_model(
handler.create(js_data), pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose([assembly.Assembly])
def get_all(self):
"""Return all assemblies, based on the query provided."""
request.check_request_for_https()
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return [assembly.Assembly.from_db_model(assm, pecan.request.host_url)
for assm in handler.get_all()]
| # Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import wsme
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import assembly
import solum.api.controllers.v1.userlog as userlog_controller
from solum.api.handlers import assembly_handler
from solum.common import exception
from solum.common import request
from solum import objects
from solum.openstack.common.gettextutils import _
class AssemblyController(rest.RestController):
"""Manages operations on a single assembly."""
def __init__(self, assembly_id):
super(AssemblyController, self).__init__()
self._id = assembly_id
@pecan.expose()
def _lookup(self, primary_key, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
if primary_key == 'logs':
logs = userlog_controller.UserlogsController(self._id)
return logs, remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly)
def get(self):
"""Return this assembly."""
request.check_request_for_https()
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return assembly.Assembly.from_db_model(handler.get(self._id),
pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly)
def put(self, data):
"""Modify this assembly."""
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
res = handler.update(self._id,
data.as_dict(objects.registry.Assembly))
return assembly.Assembly.from_db_model(res, pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(status_code=204)
def delete(self):
"""Delete this assembly."""
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return handler.delete(self._id)
class AssembliesController(rest.RestController):
"""Manages operations on the assemblies collection."""
@pecan.expose()
def _lookup(self, assembly_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return AssemblyController(assembly_id), remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(assembly.Assembly, body=assembly.Assembly,
status_code=201)
def post(self, data):
"""Create a new assembly."""
js_data = data.as_dict(objects.registry.Assembly)
if data.plan_uri is not wsme.Unset:
plan_uri = data.plan_uri
if plan_uri.startswith(pecan.request.host_url):
pl_uuid = plan_uri.split('/')[-1]
pl = objects.registry.Plan.get_by_uuid(
pecan.request.security_context, pl_uuid)
js_data['plan_id'] = pl.id
else:
# TODO(asalkeld) we are not hosting the plan so
# download the plan and insert it into our db.
raise exception.BadRequest(reason=_(
'The plan was not hosted in solum'))
if js_data.get('plan_id') is None:
raise exception.BadRequest(reason=_(
'The plan was not given or could not be found'))
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return assembly.Assembly.from_db_model(
handler.create(js_data), pecan.request.host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose([assembly.Assembly])
def get_all(self):
"""Return all assemblies, based on the query provided."""
request.check_request_for_https()
handler = assembly_handler.AssemblyHandler(
pecan.request.security_context)
return [assembly.Assembly.from_db_model(assm, pecan.request.host_url)
for assm in handler.get_all()] | en | 0.879244 | # Copyright 2013 - Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Manages operations on a single assembly. Return this assembly. Modify this assembly. Delete this assembly. Manages operations on the assemblies collection. Create a new assembly. # TODO(asalkeld) we are not hosting the plan so # download the plan and insert it into our db. Return all assemblies, based on the query provided. | 1.932549 | 2 |
server/website/website/migrations/0003_background_task_optimization.py | mjain2/ottertune | 1 | 9571 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-08-02 07:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0002_enable_compression'),
]
operations = [
migrations.AddField(
model_name='workload',
name='status',
field=models.IntegerField(choices=[(1, 'MODIFIED'), (2, 'PROCESSING'), (3, 'PROCESSED')], default=1, editable=False),
)
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-08-02 07:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0002_enable_compression'),
]
operations = [
migrations.AddField(
model_name='workload',
name='status',
field=models.IntegerField(choices=[(1, 'MODIFIED'), (2, 'PROCESSING'), (3, 'PROCESSED')], default=1, editable=False),
)
]
| en | 0.734154 | # -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2018-08-02 07:58 | 1.528942 | 2 |
src/agility/usc/settings.py | bobbyluig/6.A01 | 0 | 9572 | <gh_stars>0
from agility.usc.enumeration import uscSerialMode, ChannelMode, HomeMode
from agility.usc.reader import BytecodeReader
class UscSettings:
def __init__(self):
self.servosAvailable = 6
self.servoPeriod = 156
self.miniMaestroServoPeriod = 80000
self.servoMultiplier = 1
self.serialMode = uscSerialMode.SERIAL_MODE_UART_DETECT_BAUD_RATE
self.fixedBaudRate = 9600
self.enableCrc = False
self.neverSuspend = False
self.serialDeviceNumber = 12
self.miniSscOffset = 0
self.serialTimeout = 0
self.scriptDone = True
self.channelSettings = []
self.enablePullups = True
self.scriptInconsistent = False
self.script = None
self.bytecodeProgram = None
def __len__(self):
return len(self.channelSettings)
def setAndCompileScript(self, script):
self.script = None
reader = BytecodeReader()
self.bytecodeProgram = reader.read(script, len(self) != 6)
self.script = script
class ChannelSetting:
def __init__(self):
self.name = ''
self.mode = ChannelMode.Servo
self.homeMode = HomeMode.Off
self.home = 6000
self.minimum = 3968
self.maximum = 8000
self.neutral = 6000
self.range = 1905
self.speed = 0
self.acceleration = 0
| from agility.usc.enumeration import uscSerialMode, ChannelMode, HomeMode
from agility.usc.reader import BytecodeReader
class UscSettings:
def __init__(self):
self.servosAvailable = 6
self.servoPeriod = 156
self.miniMaestroServoPeriod = 80000
self.servoMultiplier = 1
self.serialMode = uscSerialMode.SERIAL_MODE_UART_DETECT_BAUD_RATE
self.fixedBaudRate = 9600
self.enableCrc = False
self.neverSuspend = False
self.serialDeviceNumber = 12
self.miniSscOffset = 0
self.serialTimeout = 0
self.scriptDone = True
self.channelSettings = []
self.enablePullups = True
self.scriptInconsistent = False
self.script = None
self.bytecodeProgram = None
def __len__(self):
return len(self.channelSettings)
def setAndCompileScript(self, script):
self.script = None
reader = BytecodeReader()
self.bytecodeProgram = reader.read(script, len(self) != 6)
self.script = script
class ChannelSetting:
def __init__(self):
self.name = ''
self.mode = ChannelMode.Servo
self.homeMode = HomeMode.Off
self.home = 6000
self.minimum = 3968
self.maximum = 8000
self.neutral = 6000
self.range = 1905
self.speed = 0
self.acceleration = 0 | none | 1 | 2.580103 | 3 |
|
invmonInfra/domain/__init__.py | jtom38/invmon-api | 0 | 9573 | <filename>invmonInfra/domain/__init__.py
from .dbApiInterface import DbApiTableInterface
from .dbApiTableInterface import DbApiTableInterface
from .cacheInterface import CacheInterface
from .loggerInterface import LoggerInterface
from .envReaderInterface import EnvReaderInterface
from .driverInterface import DriverInterface
from .jobsInterface import JobsInterface
from .jobsInventoryInterface import JobsInventoryInterface
from .emailInterface import EmailInterface
from .sqlTableInterface import SqlTableInterface | <filename>invmonInfra/domain/__init__.py
from .dbApiInterface import DbApiTableInterface
from .dbApiTableInterface import DbApiTableInterface
from .cacheInterface import CacheInterface
from .loggerInterface import LoggerInterface
from .envReaderInterface import EnvReaderInterface
from .driverInterface import DriverInterface
from .jobsInterface import JobsInterface
from .jobsInventoryInterface import JobsInventoryInterface
from .emailInterface import EmailInterface
from .sqlTableInterface import SqlTableInterface | none | 1 | 1.117396 | 1 |
|
app/app8_18mix/h_noSeqSearch.py | ameenetemady/DeepPep | 1 | 9574 | <filename>app/app8_18mix/h_noSeqSearch.py<gh_stars>1-10
import sys
import csv
import os
sys.path.append('../../')
import h_lib
import h_lib_noSeqSearch
in_strFastaFilename = '{!s}/data/protein/18mix/18mix_db_plus_contaminants_20081209.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/18mix/18_mixtures_peptide_identification.txt'.format(os.environ.get('HOME'))
out_strOutputBaseDir = './sparseData_h'
out_strFile = out_strOutputBaseDir + "/h_noSeqSearch.csv"
YInfo = h_lib.getPeptides(in_strPeptideFilename, "\t", 0, 2)
###assuming proteins are already broken to individual files under in_strProtRefsDir
#XMatchProb = h_lib.getYInfo(YInfo, in_strProtRefsDir, strXMatchProb_filename, True)
XMatchProb = h_lib_noSeqSearch.getXInfo(YInfo, in_strPeptideFilename, "\t", 0, 1)
YMatchProbCount = h_lib.getPeptideProteinMatches(YInfo, XMatchProb)
h_lib.updateXMatchingProbabilities(XMatchProb, YMatchProbCount)
XPred = h_lib.getAccumulatedXMatchingProbabilities(XMatchProb)
XPred.sort()
with open(out_strFile, "w") as bfFile:
for row in XPred:
bfFile.write('{!s},{:.6f}\n'.format(row[0], row[1]))
print("result saved in:" + out_strFile)
| <filename>app/app8_18mix/h_noSeqSearch.py<gh_stars>1-10
import sys
import csv
import os
sys.path.append('../../')
import h_lib
import h_lib_noSeqSearch
in_strFastaFilename = '{!s}/data/protein/18mix/18mix_db_plus_contaminants_20081209.fasta'.format(os.environ.get('HOME'))
in_strPeptideFilename = '{!s}/data/protein/18mix/18_mixtures_peptide_identification.txt'.format(os.environ.get('HOME'))
out_strOutputBaseDir = './sparseData_h'
out_strFile = out_strOutputBaseDir + "/h_noSeqSearch.csv"
YInfo = h_lib.getPeptides(in_strPeptideFilename, "\t", 0, 2)
###assuming proteins are already broken to individual files under in_strProtRefsDir
#XMatchProb = h_lib.getYInfo(YInfo, in_strProtRefsDir, strXMatchProb_filename, True)
XMatchProb = h_lib_noSeqSearch.getXInfo(YInfo, in_strPeptideFilename, "\t", 0, 1)
YMatchProbCount = h_lib.getPeptideProteinMatches(YInfo, XMatchProb)
h_lib.updateXMatchingProbabilities(XMatchProb, YMatchProbCount)
XPred = h_lib.getAccumulatedXMatchingProbabilities(XMatchProb)
XPred.sort()
with open(out_strFile, "w") as bfFile:
for row in XPred:
bfFile.write('{!s},{:.6f}\n'.format(row[0], row[1]))
print("result saved in:" + out_strFile)
| en | 0.407541 | ###assuming proteins are already broken to individual files under in_strProtRefsDir #XMatchProb = h_lib.getYInfo(YInfo, in_strProtRefsDir, strXMatchProb_filename, True) | 2.521266 | 3 |
normalizer.py | ashokn414/python_floating_conversions | 0 | 9575 | # for normalization we need to have the maxima of x and y values with the help of which
# we can normalise the given values
import csv
filename = "values.csv"
fields = []
rows = []
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
fields = next(reader)
for row in reader:
rows.append(row)
for row in rows:
    #read the x and y values from each data row as floats (not single characters)
    a = float(row[0])
    b = float(row[1])
norm=50
#a = float(input("enter the x cordinate:"))
#b = float(input("enter the y cordinate:"))
if (a>norm or b>norm or a<-(norm) or b<-(norm)):
print("the value given is invalid/out of bound")
else:
a = a/norm
b = b/norm
print("the normalized values are "+str(a)+","+str(b)) | # for normalization we need to have the maxima of x and y values with the help of which
# we can normalise the given values
import csv
filename = "values.csv"
fields = []
rows = []
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
fields = next(reader)
for row in reader:
rows.append(row)
for row in rows:
    #read the x and y values from each data row as floats (not single characters)
    a = float(row[0])
    b = float(row[1])
norm=50
#a = float(input("enter the x cordinate:"))
#b = float(input("enter the y cordinate:"))
if (a>norm or b>norm or a<-(norm) or b<-(norm)):
print("the value given is invalid/out of bound")
else:
a = a/norm
b = b/norm
print("the normalized values are "+str(a)+","+str(b)) | en | 0.734884 | # for normalization we need to have the maxima of x and y values with the help of which # we can normalise the given values #a = float(input("enter the x cordinate:")) #b = float(input("enter the y cordinate:")) | 4.052783 | 4 |
pygdp/fwgs.py | jiwalker-usgs/pyGDP | 0 | 9576 | from pygdp import _execute_request
from pygdp import _get_geotype
from owslib.util import log
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value, gmlIDs,
verbose, coverage, delim, stat, grpby, timeStep, summAttr, weighted, WFS_URL, outputfname, sleepSecs):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = _execute_request.dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, WFS_URL)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if weighted==False:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME",attribute),
("DATASET_URI", dataSetURI),
("TIME_START",startTime),
("TIME_END",endTime),
("REQUIRE_FULL_COVERAGE",str(coverage).lower()),
("DELIMITER",delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE",str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats=len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats=1
if isinstance(varID, list):
num_varIDs=len(varID)
else:
num_varIDs=1
inputs = [('','')]*(len(solo_inputs)+num_varIDs+num_stats)
count=0
rmvCnt=0
for solo_input in solo_inputs:
if solo_input[1]!=None:
inputs[count] = solo_input
count+=1
else:
rmvCnt+=1
del inputs[count:count+rmvCnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat_in)
inputs[count] = ("STATISTICS",stat_in)
count+=1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat)
inputs[count] = ("STATISTICS",stat)
count+=1
if num_varIDs > 1:
for var in varID:
inputs[count] = ("DATASET_ID",var)
count+=1
elif num_varIDs == 1:
inputs[count] = ("DATASET_ID",varID)
output = "OUTPUT"
return _execute_request._executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs)
| from pygdp import _execute_request
from pygdp import _get_geotype
from owslib.util import log
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value, gmlIDs,
verbose, coverage, delim, stat, grpby, timeStep, summAttr, weighted, WFS_URL, outputfname, sleepSecs):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = _execute_request.dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _get_geotype._getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, WFS_URL)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if weighted==False:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME",attribute),
("DATASET_URI", dataSetURI),
("TIME_START",startTime),
("TIME_END",endTime),
("REQUIRE_FULL_COVERAGE",str(coverage).lower()),
("DELIMITER",delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE",str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats=len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats=1
if isinstance(varID, list):
num_varIDs=len(varID)
else:
num_varIDs=1
inputs = [('','')]*(len(solo_inputs)+num_varIDs+num_stats)
count=0
rmvCnt=0
for solo_input in solo_inputs:
if solo_input[1]!=None:
inputs[count] = solo_input
count+=1
else:
rmvCnt+=1
del inputs[count:count+rmvCnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat_in)
inputs[count] = ("STATISTICS",stat_in)
count+=1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception('The statistic %s is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"' % stat)
inputs[count] = ("STATISTICS",stat)
count+=1
if num_varIDs > 1:
for var in varID:
inputs[count] = ("DATASET_ID",var)
count+=1
elif num_varIDs == 1:
inputs[count] = ("DATASET_ID",varID)
output = "OUTPUT"
return _execute_request._executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs)
| en | 0.643722 | Makes a featureWeightedGridStatistics algorithm call. The web service interface implemented is summarized here: https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features Note that varID and stat can be a list of strings. # test for dods: | 2.670602 | 3 |
tests/pure-req.py | rbanffy/bjoern | 2,326 | 9577 | import sys
import socket
conn = socket.create_connection(('0.0.0.0', 8080))
msgs = [
# 0 Keep-Alive, Transfer-Encoding chunked
'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n',
# 1,2,3 Close, EOF "encoding"
'GET / HTTP/1.1\r\n\r\n',
'GET / HTTP/1.1\r\nConnection: close\r\n\r\n',
'GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n',
# 4 Bad Request
'GET /%20%20% HTTP/1.1\r\n\r\n',
# 5 Bug #14
'GET /%20abc HTTP/1.0\r\n\r\n',
# 6 Content-{Length, Type}
'GET / HTTP/1.0\r\nContent-Length: 11\r\n'
'Content-Type: text/blah\r\nContent-Fype: bla\r\n'
'Content-Tength: bla\r\n\r\nhello world',
# 7 POST memory leak
'POST / HTTP/1.0\r\nContent-Length: 1000\r\n\r\n%s' % ('a'*1000),
# 8,9 CVE-2015-0219
'GET / HTTP/1.1\r\nFoo_Bar: bad\r\n\r\n',
'GET / HTTP/1.1\r\nFoo-Bar: good\r\nFoo_Bar: bad\r\n\r\n'
]
conn.send(msgs[int(sys.argv[1])].encode())
while 1:
data = conn.recv(100)
if not data: break
print(repr(data))
if data.endswith(b'0\r\n\r\n'):
if raw_input('new request? Y/n') == 'n':
exit()
conn.send(b'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n')
| import sys
import socket
conn = socket.create_connection(('0.0.0.0', 8080))
msgs = [
# 0 Keep-Alive, Transfer-Encoding chunked
'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n',
# 1,2,3 Close, EOF "encoding"
'GET / HTTP/1.1\r\n\r\n',
'GET / HTTP/1.1\r\nConnection: close\r\n\r\n',
'GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n',
# 4 Bad Request
'GET /%20%20% HTTP/1.1\r\n\r\n',
# 5 Bug #14
'GET /%20abc HTTP/1.0\r\n\r\n',
# 6 Content-{Length, Type}
'GET / HTTP/1.0\r\nContent-Length: 11\r\n'
'Content-Type: text/blah\r\nContent-Fype: bla\r\n'
'Content-Tength: bla\r\n\r\nhello world',
# 7 POST memory leak
'POST / HTTP/1.0\r\nContent-Length: 1000\r\n\r\n%s' % ('a'*1000),
# 8,9 CVE-2015-0219
'GET / HTTP/1.1\r\nFoo_Bar: bad\r\n\r\n',
'GET / HTTP/1.1\r\nFoo-Bar: good\r\nFoo_Bar: bad\r\n\r\n'
]
conn.send(msgs[int(sys.argv[1])].encode())
while 1:
data = conn.recv(100)
if not data: break
print(repr(data))
if data.endswith(b'0\r\n\r\n'):
if raw_input('new request? Y/n') == 'n':
exit()
conn.send(b'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n')
| en | 0.430121 | # 0 Keep-Alive, Transfer-Encoding chunked # 1,2,3 Close, EOF "encoding" # 4 Bad Request # 5 Bug #14 # 6 Content-{Length, Type} # 7 POST memory leak # 8,9 CVE-2015-0219 | 2.608001 | 3 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/stock/models/web_planner.py | gtfarng/Odoo_migrade | 1 | 9578 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from odoo import models
class PlannerInventory(models.Model):
_inherit = 'web.planner'
def _get_planner_application(self):
planner = super(PlannerInventory, self)._get_planner_application()
planner.append(['planner_inventory', 'Inventory Planner'])
return planner
| # -*- coding: utf-8 -*-
from odoo import models
class PlannerInventory(models.Model):
_inherit = 'web.planner'
def _get_planner_application(self):
planner = super(PlannerInventory, self)._get_planner_application()
planner.append(['planner_inventory', 'Inventory Planner'])
return planner | en | 0.769321 | # -*- coding: utf-8 -*- | 1.951933 | 2 |
wizbot.py | Wizard-Of-Chaos/WizardBot | 0 | 9579 | <gh_stars>0
#WIZARD BOT IS LIVE
import calendar
import discord as dc
from discord.ext.commands import Bot
from discord.ext import commands
from functools import partial
import asyncio
import time
from random import randint
from datetime import datetime
from guildconfig import GuildConfig
from rolesaver import RoleSaver
#initializes bot, sets up command prefix
bot = commands.Bot(command_prefix = '!')
bot.remove_command('help')
guild_config = GuildConfig(bot, 'config.pkl')
role_saver = RoleSaver(bot, 'roles.pkl')
#GAME STUFF
class Monster:
def __init__(self, speed, damage, health, dmg_type):
self.spd = speed
self.dmg = damage
self.hp = health
self.dmg_type = dmg_type
self.is_alive = True
#All integers.
#Last one is 1 or 0 - there are two damage types. Magical and physical.
#Physical is 0, Magical is 1.
        #Attacks return a string of the form "<dmg_type> <damage>" - a 1 or a 0 first, then the damage amount.
#ACCESSORS
def health(self):
return self.hp
def speed(self):
return self.spd
def damage(self):
return self.dmg
def life(self):
return self.is_alive
#MUTATORS
def take_hit(self, damage):
self.hp = self.hp - damage
if self.hp <= 0:
self.is_alive = False
def make_attack(self):
attack = ""
attack += str(self.dmg_type)
attack += " "
attack += str(self.dmg)
return attack
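#Illustrative sketch (not part of the original bot): make_attack() encodes an attack
#as a "<dmg_type> <damage>" string, and the combat loop further down splits it back
#apart. This hypothetical helper shows that decoding in one place.
def _parse_attack_sketch(attack_str):
    """Turn a make_attack() string such as '0 15' into (dmg_type, damage) ints."""
    dmg_type, dmg = attack_str.split(" ")
    return int(dmg_type), int(dmg)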
class Player:
def __init__(self):
self.hp = 100 #Classic!
self.dmg = 10
self.shield = 0
self.s_dur = 0
self.is_alive = True
#Player has four shield conditions.
#0 - has no shield. 1 - Physical shield. 2 - Magical shield. 3 - Both.
#ACCESSORS
def damage(self):
return self.dmg
def life(self):
return self.is_alive
def shield_type(self):
return self.shield
def shield_dur(self):
return self.s_dur
def health(self):
return self.hp
#MUTATORS
def take_hit(self, damage):
self.hp = self.hp - damage
if self.hp <= 0:
self.is_alive = False
def shield_hit(self):
self.s_dur = self.s_dur - 1
if self.s_dur == 0:
self.shield = 0
#Kills your shield when the durability hits 0.
def heal(self, heal):
self.hp = self.hp + heal
def dangerify(self, damage):
self.dmg = self.dmg + damage
def get_shield(self, shield):
#This one's a bit tricky. The shield is 0 or 1 - Physical or magical.
#It then updates the player's shield accordingly.
if shield == 0:
if self.shield == 0:
self.shield = 1
self.s_dur = 10
if self.shield == 2:
self.shield = 3
self.s_dur = 5
elif shield == 1:
if self.shield == 0:
self.shield = 2
self.s_dur = 10
if self.shield == 1:
self.shield = 3
self.s_dur = 5
#Shield durabilty goes to 5, regardless of what it was before, on picking up a SECOND shield.
#Other four cases don't need to be covered.
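#Illustrative sketch (not part of the original bot): the shield field is a small
#state machine - 0 none, 1 physical, 2 magical, 3 both. The first shield grants
#10 durability; picking up the other kind afterwards resets durability to 5, and
#shield_hit() drops the shield entirely once durability reaches 0.
def _shield_demo_sketch():
    p = Player()
    p.get_shield(0)        #physical shield: state 1, durability 10
    assert p.shield_type() == 1 and p.shield_dur() == 10
    p.get_shield(1)        #adding the magical shield: state 3, durability reset to 5
    assert p.shield_type() == 3 and p.shield_dur() == 5
    for _ in range(5):     #five blocked hits wear it away completely
        p.shield_hit()
    assert p.shield_type() == 0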
#WIZBOT OLD STUFF ENDS HERE
#FUNCTIONS HERE
def get_token():
with open('token.dat', 'r') as tokenfile:
return ''.join(
chr(int(''.join(c), 16))
for c in zip(*[iter(tokenfile.read().strip())]*2)
)
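#Illustrative sketch (an assumption about token.dat's format, inferred from the
#decoder above): the token is stored as two hex digits per character, so "Hi"
#would be saved as "4869". A hypothetical encoder for writing such a file:
def _encode_token_sketch(token):
    """Hex-encode a token string in the two-digits-per-character form get_token() reads."""
    return ''.join('{:02x}'.format(ord(ch)) for ch in token)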
def monthdelta(date, delta):
m, y = (date.month+delta) % 12, date.year + ((date.month)+delta-1) // 12
if not m: m = 12
d = min(date.day, calendar.monthrange(y, m)[1])
return date.replace(day=d, month=m, year=y)
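#Worked example (illustrative, not used by the bot): monthdelta clamps the day to
#the target month's length, so monthdelta(datetime(2020, 3, 31), -1) returns
#29 February 2020 rather than an invalid 31 February.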
async def get_last_seen(member, pendant=None):
lastseen = None
for channel in member.guild.text_channels:
lastmsg = await channel.history(limit=None, after=pendant).get(author__name=member.display_name)
if lastmsg and (lastseen is None or lastseen < lastmsg.created_at):
lastseen = lastmsg.created_at
return lastseen
#START OF EVENTS
@bot.event
async def on_ready():
print(f'{bot.user} has connected to Discord!')
@bot.event
async def on_message(message):
if message.content == "EAT THAT HORSE!":
await message.channel.send(":horse:")
await bot.process_commands(message)
@bot.event
async def on_message_edit(bfr, aft):
if bfr.author == bot.user:
return
if not hasattr(bfr.channel, 'guild'):
return
guild_id = bfr.channel.guild.id
if guild_id in guild_config.mod_channels:
embed = dc.Embed(color=dc.Color.gold(), timestamp=aft.created_at)
embed.set_author(
name=f'@{bfr.author} edited a message in #{bfr.channel}:',
icon_url=bfr.author.avatar_url,
)
embed.add_field(name='**Before:**', value=bfr.content, inline=False)
embed.add_field(name='**After:**', value=aft.content, inline=False)
embed.add_field(name='**MESSAGE ID:**', value=f'`{aft.id}`')
embed.add_field(name='**USER ID:**', value=f'`{bfr.author.id}`')
await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
embed=embed
)
@bot.event
async def on_message_delete(msg):
if not hasattr(msg.channel, 'guild'):
return
guild_id = msg.channel.guild.id
if guild_id in guild_config.mod_channels:
embed = dc.Embed(
color=dc.Color.darker_grey(),
timestamp=msg.created_at,
description=msg.content,
)
embed.set_author(
name=f'@{msg.author} deleted a message in #{msg.channel}:',
icon_url=msg.author.avatar_url,
)
embed.add_field(name='**MESSAGE ID:**', value=f'`{msg.id}`')
embed.add_field(name='**USER ID:**', value=f'`{msg.author.id}`')
await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
embed=embed
)
@bot.event
async def on_member_join(member):
guild = member.guild
if guild.id in guild_config.mod_channels:
await role_saver.load_roles(member)
embed = dc.Embed(
color=dc.Color.green(),
timestamp=datetime.utcnow(),
description=f':green_circle: **{member}** has joined **{guild}**!\n'
f'The guild now has {len(guild.members)} members!\n'
f'This account was created on `{member.created_at.strftime("%d/%m/%Y %H:%M:%S")}`'
)
embed.set_author(name=f'A user has joined the server!')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_member_remove(member):
guild = member.guild
if guild.id in guild_config.mod_channels:
role_saver.save_roles(member)
timestamp = datetime.utcnow()
lastseen = await get_last_seen(member, monthdelta(timestamp, -1)) # Moved grabbing last seen to a function
if lastseen is not None:
lastseenmsg = f'This user was last seen on `{lastseen.strftime("%d/%m/%Y %H:%M:%S")}`'
else:
lastseenmsg = 'This user has not spoken for at least 1 month!'
embed = dc.Embed(
color=dc.Color.red(),
timestamp=timestamp,
description=f':red_circle: **{member}** has left **{guild}**!\n'
f'The guild now has {len(guild.members)} members!\n{lastseenmsg}'
)
embed.set_author(name=f'A user left or got beaned!')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(
name='**ROLES SNAGGED:**',
value=(', '.join(
f'`{guild.get_role(role).name}`'
for role in role_saver.get_roles(member)
)
or None),
inline=False)
embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_member_update(bfr, aft): # Log role and nickname changes
guild = bfr.guild
if guild.id in guild_config.mod_channels:
changetype = None
if bfr.nick != aft.nick:
changetype = 'Nickname Update:'
changelog = f'**{bfr}** had their nickname changed to **{aft.nick}**'
if bfr.roles != aft.roles:
changetype = 'Role Update:'
diffrole = next(iter(set(aft.roles) ^ set(bfr.roles)))
difftype = 'added' if len(bfr.roles) < len(aft.roles) else 'removed'
changelog = f'**{aft}** had the following role {difftype}: `{diffrole.name}`'
if changetype is not None:
embed = dc.Embed(
color=dc.Color.blue(),
timestamp=datetime.utcnow(),
description=changelog,
)
embed.set_author(name=changetype, icon_url=aft.avatar_url)
embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_user_update(bfr, aft): # Log avatar, name, discrim changes
for guild in bot.guilds:
if guild.get_member(bfr.id) is not None:
changetype = None
if bfr.name != aft.name:
changetype = 'Username Update:'
changelog = f'@{bfr} has changed their username to {aft}'
if bfr.discriminator != aft.discriminator:
changetype = 'Discriminator Update:'
changelog = (
f'@{bfr} had their discriminator changed from '
f'{bfr.discriminator} to {aft.discriminator}'
)
if bfr.avatar != aft.avatar:
changetype = 'Avatar Update:'
changelog = f'@{bfr} has changed their avatar to:'
if changetype is not None:
embed = dc.Embed(
color=dc.Color.purple(),
timestamp=datetime.utcnow(),
description=changelog,
)
embed.set_author(name=changetype, icon_url=bfr.avatar_url)
if changetype.startswith('Avatar'):
embed.set_thumbnail(url=f'{aft.avatar_url}')
embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
#END OF EVENTS
@bot.command()
async def slap(ctx, arg):
await ctx.send("You have slapped {1}!" .format(ctx, arg))
@bot.command()
async def hello(ctx):
await ctx.send("Hello, World!")
@bot.command()
async def echo(ctx, arg):
await ctx.send(arg)
@bot.command()
async def roll(ctx, arg):
value = randint(1, int(arg))
await ctx.send("You have rolled a {1}!" .format(ctx, value))
@bot.command()
async def help(ctx):
embed = dc.Embed(
color=ctx.author.color,
timestamp=ctx.message.created_at,
description=f'It seems you have asked about the Homestuck and Hiveswap Discord Utility Bot:tm:.'
f'This is a bot designed to cater to the server\'s moderation, utility, and statistic '
f'tracking needs. If the functions herein described are not performing to the degree '
f'that is claimed, please direct your attention to Wizard of Chaos#2459.\n\n'
f'**Command List:**',
)
embed.set_author(name='Help message', icon_url=bot.user.avatar_url)
embed.add_field(name='`help`', value='Display this message.', inline=False)
embed.add_field(
name='`info [username]`',
value='Grabs user information. Leave username empty to get your own info.',
inline=False
)
embed.add_field(name='`ping`', value='Pong!', inline=False)
embed.add_field(
name='`config (msglog|usrlog)`',
value='(Manage Server only) Sets the appropriate log channel.',
inline=False
)
await ctx.send(embed=embed)
@bot.command()
async def info(ctx, member : str=None):
if member is not None:
for gmember in ctx.guild.members:
if member == gmember.display_name:
member = gmember
break
else:
await ctx.send(
'It seems that user can\'t be found. Please check your spelling. '
'Alternatively, try adding double quotes ("") around the name.'
)
return
else:
member = ctx.author
timestamp = datetime.utcnow()
lastseen = await get_last_seen(member, monthdelta(timestamp, -1))
if lastseen is not None:
lastseenmsg = lastseen.strftime("%d/%m/%Y %H:%M:%S")
else:
lastseenmsg = 'This user has not spoken for at least 1 month!'
embed = dc.Embed(color=member.color, timestamp=timestamp)
embed.set_author(name=f'Information for {member}')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='User ID:', value=f'{member.id}')
embed.add_field(name='Last Seen:', value=lastseenmsg, inline=False)
embed.add_field(name='Account Created On:', value=member.created_at.strftime('%d/%m/%Y %H:%M:%S'))
embed.add_field(name='Guild Joined On:', value=member.joined_at.strftime('%d/%m/%Y %H:%M:%S'))
embed.add_field(name='Roles:', value=', '.join(f'`{role.name}`' for role in member.roles[1:]), inline=False)
if ctx.author != member:
msg = 'It seems you\'re a bit of a stalker, aren\'t you?'
else:
msg = None
await ctx.send(msg, embed=embed)
@bot.command()
async def ping(ctx):
await ctx.send(f'Pong, <@!{ctx.message.author.id}>!')
@bot.group()
async def config(ctx):
if ctx.invoked_subcommand is None:
await ctx.send(
'It seems that you have attempted to run a nonexistent command. '
'Would you like to try again? Redos are free, you know.'
)
@config.command()
async def usrlog(ctx):
if ctx.author.guild_permissions.manage_guild == True:
await ctx.send(guild_config.setlog(ctx, 'usrlog'))
else:
await ctx.send("It seems that you don't have the appropriate permissions for this command.")
@config.command()
async def msglog(ctx):
if ctx.author.guild_permissions.manage_guild == True:
await ctx.send(guild_config.setlog(ctx, 'msglog'))
else:
await ctx.send("It seems that you don't have the appropriate permissions for this command.")
#GAME EVENT
#ABANDON ALL HOPE YE WHO GO BELOW HERE
@bot.command()
async def rogue_game(ctx):
await ctx.send("Game started! Choose a starting buff - 'Health' or 'Damage'.")
def check(m):
if m.author == ctx.author:
return m.content == "Health" or m.content == "Damage" or m.content == "CMSC280 FREE PASS"
else:
return False
gamer = Player() #Initializing player class
msg = await bot.wait_for("message", check=check)
if msg.content == "Health":
await ctx.send("+25 HP!")
gamer.heal(25)
elif msg.content == "Damage":
await ctx.send("+5 Damage!")
gamer.dangerify(5)
elif msg.content == "CMSC280 FREE PASS":
await ctx.send("Free shield!")
gamer.get_shield(1)
gamer.get_shield(0)
await ctx.send("OPTIONS: You can 'Block', 'Dodge' or 'Attack' a monster. Alternatively, you may 'Die'.")
slain_enemies = 0
def continue_check(m): #Check used several times
if m.author == ctx.author:
return m.content == "Yes" or m.content == "No"
else:
return False
while gamer.life() == True:
game_roll = randint(1, 1) #placeholder
if game_roll == 1:
#Monster speed is between 5 and 12.
#Monster health is between 40 and 120.
#Monster damage is between 5 and 20.
#Monster damage type is random one or the other (physical or magical).
m_speed = randint(5, 12)
m_hp = randint(40, 120)
m_dmg = randint(5, 20)
m_type = randint(0, 1)
danger = Monster(m_speed, m_dmg, m_hp, m_type) #Initializing monster class
print(f"Monster generated.")
await ctx.send("There is a beast, and you must tenderize it!")
while danger.life() == True:
await ctx.send("Monsters speed is {1}, damage {2}, health {3}." .format(ctx, danger.speed(), danger.damage(), danger.health()))
m_attk_str = danger.make_attack()
m_attk = m_attk_str.split(" ")
if "0" in m_attk:
await ctx.send("The monster is about to bite you!")
elif "1" in m_attk:
await ctx.send("The monster is about to breathe fire at you!")
def game_response(m): #Player response
if m.author == ctx.author:
return m.content == "Block" or m.content == "Dodge" or m.content == "Attack" or m.content == "Die"
else:
return False
#Reactions to the monster's attack
try:
g_msg = await bot.wait_for("message",timeout=m_speed, check=game_response)
if g_msg.content == "Block":
if "0" in m_attk:
if gamer.shield_type() == 1 or gamer.shield_type() == 3:
gamer.shield_hit()
await ctx.send("You block the attack!")
if gamer.shield_type() == 0:
await ctx.send("Your shield shatters from the force of the blow.")
else:
await ctx.send("You try to block it, but your shield isn't rated for this kind of damage!")
bp_damage = int(m_attk[1])
gamer.take_hit(bp_damage)
curhp = gamer.health()
await ctx.send("Your health is {1}." .format(ctx, curhp))
if "1" in m_attk:
if gamer.shield_type() == 2 or gamer.shield_type() == 3:
gamer.shield_hit()
await ctx.send("You block the attack!")
if gamer.shield_type() == 0:
await ctx.send("Your shield falls to pieces in a burst of multicolored light.")
else:
await ctx.send("The magical assault burns right through your shield!")
bm_damage = int(m_attk[1])
gamer.take_hit(bm_damage)
curhp = gamer.health()
await ctx.send("Your health is {1}." .format(ctx, curhp))
if g_msg.content == "Dodge":
await ctx.send("You roll to one side, avoiding some of the damage!")
d_damage = int(m_attk[1])
hit = d_damage - randint(5, 18)
gamer.take_hit(hit)
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if g_msg.content == "Attack":
await ctx.send("You strike at the monster, but in doing so, expose yourself to the blow!") #Heh. Expose yourself. Good one, me.
a_damage = int(m_attk[1])
hit = a_damage + randint(5, 10)
gamer.take_hit(hit)
danger.take_hit(gamer.damage())
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if g_msg.content == "Die":
await ctx.send("You die before the blow hits, confusing the monster.")
gamer.take_hit(gamer.health())
except asyncio.TimeoutError:
await ctx.send("You didn't move fast enough! The attack lands!")
t_damage = int(m_attk[1])
gamer.take_hit(t_damage)
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if gamer.life() == False:
break
await ctx.send("The monster rears back! Quickly, hit the thing!")
def attack_response(m):
if m.author == ctx.author:
return m.content == "Attack"
else:
return False
try:
a_msg = await bot.wait_for("message", timeout=m_speed, check=attack_response)
if a_msg.content == "Attack":
await ctx.send("You hit the monster!")
danger.take_hit(gamer.damage())
except asyncio.TimeoutError:
await ctx.send("You didn't move fast enough!")
#Right, by this point, the monster has attacked, and the player has attacked.
#Need to check if the player is dead or not.
if gamer.life() == False:
break
#Only other option now is that the monster is still alive, requiring another turn, or it's dead, in which case...
#We should end up here, outside the loop.
if gamer.life() == True: #Necessary. Can break above loop without being alive, due to 'Die'.
await ctx.send("The monster has been defeated.")
slain_enemies = slain_enemies + 1
lootroll = randint(0, 4)
#Five cases. 0 - nothing. 1 - Physical shield. 2 - Magic shield. 3 - Health. 4 - Damage.
if lootroll == 0:
await ctx.send("The monster dropped nothing.")
if lootroll == 1:
await ctx.send("In the monster's digestive tract, you find a metal shield!")
gamer.get_shield(0)
if lootroll == 2:
await ctx.send("In the monster's spleen, you find a runic shield, glowing with spellcraft!")
gamer.get_shield(1)
if lootroll == 3:
healthroll = randint(5, 30)
await ctx.send("The monster's blood is a powerful restorative! You heal for {1}." .format(ctx, healthroll))
gamer.heal(healthroll)
if lootroll == 4:
dmgroll = randint(3, 12)
await ctx.send("You monster's bones make an excellent weapon! Your damage increases by {1}." .format(ctx, dmgroll))
gamer.dangerify(dmgroll)
#Loot handled. Looping again after describing player stats.
await ctx.send("Your health is {1} and your damage is {2}." .format(ctx, gamer.health(), gamer.damage()))
if gamer.shield_type() == 0:
await ctx.send("You have no shield.")
elif gamer.shield_type() == 1:
await ctx.send("You have a sturdy metal shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
elif gamer.shield_type() == 2:
await ctx.send("You have a rune-inscribed shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
elif gamer.shield_type() == 3:
await ctx.send("You have an inscribed metal shield. Powerful! It can take {1} more hits." .format(ctx, gamer.shield_dur()))
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if con_msg.content == "No":
break
#End of combat loop. Player is dead.
if game_roll == 2:
await ctx.send("You encounter a great and terrible wizard.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 3:
await ctx.send("You stumble into a trap!")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 4:
await ctx.send("Rocks fall, everyone dies.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 5:
await ctx.send("A man just walks up and punches you. What a jerk.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
#Placeholder maneuvers. Plan to expand game later with more events.
#Get duel working for demo
await ctx.send("You have died. Nice try, though! You killed {1} monsters." .format(ctx, slain_enemies))
@bot.command()
#Shoutout to my friend Janine for helping me cut this beast of a function in half.
async def duel(ctx, *, member: dc.Member):
await ctx.send("You have challenged {1} to a duel! How do you respond {1}?".format(ctx, member))
duelee = member # Discord member, shown as 'Wizard of Chaos#2459' or similar
player1 = Player()
dueler = ctx.author # ditto
player2 = Player()
def filter_tokens(msg, tokens):
"""Returns a list of tokens from the sequence that appear in the message."""
text = msg.content.strip().lower()
return [t for t in tokens if t in text]
def check(m): # Check if duel is accepted
return m.author == duelee and bool(filter_tokens(m, ('accept', 'decline')))
try:
msg = await bot.wait_for("message", timeout=20, check=check)
tokens = filter_tokens(msg, ('accept', 'decline'))
if len(tokens) > 1:
await ctx.send("Your indecision has weirded out your opponent. Good job.")
return
if 'decline' == tokens[0]:
await ctx.send("You have declined the challenge, everyone judges you.") #Coward.
return
if 'accept' == tokens[0]:
await ctx.send("You have accepted the duel!")
except asyncio.TimeoutError:
await ctx.send("{1} appears to be absent. Coward.".format(ctx, duelee))
return
await ctx.send(
"The duel has begun. The three attacks are 'critical strike', 'power attack', and 'flurry'. "
"You can hit someone from the 'left' or the 'right', or just not pick a direction. "
"You can also 'die'."
)
await ctx.send(
"Critical strikes cannot be parried. "
"Power attacks cannot be parried or blocked. "
"Flurries cannot be blocked or dodged effectively."
)
#Slightly more in-depth explanation:
#Critical strikes are blocked from the same direction they came in.
#Attempting to roll in any direction other than the opposite of the incoming attack results in a hit.
#Critical strikes cannot be parried, like, at all.
#Flurries must be parried from the same direction. They can be dodged for reduced damage. They cannot be blocked.
#Power attacks cannot be blocked or parried and MUST be dodged, to the opposite of the incoming direction.
#Dodges have to go in the opposite direction or they fail.
#Attack / defense checks based on incoming messages
def attack_check(m, a):
return m.author == a and bool(filter_tokens(m, attacks))
def defense_check(m, a):
return m.author == a and bool(filter_tokens(m, defenses))
atk_time = 5 # Reaction time for players in seconds, set to 10 for demo, 5 during actual play
attacks = ("critical strike", "flurry", "power attack", "die")
defenses = ("parry", "dodge", "block", "die")
dirs = ("left", "right")
while True: # External infinite loop.
for actor1, actor2, stats1, stats2 in ((duelee, dueler, player1, player2), (dueler, duelee, player2, player1)): # Turn order loop.
if not(player2.life() and player1.life()): # Check if either player died during any turn.
await ctx.send("{1} wins!".format(ctx, duelee if player1.life() else dueler))
return
await ctx.send("It's {1}'s turn to attack.".format(ctx, actor1))
try:
a1_msg = await bot.wait_for("message", timeout=20, check=lambda m: attack_check(m, actor1))
except asyncio.TimeoutError:
await ctx.send("{1} does nothing.".format(ctx, actor1))
continue
attack_tokens = filter_tokens(a1_msg, attacks)
attack_dirs = filter_tokens(a1_msg, dirs)
if len(attack_tokens) > 1 or len(attack_dirs) > 1:
await ctx.send("{1} has wasted too much time on indecisive action and got confused!".format(ctx, actor1))
continue
attack_token = attack_tokens[0]
attack_dir = attack_dirs[0] if attack_dirs else "top"
if "die" == attack_token:
await ctx.send("{1} screams that {2} will never understand their pain, then slits their wrists!".format(ctx, actor1, actor2))
stats1.take_hit(100) # It's no surprise the emo movement failed, no surprise at all.
continue
await ctx.send("{1} throws out a {2} from the {3}!".format(ctx, actor1, attack_token, attack_dir))
try:
a2_msg = await bot.wait_for("message", timeout=atk_time, check=lambda m: defense_check(m, actor2))
except asyncio.TimeoutError:
await ctx.send("{1} doesn't move fast enough, and gets hit!".format(ctx, actor2))
stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
continue
defense_tokens = filter_tokens(a2_msg, defenses)
defense_dirs = filter_tokens(a2_msg, dirs)
if len(defense_tokens) > 1 or len(defense_dirs) > 1:
await ctx.send("{1} doesn't get their act together fast enough and gets hit!".format(ctx, actor2))
stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, player2.health()))
continue
defense_token = defense_tokens[0]
defense_dir = defense_dirs[0] if defense_dirs else "top"
if "die" == defense_token:
await ctx.send("{1} accepts their fate and allows the blow to crush their skull!".format(ctx, actor2))
stats2.take_hit(100)
continue
# A whole bunch of if/elif/else chains. Asyncio REALLY does not like when you try to call outside functions.
# CRITICAL STRIKE:
if "critical strike" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
else:
await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(40)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_token:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
else:
await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(40)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
if defense_dir != "top":
await ctx.send("{1} fails to block the central strike!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
#All critical strike maneuvers handled.
#FLURRY:
if "flurry" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
else:
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_dir:
await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
else:
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
elif "dodge" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges most of the blows, but takes one hit anyway!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
#Flurry maneuvers handled.
#POWER ATTACK:
if "power attack" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
                        if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
# Power attacks handled.
# All attacks handled. Next player's attack.
#END DUEL
if __name__ == '__main__':
bot.run(get_token()) | #WIZARD BOT IS LIVE
import calendar
import discord as dc
from discord.ext.commands import Bot
from discord.ext import commands
from functools import partial
import asyncio
import time
from random import randint
from datetime import datetime
from guildconfig import GuildConfig
from rolesaver import RoleSaver
#initializes bot, sets up command sign
bot = commands.Bot(command_prefix = '!')
bot.remove_command('help')
guild_config = GuildConfig(bot, 'config.pkl')
role_saver = RoleSaver(bot, 'roles.pkl')
#GAME STUFF
class Monster:
def __init__(self, speed, damage, health, dmg_type):
self.spd = speed
self.dmg = damage
self.hp = health
self.dmg_type = dmg_type
self.is_alive = True
#All integers.
#Last one is 1 or 0 - there are two damage types. Magical and physical.
#Physical is 0, Magical is 1.
        #make_attack returns a space-separated string: the damage type (1 or 0) first, then the damage amount.
#ACCESSORS
def health(self):
return self.hp
def speed(self):
return self.spd
def damage(self):
return self.dmg
def life(self):
return self.is_alive
#MUTATORS
def take_hit(self, damage):
self.hp = self.hp - damage
if self.hp <= 0:
self.is_alive = False
def make_attack(self):
attack = ""
attack += str(self.dmg_type)
attack += " "
attack += str(self.dmg)
return attack
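#Illustrative sketch (not part of the original bot): make_attack's output is a plain
#string, and the game loop below splits it inline. This hypothetical helper just
#shows that parsing in one place.
def _parse_attack(attack_str):
    dmg_type, damage = attack_str.split(" ")
    return int(dmg_type), int(damage)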
class Player:
def __init__(self):
self.hp = 100 #Classic!
self.dmg = 10
self.shield = 0
self.s_dur = 0
self.is_alive = True
#Player has four shield conditions.
#0 - has no shield. 1 - Physical shield. 2 - Magical shield. 3 - Both.
#ACCESSORS
def damage(self):
return self.dmg
def life(self):
return self.is_alive
def shield_type(self):
return self.shield
def shield_dur(self):
return self.s_dur
def health(self):
return self.hp
#MUTATORS
def take_hit(self, damage):
self.hp = self.hp - damage
if self.hp <= 0:
self.is_alive = False
def shield_hit(self):
self.s_dur = self.s_dur - 1
if self.s_dur == 0:
self.shield = 0
#Kills your shield when the durability hits 0.
def heal(self, heal):
self.hp = self.hp + heal
def dangerify(self, damage):
self.dmg = self.dmg + damage
def get_shield(self, shield):
#This one's a bit tricky. The shield is 0 or 1 - Physical or magical.
#It then updates the player's shield accordingly.
if shield == 0:
if self.shield == 0:
self.shield = 1
self.s_dur = 10
if self.shield == 2:
self.shield = 3
self.s_dur = 5
elif shield == 1:
if self.shield == 0:
self.shield = 2
self.s_dur = 10
if self.shield == 1:
self.shield = 3
self.s_dur = 5
#Shield durabilty goes to 5, regardless of what it was before, on picking up a SECOND shield.
#Other four cases don't need to be covered.
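#Illustrative sketch (never called by the bot): walks through the shield state
#transitions described above. 0 = none, 1 = physical, 2 = magical, 3 = both.
def _shield_demo():
    p = Player()
    p.get_shield(0) #physical shield: state 1, durability 10
    assert p.shield_type() == 1 and p.shield_dur() == 10
    p.get_shield(1) #adding the magical shield: state 3, durability 5
    assert p.shield_type() == 3 and p.shield_dur() == 5
    for _ in range(5): #five hits wear the combined shield back down to state 0
        p.shield_hit()
    assert p.shield_type() == 0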
#WIZBOT OLD STUFF ENDS HERE
#FUNCTIONS HERE
def get_token():
with open('token.dat', 'r') as tokenfile:
return ''.join(
chr(int(''.join(c), 16))
for c in zip(*[iter(tokenfile.read().strip())]*2)
)
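#Hedged sketch: get_token() above decodes token.dat as pairs of hex digits, so a
#matching file could presumably be written like this. The file name and format are
#assumptions inferred from the decoder, not documented anywhere else.
def _encode_token(token, path='token.dat'):
    with open(path, 'w') as tokenfile:
        tokenfile.write(''.join('{:02x}'.format(ord(c)) for c in token))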
def monthdelta(date, delta):
m, y = (date.month+delta) % 12, date.year + ((date.month)+delta-1) // 12
if not m: m = 12
d = min(date.day, calendar.monthrange(y, m)[1])
return date.replace(day=d, month=m, year=y)
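#Illustrative checks (not executed automatically): monthdelta clamps the day to the
#shorter month and rolls the year over when needed.
def _monthdelta_examples():
    assert monthdelta(datetime(2020, 3, 31), -1) == datetime(2020, 2, 29)
    assert monthdelta(datetime(2020, 1, 15), -1) == datetime(2019, 12, 15)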
async def get_last_seen(member, pendant=None):
lastseen = None
for channel in member.guild.text_channels:
lastmsg = await channel.history(limit=None, after=pendant).get(author__name=member.display_name)
if lastmsg and (lastseen is None or lastseen < lastmsg.created_at):
lastseen = lastmsg.created_at
return lastseen
#START OF EVENTS
@bot.event
async def on_ready():
print(f'{bot.user} has connected to Discord!')
@bot.event
async def on_message(message):
if message.content == "EAT THAT HORSE!":
await message.channel.send(":horse:")
await bot.process_commands(message)
@bot.event
async def on_message_edit(bfr, aft):
if bfr.author == bot.user:
return
if not hasattr(bfr.channel, 'guild'):
return
guild_id = bfr.channel.guild.id
if guild_id in guild_config.mod_channels:
embed = dc.Embed(color=dc.Color.gold(), timestamp=aft.created_at)
embed.set_author(
name=f'@{bfr.author} edited a message in #{bfr.channel}:',
icon_url=bfr.author.avatar_url,
)
embed.add_field(name='**Before:**', value=bfr.content, inline=False)
embed.add_field(name='**After:**', value=aft.content, inline=False)
embed.add_field(name='**MESSAGE ID:**', value=f'`{aft.id}`')
embed.add_field(name='**USER ID:**', value=f'`{bfr.author.id}`')
await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
embed=embed
)
@bot.event
async def on_message_delete(msg):
if not hasattr(msg.channel, 'guild'):
return
guild_id = msg.channel.guild.id
if guild_id in guild_config.mod_channels:
embed = dc.Embed(
color=dc.Color.darker_grey(),
timestamp=msg.created_at,
description=msg.content,
)
embed.set_author(
name=f'@{msg.author} deleted a message in #{msg.channel}:',
icon_url=msg.author.avatar_url,
)
embed.add_field(name='**MESSAGE ID:**', value=f'`{msg.id}`')
embed.add_field(name='**USER ID:**', value=f'`{msg.author.id}`')
await bot.get_channel(guild_config.mod_channels[guild_id]['msglog']).send(
embed=embed
)
@bot.event
async def on_member_join(member):
guild = member.guild
if guild.id in guild_config.mod_channels:
await role_saver.load_roles(member)
embed = dc.Embed(
color=dc.Color.green(),
timestamp=datetime.utcnow(),
description=f':green_circle: **{member}** has joined **{guild}**!\n'
f'The guild now has {len(guild.members)} members!\n'
f'This account was created on `{member.created_at.strftime("%d/%m/%Y %H:%M:%S")}`'
)
embed.set_author(name=f'A user has joined the server!')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_member_remove(member):
guild = member.guild
if guild.id in guild_config.mod_channels:
role_saver.save_roles(member)
timestamp = datetime.utcnow()
lastseen = await get_last_seen(member, monthdelta(timestamp, -1)) # Moved grabbing last seen to a function
if lastseen is not None:
lastseenmsg = f'This user was last seen on `{lastseen.strftime("%d/%m/%Y %H:%M:%S")}`'
else:
lastseenmsg = 'This user has not spoken for at least 1 month!'
embed = dc.Embed(
color=dc.Color.red(),
timestamp=timestamp,
description=f':red_circle: **{member}** has left **{guild}**!\n'
f'The guild now has {len(guild.members)} members!\n{lastseenmsg}'
)
embed.set_author(name=f'A user left or got beaned!')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(
name='**ROLES SNAGGED:**',
value=(', '.join(
f'`{guild.get_role(role).name}`'
for role in role_saver.get_roles(member)
)
or None),
inline=False)
embed.add_field(name='**USER ID:**', value=f'`{member.id}`')
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_member_update(bfr, aft): # Log role and nickname changes
guild = bfr.guild
if guild.id in guild_config.mod_channels:
changetype = None
if bfr.nick != aft.nick:
changetype = 'Nickname Update:'
changelog = f'**{bfr}** had their nickname changed to **{aft.nick}**'
if bfr.roles != aft.roles:
changetype = 'Role Update:'
diffrole = next(iter(set(aft.roles) ^ set(bfr.roles)))
difftype = 'added' if len(bfr.roles) < len(aft.roles) else 'removed'
changelog = f'**{aft}** had the following role {difftype}: `{diffrole.name}`'
if changetype is not None:
embed = dc.Embed(
color=dc.Color.blue(),
timestamp=datetime.utcnow(),
description=changelog,
)
embed.set_author(name=changetype, icon_url=aft.avatar_url)
embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
@bot.event
async def on_user_update(bfr, aft): # Log avatar, name, discrim changes
for guild in bot.guilds:
if guild.get_member(bfr.id) is not None:
changetype = None
if bfr.name != aft.name:
changetype = 'Username Update:'
changelog = f'@{bfr} has changed their username to {aft}'
if bfr.discriminator != aft.discriminator:
changetype = 'Discriminator Update:'
changelog = (
f'@{bfr} had their discriminator changed from '
f'{bfr.discriminator} to {aft.discriminator}'
)
if bfr.avatar != aft.avatar:
changetype = 'Avatar Update:'
changelog = f'@{bfr} has changed their avatar to:'
if changetype is not None:
embed = dc.Embed(
color=dc.Color.purple(),
timestamp=datetime.utcnow(),
description=changelog,
)
embed.set_author(name=changetype, icon_url=bfr.avatar_url)
if changetype.startswith('Avatar'):
embed.set_thumbnail(url=f'{aft.avatar_url}')
embed.add_field(name='**USER ID:**', value=f'`{aft.id}`', inline=False)
await bot.get_channel(guild_config.mod_channels[guild.id]['usrlog']).send(
embed=embed
)
#END OF EVENTS
@bot.command()
async def slap(ctx, arg):
await ctx.send("You have slapped {1}!" .format(ctx, arg))
@bot.command()
async def hello(ctx):
await ctx.send("Hello, World!")
@bot.command()
async def echo(ctx, arg):
await ctx.send(arg)
@bot.command()
async def roll(ctx, arg):
value = randint(1, int(arg))
await ctx.send("You have rolled a {1}!" .format(ctx, value))
@bot.command()
async def help(ctx):
embed = dc.Embed(
color=ctx.author.color,
timestamp=ctx.message.created_at,
description=f'It seems you have asked about the Homestuck and Hiveswap Discord Utility Bot:tm:.'
f'This is a bot designed to cater to the server\'s moderation, utility, and statistic '
f'tracking needs. If the functions herein described are not performing to the degree '
f'that is claimed, please direct your attention to Wizard of Chaos#2459.\n\n'
f'**Command List:**',
)
embed.set_author(name='Help message', icon_url=bot.user.avatar_url)
embed.add_field(name='`help`', value='Display this message.', inline=False)
embed.add_field(
name='`info [username]`',
value='Grabs user information. Leave username empty to get your own info.',
inline=False
)
embed.add_field(name='`ping`', value='Pong!', inline=False)
embed.add_field(
name='`config (msglog|usrlog)`',
value='(Manage Server only) Sets the appropriate log channel.',
inline=False
)
await ctx.send(embed=embed)
@bot.command()
async def info(ctx, member : str=None):
if member is not None:
for gmember in ctx.guild.members:
if member == gmember.display_name:
member = gmember
break
else:
await ctx.send(
'It seems that user can\'t be found. Please check your spelling. '
'Alternatively, try adding double quotes ("") around the name.'
)
return
else:
member = ctx.author
timestamp = datetime.utcnow()
lastseen = await get_last_seen(member, monthdelta(timestamp, -1))
if lastseen is not None:
lastseenmsg = lastseen.strftime("%d/%m/%Y %H:%M:%S")
else:
lastseenmsg = 'This user has not spoken for at least 1 month!'
embed = dc.Embed(color=member.color, timestamp=timestamp)
embed.set_author(name=f'Information for {member}')
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='User ID:', value=f'{member.id}')
embed.add_field(name='Last Seen:', value=lastseenmsg, inline=False)
embed.add_field(name='Account Created On:', value=member.created_at.strftime('%d/%m/%Y %H:%M:%S'))
embed.add_field(name='Guild Joined On:', value=member.joined_at.strftime('%d/%m/%Y %H:%M:%S'))
embed.add_field(name='Roles:', value=', '.join(f'`{role.name}`' for role in member.roles[1:]), inline=False)
if ctx.author != member:
msg = 'It seems you\'re a bit of a stalker, aren\'t you?'
else:
msg = None
await ctx.send(msg, embed=embed)
@bot.command()
async def ping(ctx):
await ctx.send(f'Pong, <@!{ctx.message.author.id}>!')
@bot.group()
async def config(ctx):
if ctx.invoked_subcommand is None:
await ctx.send(
'It seems that you have attempted to run a nonexistent command. '
'Would you like to try again? Redos are free, you know.'
)
@config.command()
async def usrlog(ctx):
if ctx.author.guild_permissions.manage_guild == True:
await ctx.send(guild_config.setlog(ctx, 'usrlog'))
else:
await ctx.send("It seems that you don't have the appropriate permissions for this command.")
@config.command()
async def msglog(ctx):
if ctx.author.guild_permissions.manage_guild == True:
await ctx.send(guild_config.setlog(ctx, 'msglog'))
else:
await ctx.send("It seems that you don't have the appropriate permissions for this command.")
#GAME EVENT
#ABANDON ALL HOPE YE WHO GO BELOW HERE
@bot.command()
async def rogue_game(ctx):
await ctx.send("Game started! Choose a starting buff - 'Health' or 'Damage'.")
def check(m):
if m.author == ctx.author:
return m.content == "Health" or m.content == "Damage" or m.content == "CMSC280 FREE PASS"
else:
return False
gamer = Player() #Initializing player class
msg = await bot.wait_for("message", check=check)
if msg.content == "Health":
await ctx.send("+25 HP!")
gamer.heal(25)
elif msg.content == "Damage":
await ctx.send("+5 Damage!")
gamer.dangerify(5)
elif msg.content == "CMSC280 FREE PASS":
await ctx.send("Free shield!")
gamer.get_shield(1)
gamer.get_shield(0)
await ctx.send("OPTIONS: You can 'Block', 'Dodge' or 'Attack' a monster. Alternatively, you may 'Die'.")
slain_enemies = 0
def continue_check(m): #Check used several times
if m.author == ctx.author:
return m.content == "Yes" or m.content == "No"
else:
return False
while gamer.life() == True:
game_roll = randint(1, 1) #placeholder
if game_roll == 1:
#Monster speed is between 5 and 12.
#Monster health is between 40 and 120.
#Monster damage is between 5 and 20.
#Monster damage type is random one or the other (physical or magical).
m_speed = randint(5, 12)
m_hp = randint(40, 120)
m_dmg = randint(5, 20)
m_type = randint(0, 1)
danger = Monster(m_speed, m_dmg, m_hp, m_type) #Initializing monster class
print(f"Monster generated.")
await ctx.send("There is a beast, and you must tenderize it!")
while danger.life() == True:
await ctx.send("Monsters speed is {1}, damage {2}, health {3}." .format(ctx, danger.speed(), danger.damage(), danger.health()))
m_attk_str = danger.make_attack()
m_attk = m_attk_str.split(" ")
if "0" in m_attk:
await ctx.send("The monster is about to bite you!")
elif "1" in m_attk:
await ctx.send("The monster is about to breathe fire at you!")
def game_response(m): #Player response
if m.author == ctx.author:
return m.content == "Block" or m.content == "Dodge" or m.content == "Attack" or m.content == "Die"
else:
return False
#Reactions to the monster's attack
try:
g_msg = await bot.wait_for("message",timeout=m_speed, check=game_response)
if g_msg.content == "Block":
if "0" in m_attk:
if gamer.shield_type() == 1 or gamer.shield_type() == 3:
gamer.shield_hit()
await ctx.send("You block the attack!")
if gamer.shield_type() == 0:
await ctx.send("Your shield shatters from the force of the blow.")
else:
await ctx.send("You try to block it, but your shield isn't rated for this kind of damage!")
bp_damage = int(m_attk[1])
gamer.take_hit(bp_damage)
curhp = gamer.health()
await ctx.send("Your health is {1}." .format(ctx, curhp))
if "1" in m_attk:
if gamer.shield_type() == 2 or gamer.shield_type() == 3:
gamer.shield_hit()
await ctx.send("You block the attack!")
if gamer.shield_type() == 0:
await ctx.send("Your shield falls to pieces in a burst of multicolored light.")
else:
await ctx.send("The magical assault burns right through your shield!")
bm_damage = int(m_attk[1])
gamer.take_hit(bm_damage)
curhp = gamer.health()
await ctx.send("Your health is {1}." .format(ctx, curhp))
if g_msg.content == "Dodge":
await ctx.send("You roll to one side, avoiding some of the damage!")
d_damage = int(m_attk[1])
hit = d_damage - randint(5, 18)
gamer.take_hit(hit)
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if g_msg.content == "Attack":
await ctx.send("You strike at the monster, but in doing so, expose yourself to the blow!") #Heh. Expose yourself. Good one, me.
a_damage = int(m_attk[1])
hit = a_damage + randint(5, 10)
gamer.take_hit(hit)
danger.take_hit(gamer.damage())
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if g_msg.content == "Die":
await ctx.send("You die before the blow hits, confusing the monster.")
gamer.take_hit(gamer.health())
except asyncio.TimeoutError:
await ctx.send("You didn't move fast enough! The attack lands!")
t_damage = int(m_attk[1])
gamer.take_hit(t_damage)
await ctx.send("Your health is {1}." .format(ctx, gamer.health()))
if gamer.life() == False:
break
await ctx.send("The monster rears back! Quickly, hit the thing!")
def attack_response(m):
if m.author == ctx.author:
return m.content == "Attack"
else:
return False
try:
a_msg = await bot.wait_for("message", timeout=m_speed, check=attack_response)
if a_msg.content == "Attack":
await ctx.send("You hit the monster!")
danger.take_hit(gamer.damage())
except asyncio.TimeoutError:
await ctx.send("You didn't move fast enough!")
#Right, by this point, the monster has attacked, and the player has attacked.
#Need to check if the player is dead or not.
if gamer.life() == False:
break
#Only other option now is that the monster is still alive, requiring another turn, or it's dead, in which case...
#We should end up here, outside the loop.
if gamer.life() == True: #Necessary. Can break above loop without being alive, due to 'Die'.
await ctx.send("The monster has been defeated.")
slain_enemies = slain_enemies + 1
lootroll = randint(0, 4)
#Five cases. 0 - nothing. 1 - Physical shield. 2 - Magic shield. 3 - Health. 4 - Damage.
if lootroll == 0:
await ctx.send("The monster dropped nothing.")
if lootroll == 1:
await ctx.send("In the monster's digestive tract, you find a metal shield!")
gamer.get_shield(0)
if lootroll == 2:
await ctx.send("In the monster's spleen, you find a runic shield, glowing with spellcraft!")
gamer.get_shield(1)
if lootroll == 3:
healthroll = randint(5, 30)
await ctx.send("The monster's blood is a powerful restorative! You heal for {1}." .format(ctx, healthroll))
gamer.heal(healthroll)
if lootroll == 4:
dmgroll = randint(3, 12)
await ctx.send("You monster's bones make an excellent weapon! Your damage increases by {1}." .format(ctx, dmgroll))
gamer.dangerify(dmgroll)
#Loot handled. Looping again after describing player stats.
await ctx.send("Your health is {1} and your damage is {2}." .format(ctx, gamer.health(), gamer.damage()))
if gamer.shield_type() == 0:
await ctx.send("You have no shield.")
elif gamer.shield_type() == 1:
await ctx.send("You have a sturdy metal shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
elif gamer.shield_type() == 2:
await ctx.send("You have a rune-inscribed shield. It can take {1} more hits." .format(ctx, gamer.shield_dur()))
elif gamer.shield_type() == 3:
await ctx.send("You have an inscribed metal shield. Powerful! It can take {1} more hits." .format(ctx, gamer.shield_dur()))
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if con_msg.content == "No":
break
#End of combat loop. Player is dead.
if game_roll == 2:
await ctx.send("You encounter a great and terrible wizard.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 3:
await ctx.send("You stumble into a trap!")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 4:
await ctx.send("Rocks fall, everyone dies.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
if game_roll == 5:
await ctx.send("A man just walks up and punches you. What a jerk.")
await ctx.send("Continue?")
con_msg = await bot.wait_for("message", check=continue_check)
#Placeholder maneuvers. Plan to expand game later with more events.
#Get duel working for demo
await ctx.send("You have died. Nice try, though! You killed {1} monsters." .format(ctx, slain_enemies))
@bot.command()
#Shoutout to my friend Janine for helping me cut this beast of a function in half.
async def duel(ctx, *, member: dc.Member):
await ctx.send("You have challenged {1} to a duel! How do you respond {1}?".format(ctx, member))
duelee = member # Discord member, shown as 'Wizard of Chaos#2459' or similar
player1 = Player()
dueler = ctx.author # ditto
player2 = Player()
def filter_tokens(msg, tokens):
"""Returns a list of tokens from the sequence that appear in the message."""
text = msg.content.strip().lower()
return [t for t in tokens if t in text]
def check(m): # Check if duel is accepted
return m.author == duelee and bool(filter_tokens(m, ('accept', 'decline')))
try:
msg = await bot.wait_for("message", timeout=20, check=check)
tokens = filter_tokens(msg, ('accept', 'decline'))
if len(tokens) > 1:
await ctx.send("Your indecision has weirded out your opponent. Good job.")
return
if 'decline' == tokens[0]:
await ctx.send("You have declined the challenge, everyone judges you.") #Coward.
return
if 'accept' == tokens[0]:
await ctx.send("You have accepted the duel!")
except asyncio.TimeoutError:
await ctx.send("{1} appears to be absent. Coward.".format(ctx, duelee))
return
await ctx.send(
"The duel has begun. The three attacks are 'critical strike', 'power attack', and 'flurry'. "
"You can hit someone from the 'left' or the 'right', or just not pick a direction. "
"You can also 'die'."
)
await ctx.send(
"Critical strikes cannot be parried. "
"Power attacks cannot be parried or blocked. "
"Flurries cannot be blocked or dodged effectively."
)
#Slightly more in-depth explanation:
#Critical strikes are blocked from the same direction they came in.
#Attempting to roll in any direction other than the opposite of the incoming attack results in a hit.
#Critical strikes cannot be parried, like, at all.
#Flurries must be parried from the same direction. They can be dodged for reduced damage. They cannot be blocked.
#Power attacks cannot be blocked or parried and MUST be dodged, to the opposite of the incoming direction.
#Dodges have to go in the opposite direction or they fail.
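    #Illustrative sketch only (the if/elif chains below do not consult it): the
    #best-case damage each defense can achieve against each attack, per the rules above.
    _best_case_damage = {
        ("critical strike", "block"): 0, #block from the incoming side
        ("critical strike", "parry"): 20, #cannot be parried at all
        ("critical strike", "dodge"): 0, #dodge away from the incoming side
        ("flurry", "block"): 10, #cannot be blocked
        ("flurry", "parry"): 0, #parry from the incoming side; the attacker takes 10
        ("flurry", "dodge"): 5, #dodging still costs a glancing hit
        ("power attack", "block"): 10, #cannot be blocked
        ("power attack", "parry"): 10, #cannot be parried
        ("power attack", "dodge"): 0, #dodge away from the incoming side
    }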
#Attack / defense checks based on incoming messages
def attack_check(m, a):
return m.author == a and bool(filter_tokens(m, attacks))
def defense_check(m, a):
return m.author == a and bool(filter_tokens(m, defenses))
atk_time = 5 # Reaction time for players in seconds, set to 10 for demo, 5 during actual play
attacks = ("critical strike", "flurry", "power attack", "die")
defenses = ("parry", "dodge", "block", "die")
dirs = ("left", "right")
while True: # External infinite loop.
for actor1, actor2, stats1, stats2 in ((duelee, dueler, player1, player2), (dueler, duelee, player2, player1)): # Turn order loop.
if not(player2.life() and player1.life()): # Check if either player died during any turn.
await ctx.send("{1} wins!".format(ctx, duelee if player1.life() else dueler))
return
await ctx.send("It's {1}'s turn to attack.".format(ctx, actor1))
try:
a1_msg = await bot.wait_for("message", timeout=20, check=lambda m: attack_check(m, actor1))
except asyncio.TimeoutError:
await ctx.send("{1} does nothing.".format(ctx, actor1))
continue
attack_tokens = filter_tokens(a1_msg, attacks)
attack_dirs = filter_tokens(a1_msg, dirs)
if len(attack_tokens) > 1 or len(attack_dirs) > 1:
await ctx.send("{1} has wasted too much time on indecisive action and got confused!".format(ctx, actor1))
continue
attack_token = attack_tokens[0]
attack_dir = attack_dirs[0] if attack_dirs else "top"
if "die" == attack_token:
await ctx.send("{1} screams that {2} will never understand their pain, then slits their wrists!".format(ctx, actor1, actor2))
stats1.take_hit(100) # It's no surprise the emo movement failed, no surprise at all.
continue
await ctx.send("{1} throws out a {2} from the {3}!".format(ctx, actor1, attack_token, attack_dir))
try:
a2_msg = await bot.wait_for("message", timeout=atk_time, check=lambda m: defense_check(m, actor2))
except asyncio.TimeoutError:
await ctx.send("{1} doesn't move fast enough, and gets hit!".format(ctx, actor2))
stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
continue
defense_tokens = filter_tokens(a2_msg, defenses)
defense_dirs = filter_tokens(a2_msg, dirs)
if len(defense_tokens) > 1 or len(defense_dirs) > 1:
await ctx.send("{1} doesn't get their act together fast enough and gets hit!".format(ctx, actor2))
stats2.take_hit((20, 15, 10)[attacks.index(attack_token)])
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, player2.health()))
continue
defense_token = defense_tokens[0]
defense_dir = defense_dirs[0] if defense_dirs else "top"
if "die" == defense_token:
await ctx.send("{1} accepts their fate and allows the blow to crush their skull!".format(ctx, actor2))
stats2.take_hit(100)
continue
# A whole bunch of if/elif/else chains. Asyncio REALLY does not like when you try to call outside functions.
# CRITICAL STRIKE:
if "critical strike" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
else:
await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(40)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_token:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
else:
await ctx.send("{1} tries to block, but misses the direction of the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(40)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
if defense_dir != "top":
await ctx.send("{1} fails to block the central strike!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} blocks the strike.".format(ctx, actor2))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too precisely aimed!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
#All critical strike maneuvers handled.
#FLURRY:
if "flurry" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
else:
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_dir:
await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
else:
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blows!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges most of the blows, but takes one across the back!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
await ctx.send("{1} attempts to block the blows, but there's just too many!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to parry, but misjudges the direction and gets hit!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} easily parries the attacks, redirecting them onto {2}!".format(ctx, actor2, actor1))
stats1.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor1, stats1.health()))
elif "dodge" == defense_token:
if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(15)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges most of the blows, but takes one hit anyway!".format(ctx, actor2))
stats2.take_hit(5)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
#Flurry maneuvers handled.
#POWER ATTACK:
if "power attack" == attack_token:
if "left" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "left" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "right" == attack_dir:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
if "right" == defense_dir:
await ctx.send("{1} tries to roll out of the way, but rolls straight into the blow!".format(ctx, actor2))
stats2.take_hit(20)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "left" == defense_dir:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
else:
await ctx.send("{1} misses the dodge.".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
if "block" == defense_token:
await ctx.send("{1} tries to block, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "parry" == defense_token:
await ctx.send("{1} attempts to parry, but the blow is too much!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
elif "dodge" == defense_token:
                        if defense_dir != "top":
await ctx.send("{1} tries to roll, but gets slapped anyway!".format(ctx, actor2))
stats2.take_hit(10)
await ctx.send("{1} 's health is {2}.".format(ctx, actor2, stats2.health()))
else:
await ctx.send("{1} dodges the blow.".format(ctx, actor2))
# Power attacks handled.
# All attacks handled. Next player's attack.
#END DUEL
if __name__ == '__main__':
bot.run(get_token()) | en | 0.909228 | #WIZARD BOT IS LIVE #initializes bot, sets up command sign #GAME STUFF #All integers. #Last one is 1 or 0 - there are two damage types. Magical and physical. #Physical is 0, Magical is 1. #Attacks return a tuple containing a 1 or a 0 as the first number, then the damage as the second number. #ACCESSORS #MUTATORS #Classic! #Player has four shield conditions. #0 - has no shield. 1 - Physical shield. 2 - Magical shield. 3 - Both. #ACCESSORS #MUTATORS #Kills your shield when the durability hits 0. #This one's a bit tricky. The shield is 0 or 1 - Physical or magical. #It then updates the player's shield accordingly. #Shield durabilty goes to 5, regardless of what it was before, on picking up a SECOND shield. #Other four cases don't need to be covered. #WIZBOT OLD STUFF ENDS HERE #FUNCTIONS HERE #START OF EVENTS #{bfr.channel}:', #{msg.channel}:', # Moved grabbing last seen to a function # Log role and nickname changes # Log avatar, name, discrim changes #END OF EVENTS #2459.\n\n' #GAME EVENT #ABANDON ALL HOPE YE WHO GO BELOW HERE #Initializing player class #Check used several times #placeholder #Monster speed is between 5 and 12. #Monster health is between 40 and 120. #Monster damage is between 5 and 20. #Monster damage type is random one or the other (physical or magical). #Initializing monster class #Player response #Reactions to the monster's attack #Heh. Expose yourself. Good one, me. #Right, by this point, the monster has attacked, and the player has attacked. #Need to check if the player is dead or not. #Only other option now is that the monster is still alive, requiring another turn, or it's dead, in which case... #We should end up here, outside the loop. #Necessary. Can break above loop without being alive, due to 'Die'. #Five cases. 0 - nothing. 1 - Physical shield. 2 - Magic shield. 3 - Health. 4 - Damage. #Loot handled. Looping again after describing player stats. #End of combat loop. Player is dead. #Placeholder maneuvers. Plan to expand game later with more events. #Get duel working for demo #Shoutout to my friend Janine for helping me cut this beast of a function in half. # Discord member, shown as 'Wizard of Chaos#2459' or similar # ditto Returns a list of tokens from the sequence that appear in the message. # Check if duel is accepted #Coward. #Slightly more in-depth explanation: #Critical strikes are blocked from the same direction they came in. #Attempting to roll in any direction other than the opposite of the incoming attack results in a hit. #Critical strikes cannot be parried, like, at all. #Flurries must be parried from the same direction. They can be dodged for reduced damage. They cannot be blocked. #Power attacks cannot be blocked or parried and MUST be dodged, to the opposite of the incoming direction. #Dodges have to go in the opposite direction or they fail. #Attack / defense checks based on incoming messages # Reaction time for players in seconds, set to 10 for demo, 5 during actual play # External infinite loop. # Turn order loop. # Check if either player died during any turn. # It's no surprise the emo movement failed, no surprise at all. # A whole bunch of if/elif/else chains. Asyncio REALLY does not like when you try to call outside functions. # CRITICAL STRIKE: #All critical strike maneuvers handled. #FLURRY: #Flurry maneuvers handled. #POWER ATTACK: # Power attacks handled. # All attacks handled. Next player's attack. #END DUEL | 2.700929 | 3 |
stack.py | henryoliver/data-structures | 0 | 9580 | <gh_stars>0
class Stack:
def __init__(self):
self.stack = []
self.minMaxStack = []
# O(1) time | O(1) space
def peek(self):
if (len(self.stack)):
return self.stack[-1]
return None
# O(1) time | O(1) space
def pop(self):
if (len(self.stack)):
self.minMaxStack.pop()
return self.stack.pop()
return None
# Procedure
# O(1) time | O(1) space
def push(self, value):
minNumber = value
maxNumber = value
if (len(self.minMaxStack)):
lastMinMax = self.minMaxStack[-1]
minNumber = min(lastMinMax[0], minNumber)
maxNumber = max(lastMinMax[1], maxNumber)
self.stack.append(value)
self.minMaxStack.append((minNumber, maxNumber))
print(self.stack)
print(self.minMaxStack)
# O(1) time | O(1) space
def getMin(self):
if (len(self.minMaxStack)):
return self.minMaxStack[-1][0]
return None
# O(1) time | O(1) space
def getMax(self):
if (len(self.minMaxStack)):
return self.minMaxStack[-1][1]
return None
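# Illustrative usage sketch (not part of the original module): the auxiliary
# minMaxStack mirrors the main stack, which is what keeps getMin/getMax O(1).
if __name__ == "__main__":
    s = Stack()
    s.push(5)
    s.push(2)
    s.push(9)
    assert s.getMin() == 2 and s.getMax() == 9
    s.pop()
    assert s.getMin() == 2 and s.getMax() == 5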
| class Stack:
def __init__(self):
self.stack = []
self.minMaxStack = []
# O(1) time | O(1) space
def peek(self):
if (len(self.stack)):
return self.stack[-1]
return None
# O(1) time | O(1) space
def pop(self):
if (len(self.stack)):
self.minMaxStack.pop()
return self.stack.pop()
return None
# Procedure
# O(1) time | O(1) space
def push(self, value):
minNumber = value
maxNumber = value
if (len(self.minMaxStack)):
lastMinMax = self.minMaxStack[-1]
minNumber = min(lastMinMax[0], minNumber)
maxNumber = max(lastMinMax[1], maxNumber)
self.stack.append(value)
self.minMaxStack.append((minNumber, maxNumber))
print(self.stack)
print(self.minMaxStack)
# O(1) time | O(1) space
def getMin(self):
if (len(self.minMaxStack)):
return self.minMaxStack[-1][0]
return None
# O(1) time | O(1) space
def getMax(self):
if (len(self.minMaxStack)):
return self.minMaxStack[-1][1]
return None | en | 0.546102 | # O(1) time | O(1) space # O(1) time | O(1) space # Procedure # O(1) time | O(1) space # O(1) time | O(1) space # O(1) time | O(1) space | 3.857621 | 4 |
ProyectoDAI/settings.py | javiergarridomellado/proyectodai | 1 | 9581 | """
Django settings for TusPachangas project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import django
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'registration', #add in the registration package
'rest_framework',
'restaurante',
'easy_maps',
)
if django.VERSION < (1, 7):
INSTALLED_APPS += (
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ProyectoDAI.urls'
WSGI_APPLICATION = 'ProyectoDAI.wsgi.application'
TEMPLATE_DIRS = (TEMPLATE_PATH,)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
ON_HEROKU = os.environ.get('PORT')
if ON_HEROKU:
DATABASE_URL='postgres://kytzveedsclzaf:eIJAAuElYvSxPK-vmSdXG9Hjv8@ec2-107-21-219-235.compute-1.amazonaws.com:5432/df9sfr7a9b8vjf'
DATABASES = {'default': dj_database_url.config(default=DATABASE_URL)}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
STATIC_PATH,
)
#Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| """
Django settings for TusPachangas project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import django
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'registration', #add in the registration package
'rest_framework',
'restaurante',
'easy_maps',
)
if django.VERSION < (1, 7):
INSTALLED_APPS += (
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ProyectoDAI.urls'
WSGI_APPLICATION = 'ProyectoDAI.wsgi.application'
TEMPLATE_DIRS = (TEMPLATE_PATH,)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
ON_HEROKU = os.environ.get('PORT')
if ON_HEROKU:
DATABASE_URL='postgres://kytzveedsclzaf:eIJAAuElYvSxPK-vmSdXG9Hjv8@ec2-107-21-219-235.compute-1.amazonaws.com:5432/df9sfr7a9b8vjf'
DATABASES = {'default': dj_database_url.config(default=DATABASE_URL)}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
STATIC_PATH,
)
#Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| en | 0.625928 | Django settings for TusPachangas project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition #add in the registration package # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ #Media | 1.993762 | 2 |
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/eager/test.py | JustinACoder/H22-GR3-UnrealAI | 6 | 9582 | <reponame>JustinACoder/H22-GR3-UnrealAI
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing tfe code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops as _ops
from tensorflow.python.platform import test as _test
from tensorflow.python.platform.test import * # pylint: disable=wildcard-import
# TODO(akshayka): Do away with this file.
def main(argv=None):
_ops.enable_eager_execution()
_test.main(argv)
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing tfe code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops as _ops
from tensorflow.python.platform import test as _test
from tensorflow.python.platform.test import * # pylint: disable=wildcard-import
# TODO(akshayka): Do away with this file.
def main(argv=None):
_ops.enable_eager_execution()
_test.main(argv) | en | 0.80101 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Utilities for testing tfe code. # pylint: disable=wildcard-import # TODO(akshayka): Do away with this file. | 1.679239 | 2 |
util.py | monokim/CheesyBullets | 1 | 9583 | <reponame>monokim/CheesyBullets
import time
import pyautogui
import win32gui
def get_screen_rect(caption='CheesyBullets'):
hwnd = win32gui.FindWindow(None, caption)
rect = win32gui.GetWindowRect(hwnd)
screen_rect = (rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1])
return rect
class Timer():
def __init__(self):
self.times = []
self.cnt = 0
def set_timer(self, name="timer"):
flag = False
for i, t in enumerate(self.times):
if t[1] == name:
flag = True
t[0] = time.time()
break
if flag == False:
self.times.append([time.time(), name])
def print_time(self, name="timer"):
flag = False
for i, t in enumerate(self.times):
if t[1] == name:
flag = True
print(name + " takes (%.5f)s" % (time.time() - t[0]))
break
if flag == False:
raise Exception("There is no timer")
def delete_timer(self, name = None):
for i, t in enumerate(self.times):
if t[1] == name:
self.times.pop(i)
break
| import time
import pyautogui
import win32gui
def get_screen_rect(caption='CheesyBullets'):
hwnd = win32gui.FindWindow(None, caption)
rect = win32gui.GetWindowRect(hwnd)
screen_rect = (rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1])
return rect
class Timer():
def __init__(self):
self.times = []
self.cnt = 0
def set_timer(self, name="timer"):
flag = False
for i, t in enumerate(self.times):
if t[1] == name:
flag = True
t[0] = time.time()
break
if flag == False:
self.times.append([time.time(), name])
def print_time(self, name="timer"):
flag = False
for i, t in enumerate(self.times):
if t[1] == name:
flag = True
print(name + " takes (%.5f)s" % (time.time() - t[0]))
break
if flag == False:
raise Exception("There is no timer")
def delete_timer(self, name = None):
for i, t in enumerate(self.times):
if t[1] == name:
self.times.pop(i)
break | none | 1 | 3.011938 | 3 |
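A hedged usage sketch for the Timer helper above; the module name 'util' is taken from the file path and the timing printed in the comment is illustrative.

import time
from util import Timer  # assumed module name, matching util.py above

timer = Timer()
timer.set_timer("frame")     # start (or restart) a timer named "frame"
time.sleep(0.05)             # stand-in for real work
timer.print_time("frame")    # prints e.g. 'frame takes (0.05012)s'
timer.delete_timer("frame")  # drop the entry once it is no longer needed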
|
graph-to-graph/elf_correlate.py | mbrcknl/graph-refine | 6 | 9584 | #
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: BSD-2-Clause
#
import re
import graph_refine.syntax as syntax
import graph_refine.problem as problem
import graph_refine.stack_logic as stack_logic
from graph_refine.syntax import true_term, false_term, mk_not
from graph_refine.check import *
import graph_refine.search as search
import elf_parser
import graph_refine.target_objects as target_objects
from imm_utils import *
from elf_file import *
from addr_utils import *
from call_graph_utils import gFuncsCalled
from dot_utils import toDot,toGraph
from addr_utils import gToPAddrP,callNodes
def loadCounts(dir_name):
    #loop_counts.py must contain exactly 1 dict called loops_by_fs
context = {}
execfile('%s/loop_counts.py' % dir_name,context)
#we should have a dict of addr -> bound
assert 'loops_by_fs' in context
lbfs = context['loops_by_fs']
return lbfs
class immFunc (Borg):
def __init__(self,elf_fun=None,load_counts=False):
Borg.__init__(self)
if not elf_fun:
return
self.elf_fun = elf_fun
self.name = elf_fun.name
self.addr = elf_fun.addr
self.g_f = elf_fun.g_f
self.asm_fs = elfFile().asm_fs
self.imm_nodes = {}
self.bbs = {}
self.loaded_loop_counts = False
self.parse_only = False
self.loop_bounds = {}
# dict of f-> loop_heads -> (bound, description)
self.loops_by_fs = {}
#f -> p_n
self.p_entries = {}
if load_counts:
self.loaded_loops_by_fs = loadCounts(elfFile().dir_name)
self.loaded_loop_counts = True
def process(self):
if self.bbs != {}:
return
self.makeBinGraph()
self.loopheads = {}
self.findLoopheads()
lbfs = self.loops_by_fs
if self.loaded_loop_counts:
self.bin_loops_by_fs = self.loaded_loops_by_fs
print 'loaded loop counts from file'
else:
#build bin_loops_by_fs from loops_by_fs
self.bin_loops_by_fs = {}
blbf = self.bin_loops_by_fs
for f in lbfs:
blbf[f] = {}
p = self.f_problems[f]
pA = lambda x: phyAddrP(x,p)
loops = lbfs[f]
for p_head in loops:
assert pA(p_head) not in blbf
blbf[f][pA(p_head)] = loops[p_head]
def isBBHead(self,p_nf):
if not self.isRealNode(p_nf):
return False
g_n = self.phyAddr(p_nf)
if not type(g_n) == int:
return False
return g_n in self.bbs
#bin addr to bb addr
def bbAddr(self,addr):
bbs = self.bbs
for x in bbs:
if addr in bbs[x]:
return x
print 'addr: %x' % addr
assert False, 'BB not found !!'
def toPhyAddrs(self, p_nis):
return [self.phyAddr(x) for x in p_nis]
#find all possible entries of the loop for Chronos
def findLoopEntries(self, loop, f):
p = self.f_problems[f]
head = None
lp = [x for x in list(loop) if self.isRealNode( (x,f) )]
lpp = []
lp_phys = self.toPhyAddrs([(x,f) for x in lp])
for x in lp:
#loop entry, must be
#1. a basic block head and
#2. has >=1 edge from outside the loop
if (x, f ) in self.pf_deadends:
##gotta be halt / branch to halt
continue
phy_n = self.phyAddr((x,f))
node = self.imm_nodes[phy_n]
imm_ext_edges_to = [y for y in node.edges_to if (y not in lp_phys)]
if ( len(imm_ext_edges_to) >= 1 and self.isBBHead((x,f)) ):
lpp.append(x)
return lpp
def findLoopheads(self):
self.imm_loopheads = {}
#loopheads = {}
loopheads = []
#self.loopheads = loopheads
loops_by_fs = self.loops_by_fs
for (f,p) in [(f,self.f_problems[f]) for f in self.f_problems]:
p.compute_preds()
p.do_loop_analysis()
l = p.loop_data
if p.loop_heads():
loops_by_fs[f] = {}
for x in p.loop_heads():
fun,_ = self.pNToFunGN((x,f))
#dodge halt
if fun in elfFile().deadend_funcs:
continue
loopheads.append((x, f))
#the 0 worker_id will get ignored by genLoopHeads.
#FIXME: do this properly..
loops_by_fs[f][x] = (2**30,'dummy',0)
assert loopheads
for p_nf in loopheads:
p_n, f = p_nf
p = self.f_problems[f]
ll = p.loop_data[p_n][1]
z = self.findLoopEntries(ll, f)
#map from potential heads -> head, hack around chronos 'feature'
for q in z:
assert q not in self.imm_loopheads, 'one addr cannot have >1 loopcounts !'
self.imm_loopheads[self.phyAddr((q,f))] = p_nf
return
def firstRealNodes(self,p_nf,visited = None,may_multi=False,may_call=False,skip_ret=False):
"""
Locate the first real node from, and including, p_addr,
or branch targets if it hits a branch before that.
Returns a list of p_nf
"""
elf_fun = self.elf_fun
p_n,f = p_nf
next_p_nf = p_nf
ret = []
if visited == None:
#print 'fRN on p_n %d, fun: %s' % (p_n,f)
visited = []
if p_nf in visited:
return []
visited.append(p_nf)
assert self.pf_deadends != None
while True:
if self.isRealNode(next_p_nf):
return [next_p_nf]
next_p_n , next_f, next_p = self.unpackPNF(next_p_nf)
if ( next_p_n == 'Ret' and f == self.name):
return [('Ret',f)]
elif next_p_n == 'Ret':
if skip_ret:
return []
assert False,'firstRealNodes reached Ret when skip_ret is False'
p_node, edges = self.pNodeConts(next_p_nf, may_call=may_call)
if edges == []:
return []
assert (edges)
if len(edges) > 1:
assert may_multi
for p_e in edges:
for ee in self.firstRealNodes(p_e ,visited = list(visited),may_multi=may_multi,may_call=may_call,skip_ret=skip_ret):
ret.append(ee)
return ret
else:
next_p_nf = edges[0]
#function p_n belongs to, g_n
def pNToFunGN(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
tag = p.node_tags[p_n]
_, x = tag
f_name, g_n = x
return f_name,g_n
    #given p_n is an imm call, return is_tailcall
def isCallTailCall(self,p_nf):
# suc = p_n_cs[0]
g_n = self.phyAddr(p_nf)
return elf_parser.isDirectBranch(g_n)
def isStraightToRetToRoot(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
if p_n == 'Ret' and f == self.name:
return True
elif p_n == 'Ret':
return False
if self.isRealNode(p_nf):
return False
if self.phyAddr(p_nf)=='RetToCaller':
return False
elif type(p_n) == int:
_,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
if len(p_conts) == 1:
return self.isStraightToRetToRoot((p_conts[0],f))
return False
#whether the corresponding imm has a return edge
def isImmRootReturn(self,p_nf):
p_n,f = p_nf
if f != self.name :
return False
_, pf_conts = self.pNodeConts(p_nf)
for x in pf_conts:
if self.isStraightToRetToRoot(x):
return True
return False
#whether p_n leads straightly to RetToCaller
def isStraightToRetToCaller(self,p_nf):
p_n,f = p_nf
if p_n == 'Ret':
if f != self.name:
return True
else:
return False
if self.isRealNode(p_nf):
return False
if self.phyAddr(p_nf)=="RetToCaller":
return True
elif type(p_n) == int:
_,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
if len(p_conts) == 1:
return self.isStraightToRetToCaller((p_conts[0],f))
return False
#All return except the root one
def isImmRetToCaller(self,p_nf):
g_n = self.phyAddr(p_nf)
p_n,f,p = self.unpackPNF(p_nf)
if isCall(p.nodes[p_n]):
return False
p_node,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
conts = [x for x in p_conts if type(p_n) == int]
#print ' p_n %s p_conts %s' % (p_n,p_conts)
n_rtc = 0
assert self.phyAddr(p_nf) == g_n
for pf_cont in pf_conts:
cont_n,cont_f = pf_cont
if not isCall(self.f_problems[cont_f].nodes[cont_n]):
if self.isStraightToRetToCaller(pf_cont):
ret = (pf_cont)
n_rtc += 1
if not ( n_rtc <= 1):
#print 'p_n %s g_n %s: n_rtc %s' % (p_n, self.phyAddr(p_n), n_rtc)
assert False
if n_rtc > 0:
return ret
return False
def funName(self,p_nf):
p_n,f = p_nf
fname = self.f_problems[f].nodes[p_n].fname
if '.' in fname:
#print 'f: %s' % fname
s = []
for c in fname:
if c == '.':
s.append('_')
else:
s.append(c)
return ''.join(s)
return fname
def makeProblem(self,f):
p = problem.Problem(None, 'Functions (%s)' % f)
p.add_entry_function(self.asm_fs[f], 'ASM')
p.do_analysis()
return p
def isSpecInsFunc(self,f):
"""
Returns whether f is the name of a special function
used to model special instruction
"""
return f.startswith ("instruction'")
def makeBinGraph(self):
"""
Prepare problems for all functions transitively called by self,
and turn this into a binary CFG
"""
self.f_problems = {}
if self.name not in elfFile().tcg:
print elfFile().tcg.keys()
tc_fs = list(elfFile().tcg[self.name])
for f in tc_fs + [self.name]:
assert '.' not in f
if self.isSpecInsFunc(f):
continue
p = problem.Problem(None, 'Functions (%s)' % f)
p.add_entry_function(self.asm_fs[f], 'ASM')
self.f_problems[f] = p
#print 'f %s, p.nodes: %d' % (f,len(p.nodes) )
#get its entry
assert len(p.entries) == 1
self.p_entries[f] = p.entries[0][0]
print 'all problems generated'
self.findAllDeadends()
print "all deadends found"
#now generate the bin graph
for f,p in self.f_problems.iteritems():
for p_n in p.nodes:
if type(p_n) != int:
continue
p_nf = (p_n,f)
if p_nf in self.pf_deadends:
continue
if self.isRealNode(p_nf):
#print 'adding: %s' % str(p_nf)
self.addImmNode(p_nf)
self.imm_entry = self.phyAddr(self.firstRealNodes((self.p_entries[self.name], self.name ))[0])
#print 'self.imm_entry %x' % self.imm_entry
self.bbs = findBBs(self.imm_entry,self)
def findAllDeadends(self):
self.pf_deadends = []
pf_deadends = self.pf_deadends
self.deadend_g_ns = set()
#Halt is a deadend function, and should never be called, it's equivalent to Err for our purpose
for dead_f in elfFile().deadend_funcs:
print 'dead_f %s' % dead_f
deadend_f_g_n = elfFile().funcs[dead_f].addr
self.deadend_g_ns.add (deadend_f_g_n)
print 'deadend_f_g_n 0x%x' % deadend_f_g_n
for (f,p) in self.f_problems.iteritems():
for p_n in p.nodes:
if self.isDeadend((p_n,f)):
pf_deadends.append((p_n,f))
def isDeadend(self,p_nf,visited=None):
'''
Determine if p_nf (p_n, function) is a deadend node
'''
if p_nf in self.pf_deadends:
return True
p_n, f, p = self.unpackPNF(p_nf)
if visited == None:
visited = []
if p_n == 'Err':
return True
if p_n == 'Ret':
return False
if p_nf in visited:
return True
if isCall(p.nodes[p_n]):
#walk into the callee problem
f = self.funName(p_nf)
#FIXME: dodge dummy functions
if 'instruction' in f:
return False
if f in elfFile().deadend_funcs:
return True
p_callee = self.f_problems[f]
assert len(p_callee.entries) == 1
p_callee_n = p_callee.entries[0][0]
return self.isDeadend((p_callee_n,f),visited=visited + [p_nf])
if type(p_n) == int and self.phyAddr(p_nf) == 'RetToCaller':
return False
g_n = self.phyAddr(p_nf)
if g_n in self.deadend_g_ns:
return True
#note: pNodeConts ensures we stay in the same problem
node,fconts = self.pNodeConts(p_nf)
conts = [ x[0] for x in fconts]
for p_c in conts:
assert p_c != p_n
if not self.isDeadend( (p_c,f), visited = visited + [p_nf]):
return False
#all ends are dead, thus deadend
return True
def unpackPNF(self,p_nf):
p_n,f = p_nf
p = self.f_problems[f]
return (p_n,f,p)
def phyAddr (self,p_nf) :
p_n, f , p = self.unpackPNF(p_nf)
if not isinstance(p_n,int):
return p_n
_,x = p.node_tags[p_n]
if x == 'LoopReturn':
return 'LoopReturn'
try:
f_name,g_addr = x
except:
print f
print 'tags: %s'% str(p.node_tags[p_n])
assert False
return g_addr
#must not reach Ret
def pNodeConts(self, p_nf, no_deadends=False, may_call = False):
p_n,f, p = self.unpackPNF(p_nf)
p_node = p.nodes[p_n]
if isCall(p_node):
assert may_call
fun_called = self.funName(p_nf)
p = self.f_problems[fun_called]
entry = self.p_entries[fun_called]
pf_conts = [(entry,fun_called)]
return p_node, pf_conts
assert p_n != 'Ret'
p_conts = filter(lambda x: x != 'Err', p_node.get_conts())
if no_deadends:
            p_conts = filter(lambda x: (x, f) not in self.pf_deadends, p_conts)
pf_conts = [(x , f) for x in p_conts]
return p_node,pf_conts
def isRealNode(self,p_nf):
p_n,f = p_nf
if p_n == 'Ret':
return False
g_n = self.phyAddr(p_nf)
if g_n == 'RetToCaller':
return False
elif self.isLoopReturn(p_nf):
return False
elif type(g_n) != int:
print 'g_n %s' % str(g_n)
assert False, 'g_n expected of typ int'
#elif g_n % 4 == 0 and not self.isLoopReturn(p_nf):
elif g_n % 4 == 0:
assert not self.isLoopReturn(p_nf)
return True
else:
return False
def isLoopReturn(self,p_nf):
p_n,f = p_nf
p = self.f_problems[f]
tag = p.node_tags[p_n]
return tag[1] == 'LoopReturn'
def addImmNode(self,p_nf):
imm_nodes = self.imm_nodes
g_n = self.phyAddr(p_nf)
p_node,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
p_n,f,p = self.unpackPNF(p_nf)
#print "adding imm_node p_n: %s f: %s" % (p_n,f)
if g_n in imm_nodes:
#we have been here before
node = imm_nodes[g_n]
else:
node = immNode(g_n,rawVals(g_n))
imm_nodes[g_n] = node
dont_emit = []
p_imm_return_to_caller_edge = self.isImmRetToCaller(p_nf)
call_pn = self.getCallTarg(p_nf)
if call_pn:
fun_called = self.funName((call_pn, f))
if self.isSpecInsFunc(fun_called):
#Hack: go straight to the return node, do nothing else
next_addrs = p.nodes[call_pn].get_conts()
assert len(next_addrs) == 1
next_addr = next_addrs[0]
assert next_addr not in ['Ret','Err']
phy_next_addr = self.phyAddr((next_addr,f))
i_e = immEdge(phy_next_addr, emit = True)
node.addEdge(i_e)
return
imm_call = self.parseImmCall(p_nf)
assert not p_imm_return_to_caller_edge
g_call_targ,g_ret_addr,is_tail_call = imm_call
dont_emit.append(g_call_targ)
node.addCallRetEdges(g_call_targ, g_ret_addr,is_tail_call)
elif p_imm_return_to_caller_edge or self.isImmRootReturn(p_nf):
node.addRetEdge()
        #add edges to the imm node, ignore Err and halt
for p_targ in p_conts:
if type(p_targ) == int and (p_targ, f) not in self.pf_deadends:
if p_targ == 'Ret':
continue
edges = self.firstRealNodes((p_targ,f),may_multi=True,may_call=True,skip_ret=True)
for p_e in edges :
#dodge halt
if (p_e) in self.pf_deadends:
continue
g_e = self.phyAddr(p_e)
assert g_e != None
if g_e == 'Ret':
continue
assert g_e != 'Ret'
i_e = immEdge(g_e,emit = g_e not in dont_emit)
node.addEdge(i_e)
def retPF(self,call_p_nf):
p_n,f,p = self.unpackPNF(call_p_nf)
assert len(p.nodes[p_n].get_conts()) == 1
return ( (p.nodes[p_n].get_conts())[0] , f)
def getCallTarg(self, p_nf):
p_n,f,p = self.unpackPNF(p_nf)
_, pf_conts = self.pNodeConts(p_nf)
p_conts = map(lambda x: x[0],pf_conts)
#is Imm call iff there is a successor of kind Call in the g graph
p_n_cs = filter(lambda p_n_c:
type(p_n_c) == int
and not self.isLoopReturn(( p_n_c, f))
and isCall(self.gNode((p_n_c,f)))
, p_conts)
if not p_n_cs:
return None
assert len(p_n_cs) == 1
#return the p_n of the call node
return p_n_cs[0]
def parseImmCall(self,p_nf):
"""
Returns (entry point to the called function, return addr, is_tailcall)
"""
call_pn = self.getCallTarg(p_nf)
assert call_pn != None
p_n,f,p = self.unpackPNF(p_nf)
#print "p_n: %s, f: %s" % (p_n,f)
p_nodes = p.nodes
#find the return addr
#print "call_pn = %d" % call_pn
suc = self.firstRealNodes( (call_pn, f) ,may_multi=False,may_call=True)
pf_call_targ = suc[0]
g_call_targ = self.phyAddr(pf_call_targ)
#locate the call return address
f_caller, _ = self.pNToFunGN(p_nf)
is_tailcall = self.isCallTailCall(p_nf)
if not is_tailcall:
#return the return addr
phy_ret_addr = self.phyAddr(self.retPF((call_pn,f)))
else:
phy_ret_addr = None
assert type(phy_ret_addr) == int or is_tailcall, "g_call_targ %s phy_ret_addr %s" % (g_call_targ, phy_ret_addr)
#print 'call detected: phy_ret_addr %x' % phy_ret_addr
return (g_call_targ, phy_ret_addr,is_tailcall)
def gNode(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
tag = p.node_tags[p_n]
f = tag[1][0]
g_n = tag[1][1]
return self.asm_fs[f].nodes[g_n]
| #
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
#
# SPDX-License-Identifier: BSD-2-Clause
#
import re
import graph_refine.syntax as syntax
import graph_refine.problem as problem
import graph_refine.stack_logic as stack_logic
from graph_refine.syntax import true_term, false_term, mk_not
from graph_refine.check import *
import graph_refine.search as search
import elf_parser
import graph_refine.target_objects as target_objects
from imm_utils import *
from elf_file import *
from addr_utils import *
from call_graph_utils import gFuncsCalled
from dot_utils import toDot,toGraph
from addr_utils import gToPAddrP,callNodes
def loadCounts(dir_name):
    #loop_counts.py must contain exactly 1 dict called loops_by_fs
context = {}
execfile('%s/loop_counts.py' % dir_name,context)
#we should have a dict of addr -> bound
assert 'loops_by_fs' in context
lbfs = context['loops_by_fs']
return lbfs
class immFunc (Borg):
def __init__(self,elf_fun=None,load_counts=False):
Borg.__init__(self)
if not elf_fun:
return
self.elf_fun = elf_fun
self.name = elf_fun.name
self.addr = elf_fun.addr
self.g_f = elf_fun.g_f
self.asm_fs = elfFile().asm_fs
self.imm_nodes = {}
self.bbs = {}
self.loaded_loop_counts = False
self.parse_only = False
self.loop_bounds = {}
# dict of f-> loop_heads -> (bound, description)
self.loops_by_fs = {}
#f -> p_n
self.p_entries = {}
if load_counts:
self.loaded_loops_by_fs = loadCounts(elfFile().dir_name)
self.loaded_loop_counts = True
def process(self):
if self.bbs != {}:
return
self.makeBinGraph()
self.loopheads = {}
self.findLoopheads()
lbfs = self.loops_by_fs
if self.loaded_loop_counts:
self.bin_loops_by_fs = self.loaded_loops_by_fs
print 'loaded loop counts from file'
else:
#build bin_loops_by_fs from loops_by_fs
self.bin_loops_by_fs = {}
blbf = self.bin_loops_by_fs
for f in lbfs:
blbf[f] = {}
p = self.f_problems[f]
pA = lambda x: phyAddrP(x,p)
loops = lbfs[f]
for p_head in loops:
assert pA(p_head) not in blbf
blbf[f][pA(p_head)] = loops[p_head]
def isBBHead(self,p_nf):
if not self.isRealNode(p_nf):
return False
g_n = self.phyAddr(p_nf)
if not type(g_n) == int:
return False
return g_n in self.bbs
#bin addr to bb addr
def bbAddr(self,addr):
bbs = self.bbs
for x in bbs:
if addr in bbs[x]:
return x
print 'addr: %x' % addr
assert False, 'BB not found !!'
def toPhyAddrs(self, p_nis):
return [self.phyAddr(x) for x in p_nis]
#find all possible entries of the loop for Chronos
def findLoopEntries(self, loop, f):
p = self.f_problems[f]
head = None
lp = [x for x in list(loop) if self.isRealNode( (x,f) )]
lpp = []
lp_phys = self.toPhyAddrs([(x,f) for x in lp])
for x in lp:
#loop entry, must be
#1. a basic block head and
#2. has >=1 edge from outside the loop
if (x, f ) in self.pf_deadends:
##gotta be halt / branch to halt
continue
phy_n = self.phyAddr((x,f))
node = self.imm_nodes[phy_n]
imm_ext_edges_to = [y for y in node.edges_to if (y not in lp_phys)]
if ( len(imm_ext_edges_to) >= 1 and self.isBBHead((x,f)) ):
lpp.append(x)
return lpp
def findLoopheads(self):
self.imm_loopheads = {}
#loopheads = {}
loopheads = []
#self.loopheads = loopheads
loops_by_fs = self.loops_by_fs
for (f,p) in [(f,self.f_problems[f]) for f in self.f_problems]:
p.compute_preds()
p.do_loop_analysis()
l = p.loop_data
if p.loop_heads():
loops_by_fs[f] = {}
for x in p.loop_heads():
fun,_ = self.pNToFunGN((x,f))
#dodge halt
if fun in elfFile().deadend_funcs:
continue
loopheads.append((x, f))
#the 0 worker_id will get ignored by genLoopHeads.
#FIXME: do this properly..
loops_by_fs[f][x] = (2**30,'dummy',0)
assert loopheads
for p_nf in loopheads:
p_n, f = p_nf
p = self.f_problems[f]
ll = p.loop_data[p_n][1]
z = self.findLoopEntries(ll, f)
#map from potential heads -> head, hack around chronos 'feature'
for q in z:
assert q not in self.imm_loopheads, 'one addr cannot have >1 loopcounts !'
self.imm_loopheads[self.phyAddr((q,f))] = p_nf
return
def firstRealNodes(self,p_nf,visited = None,may_multi=False,may_call=False,skip_ret=False):
"""
Locate the first real node from, and including, p_addr,
or branch targets if it hits a branch before that.
Returns a list of p_nf
"""
elf_fun = self.elf_fun
p_n,f = p_nf
next_p_nf = p_nf
ret = []
if visited == None:
#print 'fRN on p_n %d, fun: %s' % (p_n,f)
visited = []
if p_nf in visited:
return []
visited.append(p_nf)
assert self.pf_deadends != None
while True:
if self.isRealNode(next_p_nf):
return [next_p_nf]
next_p_n , next_f, next_p = self.unpackPNF(next_p_nf)
if ( next_p_n == 'Ret' and f == self.name):
return [('Ret',f)]
elif next_p_n == 'Ret':
if skip_ret:
return []
assert False,'firstRealNodes reached Ret when skip_ret is False'
p_node, edges = self.pNodeConts(next_p_nf, may_call=may_call)
if edges == []:
return []
assert (edges)
if len(edges) > 1:
assert may_multi
for p_e in edges:
for ee in self.firstRealNodes(p_e ,visited = list(visited),may_multi=may_multi,may_call=may_call,skip_ret=skip_ret):
ret.append(ee)
return ret
else:
next_p_nf = edges[0]
#function p_n belongs to, g_n
def pNToFunGN(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
tag = p.node_tags[p_n]
_, x = tag
f_name, g_n = x
return f_name,g_n
    #given p_n is an imm call, return is_tailcall
def isCallTailCall(self,p_nf):
# suc = p_n_cs[0]
g_n = self.phyAddr(p_nf)
return elf_parser.isDirectBranch(g_n)
def isStraightToRetToRoot(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
if p_n == 'Ret' and f == self.name:
return True
elif p_n == 'Ret':
return False
if self.isRealNode(p_nf):
return False
if self.phyAddr(p_nf)=='RetToCaller':
return False
elif type(p_n) == int:
_,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
if len(p_conts) == 1:
return self.isStraightToRetToRoot((p_conts[0],f))
return False
#whether the corresponding imm has a return edge
def isImmRootReturn(self,p_nf):
p_n,f = p_nf
if f != self.name :
return False
_, pf_conts = self.pNodeConts(p_nf)
for x in pf_conts:
if self.isStraightToRetToRoot(x):
return True
return False
#whether p_n leads straightly to RetToCaller
def isStraightToRetToCaller(self,p_nf):
p_n,f = p_nf
if p_n == 'Ret':
if f != self.name:
return True
else:
return False
if self.isRealNode(p_nf):
return False
if self.phyAddr(p_nf)=="RetToCaller":
return True
elif type(p_n) == int:
_,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
if len(p_conts) == 1:
return self.isStraightToRetToCaller((p_conts[0],f))
return False
#All return except the root one
def isImmRetToCaller(self,p_nf):
g_n = self.phyAddr(p_nf)
p_n,f,p = self.unpackPNF(p_nf)
if isCall(p.nodes[p_n]):
return False
p_node,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
conts = [x for x in p_conts if type(p_n) == int]
#print ' p_n %s p_conts %s' % (p_n,p_conts)
n_rtc = 0
assert self.phyAddr(p_nf) == g_n
for pf_cont in pf_conts:
cont_n,cont_f = pf_cont
if not isCall(self.f_problems[cont_f].nodes[cont_n]):
if self.isStraightToRetToCaller(pf_cont):
ret = (pf_cont)
n_rtc += 1
if not ( n_rtc <= 1):
#print 'p_n %s g_n %s: n_rtc %s' % (p_n, self.phyAddr(p_n), n_rtc)
assert False
if n_rtc > 0:
return ret
return False
def funName(self,p_nf):
p_n,f = p_nf
fname = self.f_problems[f].nodes[p_n].fname
if '.' in fname:
#print 'f: %s' % fname
s = []
for c in fname:
if c == '.':
s.append('_')
else:
s.append(c)
return ''.join(s)
return fname
def makeProblem(self,f):
p = problem.Problem(None, 'Functions (%s)' % f)
p.add_entry_function(self.asm_fs[f], 'ASM')
p.do_analysis()
return p
def isSpecInsFunc(self,f):
"""
Returns whether f is the name of a special function
used to model special instruction
"""
return f.startswith ("instruction'")
def makeBinGraph(self):
"""
Prepare problems for all functions transitively called by self,
and turn this into a binary CFG
"""
self.f_problems = {}
if self.name not in elfFile().tcg:
print elfFile().tcg.keys()
tc_fs = list(elfFile().tcg[self.name])
for f in tc_fs + [self.name]:
assert '.' not in f
if self.isSpecInsFunc(f):
continue
p = problem.Problem(None, 'Functions (%s)' % f)
p.add_entry_function(self.asm_fs[f], 'ASM')
self.f_problems[f] = p
#print 'f %s, p.nodes: %d' % (f,len(p.nodes) )
#get its entry
assert len(p.entries) == 1
self.p_entries[f] = p.entries[0][0]
print 'all problems generated'
self.findAllDeadends()
print "all deadends found"
#now generate the bin graph
for f,p in self.f_problems.iteritems():
for p_n in p.nodes:
if type(p_n) != int:
continue
p_nf = (p_n,f)
if p_nf in self.pf_deadends:
continue
if self.isRealNode(p_nf):
#print 'adding: %s' % str(p_nf)
self.addImmNode(p_nf)
self.imm_entry = self.phyAddr(self.firstRealNodes((self.p_entries[self.name], self.name ))[0])
#print 'self.imm_entry %x' % self.imm_entry
self.bbs = findBBs(self.imm_entry,self)
def findAllDeadends(self):
self.pf_deadends = []
pf_deadends = self.pf_deadends
self.deadend_g_ns = set()
#Halt is a deadend function, and should never be called, it's equivalent to Err for our purpose
for dead_f in elfFile().deadend_funcs:
print 'dead_f %s' % dead_f
deadend_f_g_n = elfFile().funcs[dead_f].addr
self.deadend_g_ns.add (deadend_f_g_n)
print 'deadend_f_g_n 0x%x' % deadend_f_g_n
for (f,p) in self.f_problems.iteritems():
for p_n in p.nodes:
if self.isDeadend((p_n,f)):
pf_deadends.append((p_n,f))
def isDeadend(self,p_nf,visited=None):
'''
Determine if p_nf (p_n, function) is a deadend node
'''
if p_nf in self.pf_deadends:
return True
p_n, f, p = self.unpackPNF(p_nf)
if visited == None:
visited = []
if p_n == 'Err':
return True
if p_n == 'Ret':
return False
if p_nf in visited:
return True
if isCall(p.nodes[p_n]):
#walk into the callee problem
f = self.funName(p_nf)
#FIXME: dodge dummy functions
if 'instruction' in f:
return False
if f in elfFile().deadend_funcs:
return True
p_callee = self.f_problems[f]
assert len(p_callee.entries) == 1
p_callee_n = p_callee.entries[0][0]
return self.isDeadend((p_callee_n,f),visited=visited + [p_nf])
if type(p_n) == int and self.phyAddr(p_nf) == 'RetToCaller':
return False
g_n = self.phyAddr(p_nf)
if g_n in self.deadend_g_ns:
return True
#note: pNodeConts ensures we stay in the same problem
node,fconts = self.pNodeConts(p_nf)
conts = [ x[0] for x in fconts]
for p_c in conts:
assert p_c != p_n
if not self.isDeadend( (p_c,f), visited = visited + [p_nf]):
return False
#all ends are dead, thus deadend
return True
def unpackPNF(self,p_nf):
p_n,f = p_nf
p = self.f_problems[f]
return (p_n,f,p)
def phyAddr (self,p_nf) :
p_n, f , p = self.unpackPNF(p_nf)
if not isinstance(p_n,int):
return p_n
_,x = p.node_tags[p_n]
if x == 'LoopReturn':
return 'LoopReturn'
try:
f_name,g_addr = x
except:
print f
print 'tags: %s'% str(p.node_tags[p_n])
assert False
return g_addr
#must not reach Ret
def pNodeConts(self, p_nf, no_deadends=False, may_call = False):
p_n,f, p = self.unpackPNF(p_nf)
p_node = p.nodes[p_n]
if isCall(p_node):
assert may_call
fun_called = self.funName(p_nf)
p = self.f_problems[fun_called]
entry = self.p_entries[fun_called]
pf_conts = [(entry,fun_called)]
return p_node, pf_conts
assert p_n != 'Ret'
p_conts = filter(lambda x: x != 'Err', p_node.get_conts())
if no_deadends:
            p_conts = filter(lambda x: (x, f) not in self.pf_deadends, p_conts)
pf_conts = [(x , f) for x in p_conts]
return p_node,pf_conts
def isRealNode(self,p_nf):
p_n,f = p_nf
if p_n == 'Ret':
return False
g_n = self.phyAddr(p_nf)
if g_n == 'RetToCaller':
return False
elif self.isLoopReturn(p_nf):
return False
elif type(g_n) != int:
print 'g_n %s' % str(g_n)
assert False, 'g_n expected of typ int'
#elif g_n % 4 == 0 and not self.isLoopReturn(p_nf):
elif g_n % 4 == 0:
assert not self.isLoopReturn(p_nf)
return True
else:
return False
def isLoopReturn(self,p_nf):
p_n,f = p_nf
p = self.f_problems[f]
tag = p.node_tags[p_n]
return tag[1] == 'LoopReturn'
def addImmNode(self,p_nf):
imm_nodes = self.imm_nodes
g_n = self.phyAddr(p_nf)
p_node,pf_conts = self.pNodeConts(p_nf)
p_conts = [x[0] for x in pf_conts]
p_n,f,p = self.unpackPNF(p_nf)
#print "adding imm_node p_n: %s f: %s" % (p_n,f)
if g_n in imm_nodes:
#we have been here before
node = imm_nodes[g_n]
else:
node = immNode(g_n,rawVals(g_n))
imm_nodes[g_n] = node
dont_emit = []
p_imm_return_to_caller_edge = self.isImmRetToCaller(p_nf)
call_pn = self.getCallTarg(p_nf)
if call_pn:
fun_called = self.funName((call_pn, f))
if self.isSpecInsFunc(fun_called):
#Hack: go straight to the return node, do nothing else
next_addrs = p.nodes[call_pn].get_conts()
assert len(next_addrs) == 1
next_addr = next_addrs[0]
assert next_addr not in ['Ret','Err']
phy_next_addr = self.phyAddr((next_addr,f))
i_e = immEdge(phy_next_addr, emit = True)
node.addEdge(i_e)
return
imm_call = self.parseImmCall(p_nf)
assert not p_imm_return_to_caller_edge
g_call_targ,g_ret_addr,is_tail_call = imm_call
dont_emit.append(g_call_targ)
node.addCallRetEdges(g_call_targ, g_ret_addr,is_tail_call)
elif p_imm_return_to_caller_edge or self.isImmRootReturn(p_nf):
node.addRetEdge()
        #add edges to the imm node, ignore Err and halt
for p_targ in p_conts:
if type(p_targ) == int and (p_targ, f) not in self.pf_deadends:
if p_targ == 'Ret':
continue
edges = self.firstRealNodes((p_targ,f),may_multi=True,may_call=True,skip_ret=True)
for p_e in edges :
#dodge halt
if (p_e) in self.pf_deadends:
continue
g_e = self.phyAddr(p_e)
assert g_e != None
if g_e == 'Ret':
continue
assert g_e != 'Ret'
i_e = immEdge(g_e,emit = g_e not in dont_emit)
node.addEdge(i_e)
def retPF(self,call_p_nf):
p_n,f,p = self.unpackPNF(call_p_nf)
assert len(p.nodes[p_n].get_conts()) == 1
return ( (p.nodes[p_n].get_conts())[0] , f)
def getCallTarg(self, p_nf):
p_n,f,p = self.unpackPNF(p_nf)
_, pf_conts = self.pNodeConts(p_nf)
p_conts = map(lambda x: x[0],pf_conts)
#is Imm call iff there is a successor of kind Call in the g graph
p_n_cs = filter(lambda p_n_c:
type(p_n_c) == int
and not self.isLoopReturn(( p_n_c, f))
and isCall(self.gNode((p_n_c,f)))
, p_conts)
if not p_n_cs:
return None
assert len(p_n_cs) == 1
#return the p_n of the call node
return p_n_cs[0]
def parseImmCall(self,p_nf):
"""
Returns (entry point to the called function, return addr, is_tailcall)
"""
call_pn = self.getCallTarg(p_nf)
assert call_pn != None
p_n,f,p = self.unpackPNF(p_nf)
#print "p_n: %s, f: %s" % (p_n,f)
p_nodes = p.nodes
#find the return addr
#print "call_pn = %d" % call_pn
suc = self.firstRealNodes( (call_pn, f) ,may_multi=False,may_call=True)
pf_call_targ = suc[0]
g_call_targ = self.phyAddr(pf_call_targ)
#locate the call return address
f_caller, _ = self.pNToFunGN(p_nf)
is_tailcall = self.isCallTailCall(p_nf)
if not is_tailcall:
#return the return addr
phy_ret_addr = self.phyAddr(self.retPF((call_pn,f)))
else:
phy_ret_addr = None
assert type(phy_ret_addr) == int or is_tailcall, "g_call_targ %s phy_ret_addr %s" % (g_call_targ, phy_ret_addr)
#print 'call detected: phy_ret_addr %x' % phy_ret_addr
return (g_call_targ, phy_ret_addr,is_tailcall)
def gNode(self,p_nf):
p_n,f,p = self.unpackPNF(p_nf)
tag = p.node_tags[p_n]
f = tag[1][0]
g_n = tag[1][1]
return self.asm_fs[f].nodes[g_n]
| en | 0.652406 | # # Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) # # SPDX-License-Identifier: BSD-2-Clause # #loop_counts.py must contain exactly 1 dict called man_loop_counts #we should have a dict of addr -> bound # dict of f-> loop_heads -> (bound, description) #f -> p_n #build bin_loops_by_fs from loops_by_fs #bin addr to bb addr #find all possible entries of the loop for Chronos #loop entry, must be #1. a basic block head and #2. has >=1 edge from outside the loop ##gotta be halt / branch to halt #loopheads = {} #self.loopheads = loopheads #dodge halt #the 0 worker_id will get ignored by genLoopHeads. #FIXME: do this properly.. #map from potential heads -> head, hack around chronos 'feature' Locate the first real node from, and including, p_addr, or branch targets if it hits a branch before that. Returns a list of p_nf #print 'fRN on p_n %d, fun: %s' % (p_n,f) #function p_n belongs to, g_n #given p_n is an imm call, return is_taillcall # suc = p_n_cs[0] #whether the corresponding imm has a return edge #whether p_n leads straightly to RetToCaller #All return except the root one #print ' p_n %s p_conts %s' % (p_n,p_conts) #print 'p_n %s g_n %s: n_rtc %s' % (p_n, self.phyAddr(p_n), n_rtc) #print 'f: %s' % fname Returns whether f is the name of a special function used to model special instruction Prepare problems for all functions transitively called by self, and turn this into a binary CFG #print 'f %s, p.nodes: %d' % (f,len(p.nodes) ) #get its entry #now generate the bin graph #print 'adding: %s' % str(p_nf) #print 'self.imm_entry %x' % self.imm_entry #Halt is a deadend function, and should never be called, it's equivalent to Err for our purpose Determine if p_nf (p_n, function) is a deadend node #walk into the callee problem #FIXME: dodge dummy functions #note: pNodeConts ensures we stay in the same problem #all ends are dead, thus deadend #must not reach Ret #elif g_n % 4 == 0 and not self.isLoopReturn(p_nf): #print "adding imm_node p_n: %s f: %s" % (p_n,f) #we have been here before #Hack: go straight to the return node, do nothing else #add edges to the imm node,ingore Err and halt #dodge halt #is Imm call iff there is a successor of kind Call in the g graph #return the p_n of the call node Returns (entry point to the called function, return addr, is_tailcall) #print "p_n: %s, f: %s" % (p_n,f) #find the return addr #print "call_pn = %d" % call_pn #locate the call return address #return the return addr #print 'call detected: phy_ret_addr %x' % phy_ret_addr | 2.14668 | 2 |
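Two hedged sketches to make the module above easier to follow: immFunc inherits from Borg (imported via elf_file), for which the classic shared-state idiom is shown first, and loadCounts expects a loop_counts.py shaped roughly like the second snippet. Both are reconstructions under stated assumptions, not the project's actual definitions.

# Minimal Borg (shared-state) idiom: every instance shares one __dict__,
# so attributes set on one immFunc instance are visible on all others.
class Borg(object):
    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state


# Hypothetical loop_counts.py consumed by loadCounts():
# function name -> {problem loop head -> (bound, description, worker_id)}
loops_by_fs = {
    'memzero': {12: (64, 'manually derived bound', 0)},
}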
Gelatin/parser/Parser.py | Etherbay/Gelatin | 107 | 9585 | <gh_stars>100-1000
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import codecs
from simpleparse import parser
from .Newline import Newline
from .Indent import Indent
from .Dedent import Dedent
from .util import error
_ebnf_file = os.path.join(os.path.dirname(__file__), 'syntax.ebnf')
with open(_ebnf_file) as _thefile:
_ebnf = _thefile.read()
class Parser(parser.Parser):
def __init__(self):
self.indent = 0
offside = (
("NEWLINE", Newline(self).table()),
("INDENT", Indent(self).table()),
("DEDENT", Dedent(self).table()),
)
parser.Parser.__init__(self, _ebnf, 'root', prebuilts=offside)
def parse_string(self, input, compiler):
compiler.reset()
start, _, end = parser.Parser.parse(self, input, processor=compiler)
if end < len(input):
error(input, end)
if 'input' not in compiler.context.grammars:
error(input, end, 'Required grammar "input" not found.')
return compiler.context
def parse(self, filename, compiler, encoding='utf8'):
with codecs.open(filename, 'r', encoding=encoding) as input_file:
string = input_file.read()
return self.parse_string(string, compiler)
| # Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import codecs
from simpleparse import parser
from .Newline import Newline
from .Indent import Indent
from .Dedent import Dedent
from .util import error
_ebnf_file = os.path.join(os.path.dirname(__file__), 'syntax.ebnf')
with open(_ebnf_file) as _thefile:
_ebnf = _thefile.read()
class Parser(parser.Parser):
def __init__(self):
self.indent = 0
offside = (
("NEWLINE", Newline(self).table()),
("INDENT", Indent(self).table()),
("DEDENT", Dedent(self).table()),
)
parser.Parser.__init__(self, _ebnf, 'root', prebuilts=offside)
def parse_string(self, input, compiler):
compiler.reset()
start, _, end = parser.Parser.parse(self, input, processor=compiler)
if end < len(input):
error(input, end)
if 'input' not in compiler.context.grammars:
error(input, end, 'Required grammar "input" not found.')
return compiler.context
def parse(self, filename, compiler, encoding='utf8'):
with codecs.open(filename, 'r', encoding=encoding) as input_file:
string = input_file.read()
return self.parse_string(string, compiler) | en | 0.765851 | # Copyright (c) 2010-2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. | 2.229748 | 2 |
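A hedged driver sketch for the Parser above. The compiler argument must provide reset(), a context attribute exposing grammars, and the SimpleParse processor interface; SyntaxCompiler is the class Gelatin normally uses, but both import paths here are assumptions.

from Gelatin.parser import Parser            # assumed package export
from Gelatin.compiler import SyntaxCompiler  # assumed import path

parser = Parser()
context = parser.parse('mygrammar.gel', SyntaxCompiler(), encoding='utf8')
# parse() returns compiler.context once the required 'input' grammar was found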
C03-Unit-Testing/21-C03V15/utils.py | dirchev/Python-101-Forever-1 | 59 | 9586 | BIG_CONSTANT = "YES"
def group_by(xs, grouper):
groups = {}
for x in xs:
group = grouper(x)
if group not in groups:
groups[group] = []
groups[group].append(x)
return groups
print(group_by([1, 2, 3, 4, 5, 6], lambda x: "even" if x % 2 == 0 else "odd"))
| BIG_CONSTANT = "YES"
def group_by(xs, grouper):
groups = {}
for x in xs:
group = grouper(x)
if group not in groups:
groups[group] = []
groups[group].append(x)
return groups
print(group_by([1, 2, 3, 4, 5, 6], lambda x: "even" if x % 2 == 0 else "odd"))
| none | 1 | 3.744091 | 4 |
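One more hedged example of the same helper, grouping words by their length; the expected output is shown in the comment.

words = ["cat", "dog", "horse", "sheep", "ox"]
print(group_by(words, len))
# -> {3: ['cat', 'dog'], 5: ['horse', 'sheep'], 2: ['ox']}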
|
pipeline/test_sftp_to_s3.py | streamsets/datacollector-tests-external | 1 | 9587 | <filename>pipeline/test_sftp_to_s3.py
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
import os
import string
import time
from streamsets.sdk.models import Configuration
from streamsets.testframework.markers import aws, sftp, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Sandbox prefix for S3 bucket
S3_BUCKET_PREFIX = 'sftp_upload'
@sdc_min_version('3.8.2')
@sftp
@aws('s3')
def test_sftp_origin_whole_file_to_s3(sdc_builder, sdc_executor, sftp, aws):
"""
This is a test for SDC-11273. First, it creates a large (~6MB) file and puts it on the SFTP server.
Then, it creates a pipeline with SFTP origin and S3 destination, with whole file format, and runs
until the single record (file) is complete. Then, it asserts the S3 bucket contents are correct.
It passes only if the new option ("Disable Read Ahead Stream") is enabled.
"""
sftp_file_name = get_random_string(string.ascii_letters, 10) + '.txt'
raw_text_data = get_random_string(string.printable, 6000000)
sftp.put_string(os.path.join(sftp.path, sftp_file_name), raw_text_data)
s3_bucket = aws.s3_bucket_name
s3_key = f'{S3_BUCKET_PREFIX}/{sftp_file_name}'
# Build the pipeline
builder = sdc_builder.get_pipeline_builder()
sftp_ftp_client = builder.add_stage(name='com_streamsets_pipeline_stage_origin_remote_RemoteDownloadDSource')
sftp_ftp_client.file_name_pattern = sftp_file_name
sftp_ftp_client.data_format = 'WHOLE_FILE'
sftp_ftp_client.set_attributes(disable_read_ahead_stream=True)
s3_destination = builder.add_stage('Amazon S3', type='destination')
s3_destination.file_name_expression = "${record:value('/fileInfo/filename')}"
s3_destination.set_attributes(bucket=s3_bucket, data_format='WHOLE_FILE', partition_prefix=s3_key)
sftp_ftp_client >> s3_destination
sftp_to_s3_pipeline = builder.build(title='SFTP to S3 Whole File').configure_for_environment(aws).configure_for_environment(sftp)
sdc_executor.add_pipeline(sftp_to_s3_pipeline)
client = aws.s3
try:
# start pipeline and run for one record (the file)
sdc_executor.start_pipeline(sftp_to_s3_pipeline).wait_for_pipeline_output_records_count(1)
sdc_executor.stop_pipeline(sftp_to_s3_pipeline)
# assert record count to S3 the size of the objects put
list_s3_objs = client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)
assert len(list_s3_objs['Contents']) == 1
# read data from S3 to assert contents
s3_contents = [client.get_object(Bucket=s3_bucket, Key=s3_content['Key'])['Body'].read().decode().strip()
for s3_content in list_s3_objs['Contents']]
# compare the S3 bucket contents against the original whole file contents
assert s3_contents[0] == raw_text_data
finally:
delete_keys = {'Objects': [{'Key': k['Key']}
for k in client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)['Contents']]}
client.delete_objects(Bucket=s3_bucket, Delete=delete_keys)
| <filename>pipeline/test_sftp_to_s3.py
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
import os
import string
import time
from streamsets.sdk.models import Configuration
from streamsets.testframework.markers import aws, sftp, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Sandbox prefix for S3 bucket
S3_BUCKET_PREFIX = 'sftp_upload'
@sdc_min_version('3.8.2')
@sftp
@aws('s3')
def test_sftp_origin_whole_file_to_s3(sdc_builder, sdc_executor, sftp, aws):
"""
This is a test for SDC-11273. First, it creates a large (~6MB) file and puts it on the SFTP server.
Then, it creates a pipeline with SFTP origin and S3 destination, with whole file format, and runs
until the single record (file) is complete. Then, it asserts the S3 bucket contents are correct.
It passes only if the new option ("Disable Read Ahead Stream") is enabled.
"""
sftp_file_name = get_random_string(string.ascii_letters, 10) + '.txt'
raw_text_data = get_random_string(string.printable, 6000000)
sftp.put_string(os.path.join(sftp.path, sftp_file_name), raw_text_data)
s3_bucket = aws.s3_bucket_name
s3_key = f'{S3_BUCKET_PREFIX}/{sftp_file_name}'
# Build the pipeline
builder = sdc_builder.get_pipeline_builder()
sftp_ftp_client = builder.add_stage(name='com_streamsets_pipeline_stage_origin_remote_RemoteDownloadDSource')
sftp_ftp_client.file_name_pattern = sftp_file_name
sftp_ftp_client.data_format = 'WHOLE_FILE'
sftp_ftp_client.set_attributes(disable_read_ahead_stream=True)
s3_destination = builder.add_stage('Amazon S3', type='destination')
s3_destination.file_name_expression = "${record:value('/fileInfo/filename')}"
s3_destination.set_attributes(bucket=s3_bucket, data_format='WHOLE_FILE', partition_prefix=s3_key)
sftp_ftp_client >> s3_destination
sftp_to_s3_pipeline = builder.build(title='SFTP to S3 Whole File').configure_for_environment(aws).configure_for_environment(sftp)
sdc_executor.add_pipeline(sftp_to_s3_pipeline)
client = aws.s3
try:
# start pipeline and run for one record (the file)
sdc_executor.start_pipeline(sftp_to_s3_pipeline).wait_for_pipeline_output_records_count(1)
sdc_executor.stop_pipeline(sftp_to_s3_pipeline)
# assert record count to S3 the size of the objects put
list_s3_objs = client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)
assert len(list_s3_objs['Contents']) == 1
# read data from S3 to assert contents
s3_contents = [client.get_object(Bucket=s3_bucket, Key=s3_content['Key'])['Body'].read().decode().strip()
for s3_content in list_s3_objs['Contents']]
# compare the S3 bucket contents against the original whole file contents
assert s3_contents[0] == raw_text_data
finally:
delete_keys = {'Objects': [{'Key': k['Key']}
for k in client.list_objects_v2(Bucket=s3_bucket, Prefix=s3_key)['Contents']]}
client.delete_objects(Bucket=s3_bucket, Delete=delete_keys)
| en | 0.849352 | # Copyright 2019 StreamSets Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Sandbox prefix for S3 bucket This is a test for SDC-11273. First, it creates a large (~6MB) file and puts it on the SFTP server. Then, it creates a pipeline with SFTP origin and S3 destination, with whole file format, and runs until the single record (file) is complete. Then, it asserts the S3 bucket contents are correct. It passes only if the new option ("Disable Read Ahead Stream") is enabled. # Build the pipeline # start pipeline and run for one record (the file) # assert record count to S3 the size of the objects put # read data from S3 to assert contents # compare the S3 bucket contents against the original whole file contents | 1.930303 | 2 |
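The finally-block cleanup above can be factored into a small standalone helper. A hedged sketch follows; the function name is ours and not part of the test framework or StreamSets SDK.

import boto3


def delete_prefix(bucket, prefix, client=None):
    """Delete every S3 object under ``prefix`` (mirrors the test's cleanup)."""
    client = client or boto3.client('s3')
    listed = client.list_objects_v2(Bucket=bucket, Prefix=prefix)
    keys = [{'Key': obj['Key']} for obj in listed.get('Contents', [])]
    if keys:
        client.delete_objects(Bucket=bucket, Delete={'Objects': keys})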
terra/tests/__init__.py | NoahRJohnson/terra | 0 | 9588 | <reponame>NoahRJohnson/terra<gh_stars>0
import os
# Use this as a package level setup
def load_tests(loader, standard_tests, pattern):
if os.environ.get('TERRA_UNITTEST', None) != "1":
print('WARNING: Running terra tests without setting TERRA_UNITTEST will '
          'result in side effects such as extraneous log files being '
'generated')
this_dir = os.path.dirname(__file__)
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
  # Run this test last, to make sure none of the other tests degraded the
# integrity of terra. A configured terra can cause unittests to interfere
# with each other
loader.testMethodPrefix = 'last'
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
# This does not check THIS file for 'last', I can't figure that out, cause
# it is "discovered" before load_tests is ever called
return standard_tests
| import os
# Use this as a package level setup
def load_tests(loader, standard_tests, pattern):
if os.environ.get('TERRA_UNITTEST', None) != "1":
print('WARNING: Running terra tests without setting TERRA_UNITTEST will '
          'result in side effects such as extraneous log files being '
'generated')
this_dir = os.path.dirname(__file__)
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
  # Run this test last, to make sure none of the other tests degraded the
# integrity of terra. A configured terra can cause unittests to interfere
# with each other
loader.testMethodPrefix = 'last'
package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
standard_tests.addTests(package_tests)
# This does not check THIS file for 'last', I can't figure that out, cause
# it is "discovered" before load_tests is ever called
return standard_tests | en | 0.945383 | # Use this as a package level setup # Run this test last, to make sure none of the other tests degrated the # integrity of terra. A configured terra can cause unittests to interfere # with each other # This does not check THIS file for 'last', I can't figure that out, cause # it is "discovered" before load_tests is ever called | 2.301769 | 2 |
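A hedged sketch of a test module the second discovery pass would pick up: because testMethodPrefix is switched to 'last', only methods whose names start with 'last' run in that pass, after the regular suite has finished. The class, method, and assertion below are illustrative.

import os
import unittest


class TestSuiteIntegrity(unittest.TestCase):

    def last_check_environment(self):
        # placeholder check; a real test would verify terra was left unconfigured
        self.assertEqual(os.environ.get('TERRA_UNITTEST'), '1')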
icons/svg2png.py | benburrill/formiko | 116 | 9589 | # -*- coding: utf-8 -*-
from gi.repository.GdkPixbuf import Pixbuf
from os import makedirs
def main():
for size in (16, 22, 24, 32, 48, 64, 128, 256, 512):
icon = Pixbuf.new_from_file_at_scale("formiko.svg", size, size, True)
makedirs("%dx%d" % (size, size))
icon.savev("%dx%d/formiko.png" % (size, size), "png", [], [])
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
from gi.repository.GdkPixbuf import Pixbuf
from os import makedirs
def main():
for size in (16, 22, 24, 32, 48, 64, 128, 256, 512):
icon = Pixbuf.new_from_file_at_scale("formiko.svg", size, size, True)
makedirs("%dx%d" % (size, size))
icon.savev("%dx%d/formiko.png" % (size, size), "png", [], [])
if __name__ == "__main__":
main()
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.485753 | 2 |
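A hedged single-size variant of the conversion above that also passes PNG encoder options to savev; "compression" is a standard GdkPixbuf PNG save option, while the output file name is illustrative.

from gi.repository.GdkPixbuf import Pixbuf

icon = Pixbuf.new_from_file_at_scale("formiko.svg", 256, 256, True)
icon.savev("formiko-256.png", "png", ["compression"], ["9"])  # max PNG compression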
django/currencies/migrations/0003_auto_20211121_0701.py | AngelOnFira/megagame-controller | 0 | 9590 | <reponame>AngelOnFira/megagame-controller<filename>django/currencies/migrations/0003_auto_20211121_0701.py
# Generated by Django 3.2.8 on 2021-11-21 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("currencies", "0002_initial"),
]
operations = [
migrations.AddField(
model_name="payment",
name="completed",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="payment",
name="completion_amount",
field=models.IntegerField(default=0),
),
]
| # Generated by Django 3.2.8 on 2021-11-21 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("currencies", "0002_initial"),
]
operations = [
migrations.AddField(
model_name="payment",
name="completed",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="payment",
name="completion_amount",
field=models.IntegerField(default=0),
),
] | en | 0.891731 | # Generated by Django 3.2.8 on 2021-11-21 12:01 | 1.590925 | 2 |
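A hedged reconstruction of the Payment model fields implied by this migration; the real model lives in currencies/models.py and will carry additional fields and relations.

from django.db import models


class Payment(models.Model):
    completed = models.BooleanField(default=False)
    completion_amount = models.IntegerField(default=0)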
etna/transforms/decomposition/trend.py | tinkoff-ai/etna-ts | 96 | 9591 | from typing import Optional
import pandas as pd
from ruptures import Binseg
from ruptures.base import BaseCost
from sklearn.linear_model import LinearRegression
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.decomposition.change_points_trend import BaseEstimator
from etna.transforms.decomposition.change_points_trend import TDetrendModel
from etna.transforms.decomposition.change_points_trend import _OneSegmentChangePointsTrendTransform
class _OneSegmentTrendTransform(_OneSegmentChangePointsTrendTransform):
"""_OneSegmentTrendTransform adds trend as a feature."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _OneSegmentTrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend from data
change_point_model_predict_params:
params for change_point_model predict method
"""
self.out_column = out_column
super().__init__(
in_column=in_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Add column with trend, got from the detrend_model.
Parameters
----------
df:
data to get trend from
Returns
-------
pd.DataFrame:
df with trend column
"""
df._is_copy = False
series = df[self.in_column]
trend_series = self._predict_per_interval_model(series=series)
df[self.out_column] = trend_series
return df
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Inverse transform dataframe.
Parameters
----------
df:
one segment dataframe
Returns
-------
pd.DataFrame:
given dataframe
"""
return df
class _TrendTransform(PerSegmentWrapper):
"""_TrendTransform adds trend as a feature. Creates column 'regressor_<in_column>_trend'."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend in data
change_point_model_predict_params:
params for change_point_model predict method
"""
super().__init__(
transform=_OneSegmentTrendTransform(
in_column=in_column,
out_column=out_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
)
class TrendTransform(_TrendTransform):
"""TrendTransform adds trend as a feature.
TrendTransform uses Binseg model as a change point detection model in _TrendTransform.
"""
def __init__(
self,
in_column: str,
out_column: Optional[str] = None,
detrend_model: TDetrendModel = LinearRegression(),
model: str = "ar",
custom_cost: Optional[BaseCost] = None,
min_size: int = 2,
jump: int = 1,
n_bkps: int = 5,
pen: Optional[float] = None,
epsilon: Optional[float] = None,
):
"""Init TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column. Don't forget to add regressor prefix if necessary.
If not given, use 'regressor_{self.__repr__()}'
detrend_model:
model to get trend in data
model:
binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
custom_cost:
binseg custom cost function
min_size:
minimum segment length necessary to decide it is a stable trend segment
jump:
jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search.
n_bkps:
number of change points to find
pen:
penalty value (>0)
epsilon:
reconstruction budget (>0)
"""
self.in_column = in_column
self.out_column = out_column
self.detrend_model = detrend_model
self.model = model
self.custom_cost = custom_cost
self.min_size = min_size
self.jump = jump
self.n_bkps = n_bkps
self.pen = pen
self.epsilon = epsilon
super().__init__(
in_column=self.in_column,
out_column=self.out_column if self.out_column is not None else f"regressor_{self.__repr__()}",
change_point_model=Binseg(
model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
),
detrend_model=self.detrend_model,
n_bkps=self.n_bkps,
pen=self.pen,
epsilon=self.epsilon,
)
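A hedged usage sketch of TrendTransform on an etna TSDataset; the synthetic frame, column names, and frequency are assumptions following etna's usual wide-format conventions.

import pandas as pd
from etna.datasets import TSDataset
from etna.transforms import TrendTransform

flat = pd.DataFrame({
    "timestamp": pd.date_range("2021-01-01", periods=30, freq="D"),
    "segment": "segment_a",
    "target": list(range(30)),
})
ts = TSDataset(TSDataset.to_dataset(flat), freq="D")
ts.fit_transform([TrendTransform(in_column="target", out_column="regressor_trend", n_bkps=3)])
# adds a per-segment "regressor_trend" column fitted piecewise by Binseg + LinearRegression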
| from typing import Optional
import pandas as pd
from ruptures import Binseg
from ruptures.base import BaseCost
from sklearn.linear_model import LinearRegression
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.decomposition.change_points_trend import BaseEstimator
from etna.transforms.decomposition.change_points_trend import TDetrendModel
from etna.transforms.decomposition.change_points_trend import _OneSegmentChangePointsTrendTransform
class _OneSegmentTrendTransform(_OneSegmentChangePointsTrendTransform):
"""_OneSegmentTrendTransform adds trend as a feature."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _OneSegmentTrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend from data
change_point_model_predict_params:
params for change_point_model predict method
"""
self.out_column = out_column
super().__init__(
in_column=in_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Add column with trend, got from the detrend_model.
Parameters
----------
df:
data to get trend from
Returns
-------
pd.DataFrame:
df with trend column
"""
df._is_copy = False
series = df[self.in_column]
trend_series = self._predict_per_interval_model(series=series)
df[self.out_column] = trend_series
return df
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Inverse transform dataframe.
Parameters
----------
df:
one segment dataframe
Returns
-------
pd.DataFrame:
given dataframe
"""
return df
class _TrendTransform(PerSegmentWrapper):
"""_TrendTransform adds trend as a feature. Creates column 'regressor_<in_column>_trend'."""
def __init__(
self,
in_column: str,
out_column: str,
change_point_model: BaseEstimator,
detrend_model: TDetrendModel,
**change_point_model_predict_params,
):
"""Init _TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column
change_point_model:
model to get trend change points
detrend_model:
model to get trend in data
change_point_model_predict_params:
params for change_point_model predict method
"""
super().__init__(
transform=_OneSegmentTrendTransform(
in_column=in_column,
out_column=out_column,
change_point_model=change_point_model,
detrend_model=detrend_model,
**change_point_model_predict_params,
)
)
class TrendTransform(_TrendTransform):
"""TrendTransform adds trend as a feature.
TrendTransform uses Binseg model as a change point detection model in _TrendTransform.
"""
def __init__(
self,
in_column: str,
out_column: Optional[str] = None,
detrend_model: TDetrendModel = LinearRegression(),
model: str = "ar",
custom_cost: Optional[BaseCost] = None,
min_size: int = 2,
jump: int = 1,
n_bkps: int = 5,
pen: Optional[float] = None,
epsilon: Optional[float] = None,
):
"""Init TrendTransform.
Parameters
----------
in_column:
name of column to apply transform to
out_column:
name of added column. Don't forget to add regressor prefix if necessary.
If not given, use 'regressor_{self.__repr__()}'
detrend_model:
model to get trend in data
model:
binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None.
custom_cost:
binseg custom cost function
min_size:
minimum segment length necessary to decide it is a stable trend segment
jump:
jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search.
n_bkps:
number of change points to find
pen:
penalty value (>0)
epsilon:
reconstruction budget (>0)
"""
self.in_column = in_column
self.out_column = out_column
self.detrend_model = detrend_model
self.model = model
self.custom_cost = custom_cost
self.min_size = min_size
self.jump = jump
self.n_bkps = n_bkps
self.pen = pen
self.epsilon = epsilon
super().__init__(
in_column=self.in_column,
out_column=self.out_column if self.out_column is not None else f"regressor_{self.__repr__()}",
change_point_model=Binseg(
model=self.model, custom_cost=self.custom_cost, min_size=self.min_size, jump=self.jump
),
detrend_model=self.detrend_model,
n_bkps=self.n_bkps,
pen=self.pen,
epsilon=self.epsilon,
)
| en | 0.593738 | _OneSegmentTrendTransform adds trend as a feature. Init _OneSegmentTrendTransform. Parameters ---------- in_column: name of column to apply transform to out_column: name of added column change_point_model: model to get trend change points detrend_model: model to get trend from data change_point_model_predict_params: params for change_point_model predict method Add column with trend, got from the detrend_model. Parameters ---------- df: data to get trend from Returns ------- pd.DataFrame: df with trend column Inverse transform dataframe. Parameters ---------- df: one segment dataframe Returns ------- pd.DataFrame: given dataframe _TrendTransform adds trend as a feature. Creates column 'regressor_<in_column>_trend'. Init _TrendTransform. Parameters ---------- in_column: name of column to apply transform to out_column: name of added column change_point_model: model to get trend change points detrend_model: model to get trend in data change_point_model_predict_params: params for change_point_model predict method TrendTransform adds trend as a feature. TrendTransform uses Binseg model as a change point detection model in _TrendTransform. Init TrendTransform. Parameters ---------- in_column: name of column to apply transform to out_column: name of added column. Don't forget to add regressor prefix if necessary. If not given, use 'regressor_{self.__repr__()}' detrend_model: model to get trend in data model: binseg segment model, ["l1", "l2", "rbf",...]. Not used if 'custom_cost' is not None. custom_cost: binseg custom cost function min_size: minimum segment length necessary to decide it is a stable trend segment jump: jump value can speed up computations: if jump==k, the algo will use every k-th value for change points search. n_bkps: number of change points to find pen: penalty value (>0) epsilon: reconstruction budget (>0) | 2.33922 | 2 |
argopy/tests/test_fetchers_facade_index.py | schwehr/argopy | 0 | 9592 | import xarray as xr
import pytest
import warnings
import argopy
from argopy import IndexFetcher as ArgoIndexFetcher
from argopy.errors import InvalidFetcherAccessPoint, InvalidFetcher, ErddapServerError, DataNotFound
from . import (
AVAILABLE_INDEX_SOURCES,
requires_fetcher_index,
requires_connected_erddap_index,
requires_localftp_index,
requires_connection,
safe_to_server_errors
)
class Test_Facade:
src = list(AVAILABLE_INDEX_SOURCES.keys())[0]
def test_invalid_fetcher(self):
with pytest.raises(InvalidFetcher):
ArgoIndexFetcher(src="invalid_fetcher").to_xarray()
@requires_fetcher_index
def test_invalid_accesspoint(self):
# Use the first valid data source
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(
src=self.src
).invalid_accesspoint.to_xarray() # Can't get data if access point not defined first
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(
src=self.src
).to_xarray() # Can't get data if access point not defined first
@requires_fetcher_index
def test_invalid_dataset(self):
with pytest.raises(ValueError):
ArgoIndexFetcher(src=self.src, ds='dummy_ds')
@requires_connection
@requires_fetcher_index
class Test_AllBackends:
""" Test main API facade for all available index fetching backends """
local_ftp = argopy.tutorial.open_dataset("localftp")[0]
    # TODO: Determine the list of output formats to test
    # what else beyond .to_xarray()?
fetcher_opts = {}
# Define API entry point options to tests:
# These should be available online and with the argopy-data dummy gdac ftp
args = {}
args["float"] = [[2901623], [6901929, 2901623]]
args["region"] = [
[-60, -40, 40.0, 60.0],
[-60, -40, 40.0, 60.0, "2007-08-01", "2007-09-01"],
]
args["profile"] = [[2901623, 2], [6901929, [5, 45]]]
def __test_float(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args["float"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).float(arg)
assert isinstance(f.to_xarray(), xr.Dataset)
def __test_profile(self, bk, **ftc_opts):
""" Test profile index fetching for a given backend """
for arg in self.args["profile"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).profile(*arg)
assert isinstance(f.to_xarray(), xr.Dataset)
def __test_region(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args["region"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).region(arg)
assert isinstance(f.to_xarray(), xr.Dataset)
@pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
@requires_connected_erddap_index
@safe_to_server_errors
def test_float_erddap(self):
self.__test_float("erddap")
@requires_localftp_index
def test_float_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_float("localftp", index_file="ar_index_global_prof.txt")
@requires_localftp_index
def test_profile_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_profile("localftp", index_file="ar_index_global_prof.txt")
@pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
@requires_connected_erddap_index
def test_region_erddap(self):
self.__test_region("erddap")
@requires_localftp_index
def test_region_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_region("localftp", index_file="ar_index_global_prof.txt")
| import xarray as xr
import pytest
import warnings
import argopy
from argopy import IndexFetcher as ArgoIndexFetcher
from argopy.errors import InvalidFetcherAccessPoint, InvalidFetcher, ErddapServerError, DataNotFound
from . import (
AVAILABLE_INDEX_SOURCES,
requires_fetcher_index,
requires_connected_erddap_index,
requires_localftp_index,
requires_connection,
safe_to_server_errors
)
class Test_Facade:
src = list(AVAILABLE_INDEX_SOURCES.keys())[0]
def test_invalid_fetcher(self):
with pytest.raises(InvalidFetcher):
ArgoIndexFetcher(src="invalid_fetcher").to_xarray()
@requires_fetcher_index
def test_invalid_accesspoint(self):
# Use the first valid data source
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(
src=self.src
).invalid_accesspoint.to_xarray() # Can't get data if access point not defined first
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(
src=self.src
).to_xarray() # Can't get data if access point not defined first
@requires_fetcher_index
def test_invalid_dataset(self):
with pytest.raises(ValueError):
ArgoIndexFetcher(src=self.src, ds='dummy_ds')
@requires_connection
@requires_fetcher_index
class Test_AllBackends:
""" Test main API facade for all available index fetching backends """
local_ftp = argopy.tutorial.open_dataset("localftp")[0]
    # TODO: Determine the list of output formats to test
    # what else beyond .to_xarray()?
fetcher_opts = {}
# Define API entry point options to tests:
# These should be available online and with the argopy-data dummy gdac ftp
args = {}
args["float"] = [[2901623], [6901929, 2901623]]
args["region"] = [
[-60, -40, 40.0, 60.0],
[-60, -40, 40.0, 60.0, "2007-08-01", "2007-09-01"],
]
args["profile"] = [[2901623, 2], [6901929, [5, 45]]]
def __test_float(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args["float"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).float(arg)
assert isinstance(f.to_xarray(), xr.Dataset)
def __test_profile(self, bk, **ftc_opts):
""" Test profile index fetching for a given backend """
for arg in self.args["profile"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).profile(*arg)
assert isinstance(f.to_xarray(), xr.Dataset)
def __test_region(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args["region"]:
options = {**self.fetcher_opts, **ftc_opts}
f = ArgoIndexFetcher(src=bk, **options).region(arg)
assert isinstance(f.to_xarray(), xr.Dataset)
@pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
@requires_connected_erddap_index
@safe_to_server_errors
def test_float_erddap(self):
self.__test_float("erddap")
@requires_localftp_index
def test_float_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_float("localftp", index_file="ar_index_global_prof.txt")
@requires_localftp_index
def test_profile_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_profile("localftp", index_file="ar_index_global_prof.txt")
@pytest.mark.skip(reason="Waiting for https://github.com/euroargodev/argopy/issues/16")
@requires_connected_erddap_index
def test_region_erddap(self):
self.__test_region("erddap")
@requires_localftp_index
def test_region_localftp(self):
with argopy.set_options(local_ftp=self.local_ftp):
self.__test_region("localftp", index_file="ar_index_global_prof.txt")
| en | 0.484368 | # Use the first valid data source # Can't get data if access point not defined first # Can't get data if access point not defined first Test main API facade for all available index fetching backends # todo Determine the list of output format to test # what else beyond .to_xarray() ? # Define API entry point options to tests: # These should be available online and with the argopy-data dummy gdac ftp Test float index fetching for a given backend Test profile index fetching for a given backend Test float index fetching for a given backend | 2.275676 | 2 |
custom_components/acthor/config_flow.py | jatty/hass-acthor | 0 | 9593 | import voluptuous as vol
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME
from .acthor import test_connection
from .const import DEVICE_NAME, DOMAIN
class ACThorConfigFlow(ConfigFlow, domain=DOMAIN):
async def async_step_user(self, user_input: dict = None) -> dict:
errors = {}
if user_input is not None:
ok = await test_connection(user_input[CONF_HOST], timeout=5)
if ok:
return self.async_create_entry(
title=user_input[CONF_NAME],
data=user_input,
)
else:
errors["base"] = "connection_failed"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required(CONF_NAME, default=DEVICE_NAME): str,
vol.Required(CONF_HOST): str,
}),
errors=errors,
)
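# --- Illustrative sketch (not part of the original component) ---
# The entry created by the flow above stores the user's input under the same
# keys; a minimal, hypothetical reader of that data would look like this:
def _example_read_entry_data(entry_data: dict) -> tuple:
    """Return the (name, host) pair collected by the config flow."""
    return entry_data[CONF_NAME], entry_data[CONF_HOST]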
| import voluptuous as vol
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME
from .acthor import test_connection
from .const import DEVICE_NAME, DOMAIN
class ACThorConfigFlow(ConfigFlow, domain=DOMAIN):
async def async_step_user(self, user_input: dict = None) -> dict:
errors = {}
if user_input is not None:
ok = await test_connection(user_input[CONF_HOST], timeout=5)
if ok:
return self.async_create_entry(
title=user_input[CONF_NAME],
data=user_input,
)
else:
errors["base"] = "connection_failed"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required(CONF_NAME, default=DEVICE_NAME): str,
vol.Required(CONF_HOST): str,
}),
errors=errors,
)
| none | 1 | 2.261267 | 2 |
|
doajtest/fixtures/common.py | glauberm/doaj | 0 | 9594 | <gh_stars>0
NOTES = {
'notes': [
{'date': '2014-05-22T00:00:00Z', 'note': 'Second Note'},
{'date': '2014-05-21T14:02:45Z', 'note': 'First Note'}
]
}
SUBJECT = {
"subject": ['HB1-3840', 'H']
}
OWNER = {
"owner": "Owner"
}
EDITORIAL = {
"editor_group": "editorgroup",
"editor": "associate"
}
SEAL = {
"doaj_seal": True,
}
| NOTES = {
'notes': [
{'date': '2014-05-22T00:00:00Z', 'note': 'Second Note'},
{'date': '2014-05-21T14:02:45Z', 'note': 'First Note'}
]
}
SUBJECT = {
"subject": ['HB1-3840', 'H']
}
OWNER = {
"owner": "Owner"
}
EDITORIAL = {
"editor_group": "editorgroup",
"editor": "associate"
}
SEAL = {
"doaj_seal": True,
} | none | 1 | 1.68745 | 2 |
|
docnado/docnado.py | HEInventions/docnado | 78 | 9595 | """ docnado.py
A rapid documentation tool that will blow you away.
"""
import os
import re
import sys
import csv
import glob
import time
import signal
import shutil
import urllib
import base64
import hashlib
import argparse
import tempfile
import datetime
import threading
import traceback
import subprocess
import platform
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib.parse import urlparse
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from xml.etree import ElementTree
from flask import Flask, url_for, abort, send_from_directory, \
render_template, Markup, make_response, render_template_string
import markdown
import markdown.util
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor
from markdown.inlinepatterns import LinkPattern, IMAGE_LINK_RE, dequote, handleAttributes
from markdown.blockprocessors import HashHeaderProcessor
from http.client import responses
if __package__:
from .navtree import NavItem, parse_nav_string
else:
from navtree import NavItem, parse_nav_string
class MultiPurposeLinkPattern(LinkPattern):
""" Embed image, video, youtube, csv or file download links
by extending the typical image tag pattern.
#  or 
If the link has "DOWNLOAD" in the alt text, treat it as a download.
Otherwise, see if its a YouTube video. Otherwise, see if its a
csv that can be turned into a table, otherwise if the link cannot be parsed
as a video, it will always be treated as an image.
"""
SUPPORTED_VIDEO = ('ogv', 'ogg', 'avi', 'mp4', 'webm', )
SUPPORTED_TABLES = ('csv', )
SUPPORTED_PDF = ('pdf', )
def get_src(self, m):
""" Get the source and parts from the matched groups: src, parts """
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
return self.sanitize_url(self.unescape(src)), src_parts
else:
return '', src_parts
@staticmethod
def youtube_url_validation(url):
""" Given a YouTube URL, return the ID component.
https://stackoverflow.com/questions/4705996
"""
youtube_regex = (r'(https?://)?(www\.)?'
r'(youtube|youtu|youtube-nocookie)\.(com|be)/'
r'(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
youtube_regex_match = re.match(youtube_regex, url)
return youtube_regex_match.group(6) if youtube_regex_match else None
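        # e.g. youtube_url_validation("https://youtu.be/dQw4w9WgXcQ") -> "dQw4w9WgXcQ"
        #      youtube_url_validation("https://example.com/clip.mp4") -> None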
@staticmethod
def as_youtube(m, video_id):
""" Return a DOM element that embeds a YouTube video. """
el = ElementTree.Element('iframe')
el.set('class', 'video')
el.set('src', f'https://www.youtube.com/embed/{video_id}?rel=0')
el.set('frameborder', '0')
el.set('allow', 'autoplay; encrypted-media')
el.set('allowfullscreen', '1')
return el
def as_pdf(self, m):
""" Return a DOM element that embeds a PDF document using an embed. """
src, parts = self.get_src(m)
wrapper = ElementTree.Element('aside')
wrapper.set('class', 'pdf-embed-wrapper')
el = ElementTree.SubElement(wrapper, 'embed')
el.set('class', 'pdf-embed')
el.set('src', src)
el.set('width', '100%')
el.set('type', 'application/pdf')
el.set('height', '100%') # width * 1.4142 (aspect ratio of a4)
el.set('pluginspage', 'http://www.adobe.com/products/acrobat/readstep2.html')
if len(parts) > 1:
el.set('alt', dequote(self.unescape(" ".join(parts[1:]))))
return wrapper
def as_video(self, m):
""" Return a video element """
src, parts = self.get_src(m)
el = ElementTree.Element('video')
el.set('src', src)
el.set("controls", "true")
handleAttributes(m.group(2), el)
return el
def as_image(self, m):
""" Return an image element """
el = ElementTree.Element('img')
src, parts = self.get_src(m)
el.set('src', src)
# Set the title if present.
if len(parts) > 1:
el.set('title', dequote(self.unescape(" ".join(parts[1:]))))
# Set the attributes on the element, if enabled.
# Set the 'alt' attribute with whatever is left from `handleAttributes`.
attrs = self.markdown.enable_attributes
alt_text = handleAttributes(m.group(2), el) if attrs else m.group(2)
el.set('alt', self.unescape(alt_text))
return el
def as_csv(self, m):
src, parts = self.get_src(m)
root = ElementTree.Element('table')
root.set('source', src)
root.set('class', 'csv-table table thead-light table-hover')
file_path = os.path.join(self.markdown.page_root, src)
with open(file_path, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
headers = next(reader)
rows = [r for r in reader]
thead = ElementTree.SubElement(root, 'thead')
for col in headers:
ElementTree.SubElement(thead, 'th').text = col
for row in rows:
tr = ElementTree.SubElement(root, 'tr')
for col in row:
ElementTree.SubElement(tr, 'td').text = col
return root
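        # e.g. the markdown image syntax `` is what routes a
        # .csv link through as_csv() and produces the <table> element built above.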
def as_download(self, m):
""" Create card layers used to make a download button. """
src, parts = self.get_src(m)
# Returns a human readable string representation of bytes
def _human_size(byte_number, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
return str(byte_number) + units[0] if byte_number < 1024 else _human_size(byte_number >> 10, units[1:])
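        # e.g. _human_size(512) -> '512 bytes', _human_size(2048) -> '2KB'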
# Get information required for card.
split_src = os.path.split(src)
file_path = os.path.join(self.markdown.page_root, *split_src)
file_size = os.path.getsize(file_path)
file_basename = os.path.basename(file_path)
card_text = dequote(self.unescape(" ".join(parts[1:]))) if len(parts) > 1 else ''
# If its a pptx, extract the thumbnail previews.
# NOTE: This works, but is is removed until we support other
# file types, which for now is not a priority.
# preview_uri = None
# import zipfile
# if (file_path.endswith('pptx')):
# with zipfile.ZipFile(file_path) as zipper:
# with zipper.open('docProps/thumbnail.jpeg', 'r') as fp:
# mime = 'image/jpeg'
# data64 = base64.b64encode(fp.read()).decode('utf-8')
# preview_uri = u'data:%s;base64,%s' % (mime, data64)
# Card and structure.
card = ElementTree.Element("div")
card.set('class', 'card download-card')
header = ElementTree.SubElement(card, 'div')
header.set('class', 'download-card-header')
body = ElementTree.SubElement(card, 'div')
body.set('class', 'download-card-body')
# Add preview image.
# if preview_uri:
# img = ET.SubElement(header, 'img')
# img.set('src', preview_uri)
# Filename link heading.
heading = ElementTree.SubElement(body, 'a')
heading.set('class', 'download-card-title')
heading.set('href', src)
download_icon = ElementTree.SubElement(heading, 'i')
download_icon.set('class', 'fa fa-download')
download_text = ElementTree.SubElement(heading, 'span')
download_text.text = file_basename
# Title element from the "quote marks" part.
body_desc = ElementTree.SubElement(body, 'span')
body_desc.text = card_text
# File size span at the bottom.
body_size = ElementTree.SubElement(body, 'span')
body_size.set('class', 'small text-muted')
body_size.text = f'{_human_size(file_size)}'
return card
@staticmethod
def _is_inject(m):
""" Determine if the ALT text [] part of the link says 'INJECT'. """
alt = m.group(2)
return alt.lower() == 'inject'
def as_raw(self, m):
""" Load the HTML document specified in the link, parse it to HTML elements and return it.
"""
src, parts = self.get_src(m)
# Find the path to the HTML document, relative to the current markdown page.
file_path = os.path.join(self.markdown.page_root, src)
raw_html_string = read_html_for_injection(file_path)
if len(parts) < 2:
parts.append("nothing_one=1||nothing_two=2")
# Helper function.
def _argify(args):
if '=' not in args:
raise ValueError('injection template requires named arguments split by ||')
left, right = args.split('=')
return left.strip(), right.strip()
        # Split the arg string on a double pipe. Joining the parts first undoes the automatic splitting done by markdown.
arg_strings = " ".join(parts[1:]).strip('\"').split("||")
# Parse into dictionary of key-value pairs based on the '=' notation.
try:
named_args = dict([_argify(args) for args in arg_strings])
except Exception as e:
raise Exception(f"Error parsing ![INJECT] arguments in {self.markdown.page_file} {repr(e)}")
# Take the template renderer and give it our string, and named args.
# Capture the output as a string.
try:
injectable_templated_str = render_template_string(raw_html_string, **named_args)
except Exception as e:
raise Exception(f"Error rendering ![INJECT] template for file {file_path} {repr(e)}")
# Feed that string to the XML parser.
try:
return ElementTree.fromstring(injectable_templated_str)
except Exception as e:
raise Exception(f"Error parsing ![INJECT] template for file {file_path} {repr(e)}")
@staticmethod
def _is_download(m):
""" Determine if the ALT text [] part of the link says 'DOWNLOAD'. """
alt = m.group(2)
return alt.lower() == 'download'
def handleMatch(self, m):
""" Use the URL extension to render the link. """
src, parts = self.get_src(m)
if self._is_download(m):
return self.as_download(m)
elif self._is_inject(m):
return self.as_raw(m)
youtube = self.youtube_url_validation(src)
if youtube:
return self.as_youtube(m, youtube)
src_lower = src.lower()
if src_lower.endswith(self.SUPPORTED_TABLES):
return self.as_csv(m)
elif src_lower.endswith(self.SUPPORTED_PDF):
return self.as_pdf(m)
elif src_lower.endswith(self.SUPPORTED_VIDEO):
return self.as_video(m)
return self.as_image(m)
class OffsetHashHeaderProcessor(HashHeaderProcessor):
""" Process hash headers with an offset to control the type of heading
DOM element that is generated. """
HEADING_LEVEL_OFFSET = 1
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()]
after = block[m.end():]
if before:
self.parser.parseBlocks(parent, [before])
heading_level = len(m.group('level'))
h = ElementTree.SubElement(parent, 'h%d' % (heading_level + self.HEADING_LEVEL_OFFSET))
h.text = m.group('header').strip()
if after:
blocks.insert(0, after)
class ChecklistPostprocessor(Postprocessor):
"""
Adds checklist class to list element.
Adapted from: `markdown_checklist.extension`
"""
pattern = re.compile(r'<li>\[([ Xx])\]')
def run(self, html):
html = re.sub(self.pattern, self._convert_checkbox, html)
before = '<ul>\n<li><input type="checkbox"'
after = before.replace('<ul>', '<ul class="checklist">')
html = html.replace(before, after)
return html
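        # e.g. a markdown item "- [x] write docs" arrives here as "<li>[x] write docs"
        # and leaves as a disabled, checked <input type="checkbox"> list entry.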
@staticmethod
def _convert_checkbox(match):
state = match.group(1)
checked = ' checked' if state != ' ' else ''
return '<li><input type="checkbox" disabled%s>' % checked
# Remove the `video`, `iframe`, `aside`, and `table` elements as block elements.
markdown.util.BLOCK_LEVEL_ELEMENTS = re.compile(
r"^(p|div|h[1-6]|blockquote|pre|dl|ol|ul"
r"|script|noscript|form|fieldset|math"
r"|hr|hr/|style|li|dt|dd|thead|tbody"
r"|tr|th|td|section|footer|header|group|figure"
r"|figcaption|article|canvas|output"
r"|progress|nav|main)$",
re.IGNORECASE
)
class MultiExtension(Extension):
""" Markdown `Extension` that adds our new components and
overrides some that we are not using.
"""
def extendMarkdown(self, md, md_globals):
""" Configure markdown by disabling elements and replacing them with
others. """
# Add checklist processing extension based on: 'markdown_checklist.extension'.
md.postprocessors.add('checklist', ChecklistPostprocessor(md), '>raw_html')
# Remove default patterns.
del md.inlinePatterns['image_link']
# Create a new one and insert into pipeline.
multi_purpose_pattern = MultiPurposeLinkPattern(IMAGE_LINK_RE, md)
md.inlinePatterns['multi_purpose_pattern'] = multi_purpose_pattern
# Remove line headers.
del md.parser.blockprocessors['setextheader']
# Swap hash headers for one that can change the DOM h1, h2 level.
md.parser.blockprocessors['hashheader'] = OffsetHashHeaderProcessor(md.parser)
# https://python-markdown.github.io/extensions/
mdextensions = [MultiExtension(),
'markdown.extensions.tables',
'markdown.extensions.meta',
'markdown.extensions.def_list',
'markdown.extensions.headerid',
'markdown.extensions.fenced_code',
'markdown.extensions.attr_list']
def build_meta_cache(root):
""" Recursively search for Markdown files and build a cache of `Meta`
from metadata in the Markdown.
:param root: str: The path to search for files from.
"""
doc_files = glob.iglob(root + '/**/*.md', recursive=True)
def _meta(path):
with open(path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(path)
Markup(md.convert(f.read()))
return md.Meta if hasattr(md, 'Meta') else None
doc_files_meta = {os.path.relpath(path, start=root): _meta(path) for path in doc_files}
doc_files_meta = {path: value for path, value in doc_files_meta.items() if value is not None}
# If a nav filter is set, exclude relevant documents.
# This takes the comma separated string supplied to `nav_limit`
# and excludes certain documents if they are NOT in this list.
global CMD_ARGS
if CMD_ARGS.nav_limit:
nav_filters = CMD_ARGS.nav_limit.split(',')
nav_filters = [nav_filter.strip().lower() for nav_filter in nav_filters]
nav_filters = [nav_filter for nav_filter in nav_filters if nav_filter]
def _should_include(doc_meta):
nav_strings = [nav.lower() for nav in doc_meta.get('nav', [])]
return any([y.startswith(x) for x in nav_filters for y in nav_strings])
doc_files_meta = {path: value for path, value in doc_files_meta.items() if _should_include(value)}
return doc_files_meta
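    # e.g. build_meta_cache("docs") could return something like (keys relative to "docs"):
    #   {"home.md": {"title": ["Home"], "nav": ["Home"]},
    #    "guides/setup.md": {"title": ["Setup"], "nav": ["Guides>Setup"]}}
    # where each value is the markdown `Meta` dict and every field is a list of strings.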
def build_nav_menu(meta_cache):
""" Given a cache of Markdown `Meta` data, compile a structure that can be
used to generate the NAV menu.
This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file.
"""
root = NavItem('root', 0)
    # Pre-sort the nav-items alphabetically by nav-string. This ordering is later refined by the arrange()
    # call below, but it keeps otherwise un-arranged items from moving around between page refreshes,
    # since dict ordering is not guaranteed here.
sorted_meta_cache = sorted(
meta_cache.items(),
        key=lambda items: items[1].get('nav', [''])[0].split('>')[-1]  # Sort by the last part of the nav string for each page.
)
for path, meta in sorted_meta_cache:
nav_str = meta.get('nav', [None])[0]
nav_chunks = parse_nav_string(nav_str)
node = root
for name, weight in nav_chunks:
n = NavItem(name, weight)
node = node.add(n)
node.bind(meta=meta, link=path)
root.arrange()
return root
def build_reload_files_list(extra_dirs):
""" Given a list of directories, return a list of files to watch for modification
and subsequent server reload. """
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
return extra_files
def read_html_for_injection(path):
""" Open an HTML file at the given path and return the contents
as a string. If the file does not exist, we raise an exception.
"""
# TODO: In the future, consider adding some caching here. However,
    # beware of the UX implications of reloading / refreshing the page.
with open(path) as file:
return file.read()
def _render_markdown(file_path, **kwargs):
""" Given a `file_path` render the Markdown and return the result of `render_template`.
"""
global NAV_MENU, PROJECT_LOGO, PDF_GENERATION_ENABLED
default_template = 'document'
with open(file_path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(file_path)
md.page_file = file_path
markup = Markup(md.convert(f.read()))
# Fetch the template defined in the metadata.
template = md.Meta.get('template', None)
template = template[0] if template else default_template
if not template:
raise Exception('no template found for document')
template = f'{template}.html'
# Load any HTML to be injected from the meta-data.
injections = md.Meta.get('inject', [])
injections = [os.path.join(md.page_root, file) for file in injections]
injections = [read_html_for_injection(file) for file in injections]
# Render it out with all the prepared data.
return render_template(template,
content=markup,
nav_menu=NAV_MENU,
project_logo=PROJECT_LOGO,
pdf_enabled=PDF_GENERATION_ENABLED,
injections=injections,
**md.Meta,
**kwargs)
def configure_flask(app, root_dir):
""" Setup the flask application within this scope. """
@app.before_first_request
def build_navigation_cache():
""" Build an in-memory cache of document meta-data.
NOTE: The design choice is made to crash the application if any
of the markdown files cannot be opened and parsed. In the
future when it becomes more stable, this will probably change.
"""
# This is called each time the server restarts.
global NAV_MENU
meta_cache = build_meta_cache(root_dir)
# Build the nav menu data-structure.
NAV_MENU = build_nav_menu(meta_cache)
# Store the reference to the function that rebuilds the navigation cache.
app.build_navigation_cache = build_navigation_cache
@app.template_filter('gravatar')
def gravatar(email, size=100, rating='g', default='retro', use_ssl=False):
""" Return a gravatar link for a given email address. """
url = "https://secure.gravatar.com/avatar/" if use_ssl else "http://www.gravatar.com/avatar/"
email = email.strip().lower().encode('utf-8')
hash_email = hashlib.md5(email).hexdigest()
return f'{url}{hash_email}?s={size}&r={rating}&d={default}'
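        # e.g. {{ 'user@example.com' | gravatar(size=40) }} in a template renders to
        # "http://www.gravatar.com/avatar/<md5-of-email>?s=40&r=g&d=retro"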
@app.template_filter()
def url_unquote(url):
""" Removes encoding around a URL. """
return urllib.parse.unquote(url)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route("/print_header")
def print_header():
""" Render the template for the header used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_header.html', project_logo=PROJECT_LOGO)
@app.route("/print_footer")
def print_footer():
""" Render the template for the footer used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_footer.html', project_logo=PROJECT_LOGO)
@app.errorhandler(404)
def page_not_found(e):
global NAV_MENU, PROJECT_LOGO
return render_template('404.html', nav_menu=NAV_MENU, project_logo=PROJECT_LOGO), 404
@app.route("/w/<path:page>")
def wiki(page):
""" Render the page. """
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' in [ext.lower() for ext in os.path.splitext(file_path)]:
return _render_markdown(file_path, current_page=page)
else:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
@app.route("/")
@app.route("/w/")
def homepage():
return wiki('home.md')
@app.route("/pdf/<path:page>")
def wiki_pdf(page):
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' not in [ext.lower() for ext in os.path.splitext(file_path)]:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
# Configure the different paths.
pdf_temp = f'{tempfile.mktemp()}.pdf'
input_url = url_for('wiki', page=page, _external=True)
header_url = url_for('print_header', _external=True)
footer_url = url_for('print_footer', _external=True)
args = f'{WKHTMLTOPDF_BINARY} --header-html {header_url} --footer-html {footer_url} \
--print-media-type --header-spacing 2 {input_url} {pdf_temp}'
# Invoke WkHTMLtoPDF
result = subprocess.check_output(args, shell=True)
if not result:
pass
# Write the newly generated temp pdf into a response.
with open(pdf_temp, 'rb') as f:
binary_pdf = f.read()
target_file_name = page.replace("/", "_").replace("\\", "_")
response = make_response(binary_pdf)
response.headers['Content-Type'] = 'application/pdf'
# response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf'
response.headers['Content-Disposition'] = f'inline; filename={target_file_name}.pdf'
# Delete the temp file and return the response.
os.remove(pdf_temp)
return response
def generate_static_pdf(app, root_dir, output_dir, nav_filter=None):
""" Generate a static PDF directory for the documentation in `root_dir`
into `output_dir`.
"""
global PORT_NUMBER
# Find all markdown document paths that are in the nav.
documents = build_meta_cache(root_dir)
markdown_docs_urls = ['pdf/' + file.replace('\\', '/') for file in documents.keys()]
# Generate URl to file pairs.
pairs = [(f'http://localhost:{PORT_NUMBER}/{url}',
f'{os.path.join(output_dir, *os.path.split(url))}.pdf')
for url in markdown_docs_urls]
# Download each pair.
for source, target in pairs:
os.makedirs(os.path.dirname(target), exist_ok=True)
print(f'Source: {source} \n Target: {target}')
urllib.request.urlretrieve(source, target)
# Helper to check whether a URL includes a network location (domain), i.e. is absolute.
def is_absolute(url):
""" Returns True if the passed url string is an absolute path.
False if not
"""
links = urlparse(url)
return bool(links.netloc)
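    # e.g. is_absolute("https://example.com/page") -> True
    #      is_absolute("images/logo.png") -> False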
def generate_static_html(app, root_dir, output_dir):
""" Generate a static HTML site for the documentation in `root_dir`
into `output_dir`.
"""
from flask_frozen import Freezer, MissingURLGeneratorWarning
import warnings
warnings.filterwarnings("ignore", category=MissingURLGeneratorWarning)
# Update the flask config.
app.config['FREEZER_RELATIVE_URLS'] = True
app.config['FREEZER_IGNORE_MIMETYPE_WARNINGS'] = True
app.config['FREEZER_DESTINATION'] = output_dir
# Create the freezer app. Make it use specific URLs.
freezer = Freezer(app, with_no_argument_rules=False, log_url_for=False)
# Register a generator that passes ALL files in the docs directory into the
# `wiki` flask route.
@freezer.register_generator
def wiki():
all_docs = [file.replace(f'{root_dir}', '/w').replace(f'{os.path.sep}', '/')
for file in glob.iglob(f'{root_dir}/**/*', recursive=True)
if os.path.isfile(file)]
for doc in all_docs:
yield doc
# Save all the URLs using the correct extension and MIME type.
freezer.freeze()
# For each `.md` file in the output directory:
for markdown_file in glob.iglob(f'{output_dir}/**/*.md', recursive=True):
# Rewrite all relative links to other `.md` files to `.html.`
output = ''
with open(markdown_file, 'r', encoding="utf-8") as f:
html = f.read()
def _href_replace(m):
href = m.group()
if is_absolute(href[6:-1]):
return href
return href.replace('.md', '.html')
output = re.sub('href="(.*md)"', _href_replace, html)
# Rename the file from `.md` to HTML.
with open(markdown_file[:-3] + '.html', 'w', encoding="utf-8") as f:
f.write(output)
# Delete the Markdown file.
os.remove(markdown_file)
def load_project_logo(logo_file=None):
""" Attempt to load the project logo from the specified path.
If this fails, return None. If this succeeds, convert it to a data-uri.
"""
if not logo_file:
return None
if not os.path.exists(logo_file):
return None
with open(logo_file, 'rb') as fp:
mime = 'image/png'
data64 = base64.b64encode(fp.read()).decode('utf-8')
preview_uri = u'data:%s;base64,%s' % (mime, data64)
return preview_uri
def check_pdf_generation_cap():
""" Check to see if we can use PDF generation by attempting to use the binary. """
global WKHTMLTOPDF_BINARY
retcode = subprocess.call(f'{WKHTMLTOPDF_BINARY} --version',
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return retcode == 0
def copy_local_project(force=False):
""" Copy the sample docs and style into the local working directory.
Note: This will overwrite anything currently in those folders.
"""
source_root = os.path.dirname(__file__)
target_root = os.getcwd()
targets = ['docs', 'style', 'logo.png']
pairs = [(os.path.join(source_root, path), os.path.join(target_root, path))
for path in targets]
for source, target in pairs:
if os.path.isdir(source):
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
shutil.rmtree(target)
shutil.copytree(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copytree(source, target)
else:
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
os.remove(target)
shutil.copyfile(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copyfile(source, target)
def find_references(document_path):
""" Search through the markdown 'document_path' and make a list of referenced files
with paths that are relative to the directory containing the `document_path`.
"""
# Open the file to search.
with open(document_path, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
# Render as HTML.
md = markdown.Markdown(extensions=mdextensions)
document_dir = os.path.dirname(document_path)
md.page_root = document_dir
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(md.convert(markdown_raw_data), 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
# For each entry in the `tags_to_search` table, extract the tag attribute value.
references = set()
for k, v in tags_to_search.items():
for tag in soup.find_all(k):
val = tag.get(v)
if val:
references.add(val)
# Normalise the referenced assets (to take into account relative paths).
references = [os.path.join(document_dir, urllib.request.url2pathname(ref)) for ref in references]
# Make unique.
return set(references)
def has_nav(markdown_text):
""" Returns True if the passed string of text contains navbar metadata.
Returns False if it does not.
"""
expression = re.compile(r'(?=\n|)nav:\s+\w+(?=\n |)')
return True if expression.search(markdown_text) else False
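    # e.g. has_nav("nav: tools>docs\n") -> True, has_nav("title: readme\n") -> False
    # (callers lower-case the markdown text before passing it in).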
def find_orphans(files):
""" Searches all files and folders recursively in the given path for image and video assets
that are unused by markdown files.
"""
# Find all references in
pages = {}
for file in files:
if file.endswith('.md'):
pages[file] = find_references(file)
# Remove the markdown documents that have a navbar metadata.
md_with_nav = []
for file in files:
if file.endswith('.md'):
with open(file, encoding='utf-8') as f:
if has_nav(f.read().lower()):
md_with_nav.append(file)
files = [x for x in files if x not in md_with_nav]
# Create a flat list of all references in the markdown files
all_references = []
for i in pages.values():
all_references += [k for k in i]
# Output unused assets
return [i for i in files if i not in all_references]
class DocumentLinks:
""" A helper class to process the `<a href.../>` links from a single
markdown document that is rendered using our own renderer.
"""
def __init__(self, md_file):
""" Open a Markdown document and find all links in `<a href .../>`.
"""
# Store important information about this document.
self.md_file = md_file
self.md_dir = os.path.dirname(md_file)
# Read in Markdown and generate HTML with our parser.
with open(md_file, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
md = markdown.Markdown(extensions=mdextensions)
md.page_root = self.md_dir
html = md.convert(markdown_raw_data)
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(html, 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
self.references = set()
for k, v in tags_to_search.items():
links = soup.find_all(k)
for link in links:
if link.get('href'):
if link.get('href').find('http:') > -1 or link.get('href').find('https:') > -1:
val = link.get(v)
if val:
self.references.add(val)
else:
val = link.get(v)
if val:
self.references.add(val)
@property
def web_links(self):
""" Generate a list of web links from our cached links.
"""
return [link for link in self.references if is_absolute(link)]
@property
def relative_links(self):
""" Generate a list of relative file system links from our cached links.
This converts from a web path to a path on disk then normalises the path to the current directory.
"""
def _norm(path):
return os.path.join(self.md_dir, urllib.request.url2pathname(path))
return [_norm(link) for link in self.references if not is_absolute(link)]
@staticmethod
def validate_url(address):
""" Returns `True` if page at address returns with status code 200 (ok) otherwise returns `False`.
"""
try:
request = requests.head(address)
return request.status_code, address
except requests.exceptions.RequestException:
return False, address
def detect_broken_links(self, process_pool):
""" Go through all the `web_links` and the `relative_links` and report
which are broken (i.e. do not resolve to HTTP200OK or a file on disk).
"""
result = process_pool.map(self.validate_url, self.web_links)
for response, url in result:
if not response == 200:
                yield url + ' Status: ' + (responses[response] if response in responses else "Exception")
for file in self.relative_links:
if not os.path.exists(file):
yield file
def generate_metadata(path):
""" Add relevant metadata to the top of the markdown file at the passed path.
Title is drawn from the filename, Date from the last modified timestamp, Version defaults at 1.0.0,
Nav is generated from the filepath, and Authors are generated from the git contributors (if applicable) and
are otherwise left blank.
Warning: Does not check if there is existing metadata.
"""
s = subprocess.getoutput(f"git log -p {path}")
lines = s.split(os.linesep)
    authors = set([re.search(r'<(.*)>', line).group(1) for line in lines if 'Author:' in line])
file_status = os.stat(path)
nav_path = os.path.sep.join(path.split(os.path.sep)[1:])
metadata = {
'title': ' '.join(
path
.split('.')[0]
.split(os.path.sep)[-1]
.replace('_', ' ')
.replace('-', ' ')
.title()
.split()
),
'desc': '',
'date': datetime.datetime.utcfromtimestamp(file_status.st_mtime).strftime('%Y/%m/%d'),
'version': '1.0.0',
'template': '',
'nav': nav_path.replace(os.path.sep, '>').title().split('.')[0],
'percent': '100',
'authors': ' '.join(authors),
}
result = ""
for key in metadata.keys():
result += ('{}:{}{}\n'.format(key, '\t' if len(key) > 6 else '\t\t', metadata[key]))
with open(path, 'r+', encoding='utf-8') as f:
content = f.read()
f.seek(0, 0)
f.write(result)
f.write(content)
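    # e.g. for "docs/user_guide/getting-started.md" this prepends a block roughly like:
    #   title:    Getting Started
    #   date:     <file mtime as YYYY/MM/DD>
    #   version:  1.0.0
    #   nav:      User_Guide>Getting-Started
    #   authors:  <git committer emails, if any>
    # plus empty desc/template fields and percent: 100.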
class ReloadHandler(PatternMatchingEventHandler):
""" Rebuild the document metadata / navigation cache when markdown files are updated
in the documents directory. """
def __init__(self, app):
super(ReloadHandler, self).__init__(patterns=['*.md'], ignore_directories=False, case_sensitive=False)
self.flask_app = app
def on_any_event(self, event):
self.flask_app.build_navigation_cache()
global CMD_ARGS, NAV_MENU, PROJECT_LOGO, WKHTMLTOPDF_BINARY, PDF_GENERATION_ENABLED, PORT_NUMBER
CMD_ARGS = None
NAV_MENU = {}
PROJECT_LOGO = None
WKHTMLTOPDF_BINARY = None
PDF_GENERATION_ENABLED = False
def main():
""" Application entrypoint. """
global PORT_NUMBER
PORT_NUMBER = 5000
# Parse the command line arguments.
parser = argparse.ArgumentParser(description='docnado: Lightweight tool for rendering \
Markdown documentation with different templates.')
parser.add_argument('--html', action='store', dest='html_output_dir',
help='Generate a static site from the server and output to the \
specified directory.')
parser.add_argument('--pdf', action='store', dest='pdf_output_dir',
help='Generate static PDFs from the server and output to the \
specified directory.')
parser.add_argument('--nav-limit', action='store', dest='nav_limit',
default=None,
help='Include certain document trees only based on a comma separated \
list of nav strings. e.g. Tooling,Document')
parser.add_argument('--new', action="store_true", dest='new_project',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Does not overwrite existing files.')
parser.add_argument('--new-force', action="store_true", dest='new_project_force',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Force deletion of existing files.')
parser.add_argument('--dirs', action="store_true", dest='show_dirs',
default=False,
help='Display the different directories the software is using \
to search for documentation and styles.')
parser.add_argument('--generate-meta', action="store", dest='generate_meta',
default=False,
help='Generate metadata for markdown files in the specified directory.')
parser.add_argument('--find-orphans', action="store_true", dest='find_orphans',
default=False,
help='Identify unused media assets (orphans)')
parser.add_argument('--find-broken-links', action="store_true", dest='find_broken_links',
default=False,
help='Identify broken external links.')
parser.add_argument('--port', action="store", dest='new_port_number',
default=False,
help='Specify a port for the docnado server')
parser.add_argument('--host', action="store", dest='set_host',
default=False,
help='Set the docnado development server to listen on IP addresses.')
# Import the command line args and make them application global.
global CMD_ARGS
args = parser.parse_args()
CMD_ARGS = args
# Load config from the environment and validate it.
global PROJECT_LOGO, PDF_GENERATION_ENABLED, NAV_MENU, WKHTMLTOPDF_BINARY
TRUE = 'TRUE'
FALSE = 'FALSE'
flask_debug = os.environ.get('DN_FLASK_DEBUG', FALSE) == TRUE
watch_changes = os.environ.get('DN_RELOAD_ON_CHANGES', TRUE) == TRUE
WKHTMLTOPDF_BINARY = ('wkhtmltopdf_0.12.5.exe' if platform.system() == 'Windows' else 'wkhtmltopdf')
PDF_GENERATION_ENABLED = check_pdf_generation_cap()
dir_documents = os.environ.get('DN_DOCS_DIR', os.path.join(os.getcwd(), 'docs'))
dir_style = os.environ.get('DN_STYLE_DIR', os.path.join(os.getcwd(), 'style'))
logo_location = os.environ.get('DN_PROJECT_LOGO', os.path.join(os.getcwd(), 'logo.png'))
# If `style` folder does not exist, use the one in site-packages.
if not os.path.exists(dir_style) and not os.path.isdir(dir_style):
dir_style = os.path.join(os.path.dirname(__file__), 'style')
# Attempt to load the project logo into a base64 data uri.
PROJECT_LOGO = load_project_logo(logo_location)
# Compute the static and template directories.
dir_static = os.path.join(dir_style, 'static')
dir_templates = os.path.join(dir_style, 'templates')
# If the user is asking to create a new project.
if args.new_project:
copy_local_project()
sys.exit()
if args.new_project_force:
copy_local_project(force=True)
return 0
if args.new_port_number:
PORT_NUMBER = int(args.new_port_number)
if args.generate_meta:
doc_files = glob.iglob(args.generate_meta + '/**/*.md', recursive=True)
for i in doc_files:
generate_metadata(i)
return 0
if args.find_orphans:
# Find all the assets in the directory/subdirectories recursively and append their file path to a list.
files = glob.glob((dir_documents + '/**/*.*'), recursive=True)
files = [f for f in files if not os.path.isdir(f)]
orphans = find_orphans(files)
if orphans:
print(f'{len(orphans)} Unused assets (orphans):\n\t' + '\n\t'.join(orphans))
return -1
return 0
if args.find_broken_links:
process_pool = Pool(processes=10)
md_files = glob.glob((dir_documents + '/**/*.md'), recursive=True)
md_reports = tuple((md, list(DocumentLinks(md).detect_broken_links(process_pool))) for md in md_files)
num_broken = 0
for file, report in md_reports:
if report:
num_broken += len(report)
print(f'{file}\n\t' + '\n\t'.join(report))
return -1 if num_broken else 0
if args.show_dirs:
print('The following directories are being used: ')
print('\t', f'Documents -> {dir_documents}')
print('\t', f'Logo -> {logo_location}')
print('\t', f'Style -> {dir_style}')
print('\t', f' Static -> {dir_static}')
print('\t', f' Templates -> {dir_templates}')
sys.exit()
if not os.path.exists(dir_documents) and not os.path.isdir(dir_documents):
print(f'Error: Documents directory "{dir_documents}" does not exist. \
Create one called `docs` and fill it with your documentation.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_static) and not os.path.isdir(dir_static):
print(f'Error: Static directory "{dir_static}" does not exist.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_templates) and not os.path.isdir(dir_templates):
print(f'Error: Templates directory "{dir_templates}" does not exist.', file=sys.stderr)
sys.exit(-1)
# Create the server.
app = Flask(__name__,
static_url_path='',
template_folder=dir_templates,
static_folder=dir_static)
# Attach routes and filters.
configure_flask(app, dir_documents)
# Output PDF files.
if args.pdf_output_dir:
if not check_pdf_generation_cap():
print(f'Error: PDF generation requires WkHTMLtoPDF.', file=sys.stderr)
sys.exit(-1)
def gen_pdfs():
time.sleep(2)
generate_static_pdf(
app, dir_documents, os.path.join(os.getcwd(), args.pdf_output_dir)
)
time.sleep(5)
os.kill(os.getpid(), signal.SIGTERM)
t1 = threading.Thread(target=gen_pdfs)
t1.start()
app.run(debug=flask_debug, threaded=True, port=PORT_NUMBER)
sys.exit()
# Output a static site.
if args.html_output_dir:
PDF_GENERATION_ENABLED = False
try:
generate_static_html(app, dir_documents, os.path.join(os.getcwd(), args.html_output_dir))
index_html = """ <!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=./w/">
</head>
<body>
</body>
</html>"""
with open(os.path.join(os.getcwd(), args.html_output_dir, 'index.html'), 'w') as f:
f.write(index_html)
except Exception:
traceback.print_exc(file=sys.stderr)
sys.exit(-1)
sys.exit()
# Watch for any changes in the docs or style directories.
dn_watch_files = []
observer = None
if watch_changes:
observer = Observer()
observer.schedule(ReloadHandler(app), path=dir_documents, recursive=True)
observer.start()
dn_watch_files = build_reload_files_list([__name__, dir_style])
# Run the server.
if args.set_host:
try:
            print('Attempting to set the development server to listen on a public IP address: ' + args.set_host)
print('WARNING: The Docnado development environment is intended to be used as a development tool ONLY, '
'and is not recommended for use in a production environment.')
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files, host=args.set_host)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
else:
try:
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
# if running docnado directly, boot the app
if __name__ == "__main__":
main()
| """ docnado.py
A rapid documentation tool that will blow you away.
"""
import os
import re
import sys
import csv
import glob
import time
import signal
import shutil
import urllib
import base64
import hashlib
import argparse
import tempfile
import datetime
import threading
import traceback
import subprocess
import platform
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
from urllib.parse import urlparse
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from xml.etree import ElementTree
from flask import Flask, url_for, abort, send_from_directory, \
render_template, Markup, make_response, render_template_string
import markdown
import markdown.util
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor
from markdown.inlinepatterns import LinkPattern, IMAGE_LINK_RE, dequote, handleAttributes
from markdown.blockprocessors import HashHeaderProcessor
from http.client import responses
if __package__:
from .navtree import NavItem, parse_nav_string
else:
from navtree import NavItem, parse_nav_string
class MultiPurposeLinkPattern(LinkPattern):
""" Embed image, video, youtube, csv or file download links
by extending the typical image tag pattern.
#  or 
If the link has "DOWNLOAD" in the alt text, treat it as a download.
Otherwise, see if its a YouTube video. Otherwise, see if its a
csv that can be turned into a table, otherwise if the link cannot be parsed
as a video, it will always be treated as an image.
"""
SUPPORTED_VIDEO = ('ogv', 'ogg', 'avi', 'mp4', 'webm', )
SUPPORTED_TABLES = ('csv', )
SUPPORTED_PDF = ('pdf', )
def get_src(self, m):
""" Get the source and parts from the matched groups: src, parts """
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
return self.sanitize_url(self.unescape(src)), src_parts
else:
return '', src_parts
@staticmethod
def youtube_url_validation(url):
""" Given a YouTube URL, return the ID component.
https://stackoverflow.com/questions/4705996
"""
youtube_regex = (r'(https?://)?(www\.)?'
r'(youtube|youtu|youtube-nocookie)\.(com|be)/'
r'(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?]{11})')
youtube_regex_match = re.match(youtube_regex, url)
return youtube_regex_match.group(6) if youtube_regex_match else None
@staticmethod
def as_youtube(m, video_id):
""" Return a DOM element that embeds a YouTube video. """
el = ElementTree.Element('iframe')
el.set('class', 'video')
el.set('src', f'https://www.youtube.com/embed/{video_id}?rel=0')
el.set('frameborder', '0')
el.set('allow', 'autoplay; encrypted-media')
el.set('allowfullscreen', '1')
return el
def as_pdf(self, m):
""" Return a DOM element that embeds a PDF document using an embed. """
src, parts = self.get_src(m)
wrapper = ElementTree.Element('aside')
wrapper.set('class', 'pdf-embed-wrapper')
el = ElementTree.SubElement(wrapper, 'embed')
el.set('class', 'pdf-embed')
el.set('src', src)
el.set('width', '100%')
el.set('type', 'application/pdf')
el.set('height', '100%') # width * 1.4142 (aspect ratio of a4)
el.set('pluginspage', 'http://www.adobe.com/products/acrobat/readstep2.html')
if len(parts) > 1:
el.set('alt', dequote(self.unescape(" ".join(parts[1:]))))
return wrapper
def as_video(self, m):
""" Return a video element """
src, parts = self.get_src(m)
el = ElementTree.Element('video')
el.set('src', src)
el.set("controls", "true")
handleAttributes(m.group(2), el)
return el
def as_image(self, m):
""" Return an image element """
el = ElementTree.Element('img')
src, parts = self.get_src(m)
el.set('src', src)
# Set the title if present.
if len(parts) > 1:
el.set('title', dequote(self.unescape(" ".join(parts[1:]))))
# Set the attributes on the element, if enabled.
# Set the 'alt' attribute with whatever is left from `handleAttributes`.
attrs = self.markdown.enable_attributes
alt_text = handleAttributes(m.group(2), el) if attrs else m.group(2)
el.set('alt', self.unescape(alt_text))
return el
def as_csv(self, m):
src, parts = self.get_src(m)
root = ElementTree.Element('table')
root.set('source', src)
root.set('class', 'csv-table table thead-light table-hover')
file_path = os.path.join(self.markdown.page_root, src)
with open(file_path, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
headers = next(reader)
rows = [r for r in reader]
thead = ElementTree.SubElement(root, 'thead')
for col in headers:
ElementTree.SubElement(thead, 'th').text = col
for row in rows:
tr = ElementTree.SubElement(root, 'tr')
for col in row:
ElementTree.SubElement(tr, 'td').text = col
return root
def as_download(self, m):
""" Create card layers used to make a download button. """
src, parts = self.get_src(m)
# Returns a human readable string representation of bytes
def _human_size(byte_number, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
return str(byte_number) + units[0] if byte_number < 1024 else _human_size(byte_number >> 10, units[1:])
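        # For example: _human_size(512) -> '512 bytes', _human_size(2048) -> '2KB',
        # _human_size(5 * 1024 ** 2) -> '5MB'.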
# Get information required for card.
split_src = os.path.split(src)
file_path = os.path.join(self.markdown.page_root, *split_src)
file_size = os.path.getsize(file_path)
file_basename = os.path.basename(file_path)
card_text = dequote(self.unescape(" ".join(parts[1:]))) if len(parts) > 1 else ''
        # If it's a pptx, extract the thumbnail previews.
        # NOTE: This works, but is removed until we support other
# file types, which for now is not a priority.
# preview_uri = None
# import zipfile
# if (file_path.endswith('pptx')):
# with zipfile.ZipFile(file_path) as zipper:
# with zipper.open('docProps/thumbnail.jpeg', 'r') as fp:
# mime = 'image/jpeg'
# data64 = base64.b64encode(fp.read()).decode('utf-8')
# preview_uri = u'data:%s;base64,%s' % (mime, data64)
# Card and structure.
card = ElementTree.Element("div")
card.set('class', 'card download-card')
header = ElementTree.SubElement(card, 'div')
header.set('class', 'download-card-header')
body = ElementTree.SubElement(card, 'div')
body.set('class', 'download-card-body')
# Add preview image.
# if preview_uri:
# img = ET.SubElement(header, 'img')
# img.set('src', preview_uri)
# Filename link heading.
heading = ElementTree.SubElement(body, 'a')
heading.set('class', 'download-card-title')
heading.set('href', src)
download_icon = ElementTree.SubElement(heading, 'i')
download_icon.set('class', 'fa fa-download')
download_text = ElementTree.SubElement(heading, 'span')
download_text.text = file_basename
# Title element from the "quote marks" part.
body_desc = ElementTree.SubElement(body, 'span')
body_desc.text = card_text
# File size span at the bottom.
body_size = ElementTree.SubElement(body, 'span')
body_size.set('class', 'small text-muted')
body_size.text = f'{_human_size(file_size)}'
return card
@staticmethod
def _is_inject(m):
""" Determine if the ALT text [] part of the link says 'INJECT'. """
alt = m.group(2)
return alt.lower() == 'inject'
def as_raw(self, m):
""" Load the HTML document specified in the link, parse it to HTML elements and return it.
"""
src, parts = self.get_src(m)
# Find the path to the HTML document, relative to the current markdown page.
file_path = os.path.join(self.markdown.page_root, src)
raw_html_string = read_html_for_injection(file_path)
if len(parts) < 2:
parts.append("nothing_one=1||nothing_two=2")
# Helper function.
def _argify(args):
if '=' not in args:
raise ValueError('injection template requires named arguments split by ||')
left, right = args.split('=')
return left.strip(), right.strip()
        # Join the parts back together (undoing markdown's automatic whitespace splitting), then split the arg string on double pipes.
arg_strings = " ".join(parts[1:]).strip('\"').split("||")
# Parse into dictionary of key-value pairs based on the '=' notation.
try:
named_args = dict([_argify(args) for args in arg_strings])
except Exception as e:
raise Exception(f"Error parsing ![INJECT] arguments in {self.markdown.page_file} {repr(e)}")
# Take the template renderer and give it our string, and named args.
# Capture the output as a string.
try:
injectable_templated_str = render_template_string(raw_html_string, **named_args)
except Exception as e:
raise Exception(f"Error rendering ![INJECT] template for file {file_path} {repr(e)}")
# Feed that string to the XML parser.
try:
return ElementTree.fromstring(injectable_templated_str)
except Exception as e:
raise Exception(f"Error parsing ![INJECT] template for file {file_path} {repr(e)}")
@staticmethod
def _is_download(m):
""" Determine if the ALT text [] part of the link says 'DOWNLOAD'. """
alt = m.group(2)
return alt.lower() == 'download'
def handleMatch(self, m):
""" Use the URL extension to render the link. """
src, parts = self.get_src(m)
if self._is_download(m):
return self.as_download(m)
elif self._is_inject(m):
return self.as_raw(m)
youtube = self.youtube_url_validation(src)
if youtube:
return self.as_youtube(m, youtube)
src_lower = src.lower()
if src_lower.endswith(self.SUPPORTED_TABLES):
return self.as_csv(m)
elif src_lower.endswith(self.SUPPORTED_PDF):
return self.as_pdf(m)
elif src_lower.endswith(self.SUPPORTED_VIDEO):
return self.as_video(m)
return self.as_image(m)
class OffsetHashHeaderProcessor(HashHeaderProcessor):
""" Process hash headers with an offset to control the type of heading
DOM element that is generated. """
HEADING_LEVEL_OFFSET = 1
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()]
after = block[m.end():]
if before:
self.parser.parseBlocks(parent, [before])
heading_level = len(m.group('level'))
h = ElementTree.SubElement(parent, 'h%d' % (heading_level + self.HEADING_LEVEL_OFFSET))
h.text = m.group('header').strip()
if after:
blocks.insert(0, after)
class ChecklistPostprocessor(Postprocessor):
"""
Adds checklist class to list element.
Adapted from: `markdown_checklist.extension`
"""
pattern = re.compile(r'<li>\[([ Xx])\]')
def run(self, html):
html = re.sub(self.pattern, self._convert_checkbox, html)
before = '<ul>\n<li><input type="checkbox"'
after = before.replace('<ul>', '<ul class="checklist">')
html = html.replace(before, after)
return html
@staticmethod
def _convert_checkbox(match):
state = match.group(1)
checked = ' checked' if state != ' ' else ''
return '<li><input type="checkbox" disabled%s>' % checked
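# With the postprocessor above, task-list Markdown such as
#     - [ ] write docs
#     - [x] ship release
# is rendered as disabled checkboxes inside a <ul class="checklist">.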
# Remove the `video`, `iframe`, `aside`, and `table` elements as block elements.
markdown.util.BLOCK_LEVEL_ELEMENTS = re.compile(
r"^(p|div|h[1-6]|blockquote|pre|dl|ol|ul"
r"|script|noscript|form|fieldset|math"
r"|hr|hr/|style|li|dt|dd|thead|tbody"
r"|tr|th|td|section|footer|header|group|figure"
r"|figcaption|article|canvas|output"
r"|progress|nav|main)$",
re.IGNORECASE
)
class MultiExtension(Extension):
""" Markdown `Extension` that adds our new components and
overrides some that we are not using.
"""
def extendMarkdown(self, md, md_globals):
""" Configure markdown by disabling elements and replacing them with
others. """
# Add checklist processing extension based on: 'markdown_checklist.extension'.
md.postprocessors.add('checklist', ChecklistPostprocessor(md), '>raw_html')
# Remove default patterns.
del md.inlinePatterns['image_link']
# Create a new one and insert into pipeline.
multi_purpose_pattern = MultiPurposeLinkPattern(IMAGE_LINK_RE, md)
md.inlinePatterns['multi_purpose_pattern'] = multi_purpose_pattern
# Remove line headers.
del md.parser.blockprocessors['setextheader']
# Swap hash headers for one that can change the DOM h1, h2 level.
md.parser.blockprocessors['hashheader'] = OffsetHashHeaderProcessor(md.parser)
# https://python-markdown.github.io/extensions/
mdextensions = [MultiExtension(),
'markdown.extensions.tables',
'markdown.extensions.meta',
'markdown.extensions.def_list',
'markdown.extensions.headerid',
'markdown.extensions.fenced_code',
'markdown.extensions.attr_list']
def build_meta_cache(root):
""" Recursively search for Markdown files and build a cache of `Meta`
from metadata in the Markdown.
:param root: str: The path to search for files from.
"""
doc_files = glob.iglob(root + '/**/*.md', recursive=True)
def _meta(path):
with open(path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(path)
Markup(md.convert(f.read()))
return md.Meta if hasattr(md, 'Meta') else None
doc_files_meta = {os.path.relpath(path, start=root): _meta(path) for path in doc_files}
doc_files_meta = {path: value for path, value in doc_files_meta.items() if value is not None}
# If a nav filter is set, exclude relevant documents.
# This takes the comma separated string supplied to `nav_limit`
# and excludes certain documents if they are NOT in this list.
global CMD_ARGS
if CMD_ARGS.nav_limit:
nav_filters = CMD_ARGS.nav_limit.split(',')
nav_filters = [nav_filter.strip().lower() for nav_filter in nav_filters]
nav_filters = [nav_filter for nav_filter in nav_filters if nav_filter]
def _should_include(doc_meta):
nav_strings = [nav.lower() for nav in doc_meta.get('nav', [])]
return any([y.startswith(x) for x in nav_filters for y in nav_strings])
doc_files_meta = {path: value for path, value in doc_files_meta.items() if _should_include(value)}
return doc_files_meta
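# The returned mapping keys relative Markdown paths to their `Meta` dictionaries,
# e.g. (hypothetical entry): {'guides/setup.md': {'title': ['Setup'], 'nav': ['Guides>Setup']}}.
# Note that python-markdown stores every metadata value as a list of strings.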
def build_nav_menu(meta_cache):
""" Given a cache of Markdown `Meta` data, compile a structure that can be
used to generate the NAV menu.
This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file.
"""
root = NavItem('root', 0)
    # Pre-sort the nav-items alphabetically by nav-string. This will get overridden by the arrange()
    # function, but it avoids un-arranged items moving around between page refreshes due to dicts being
    # unordered.
sorted_meta_cache = sorted(
meta_cache.items(),
key = lambda items: items[1].get('nav', [''])[0].split('>')[-1] # Sort by the last part of the nav string for each page.
)
for path, meta in sorted_meta_cache:
nav_str = meta.get('nav', [None])[0]
nav_chunks = parse_nav_string(nav_str)
node = root
for name, weight in nav_chunks:
n = NavItem(name, weight)
node = node.add(n)
node.bind(meta=meta, link=path)
root.arrange()
return root
def build_reload_files_list(extra_dirs):
""" Given a list of directories, return a list of files to watch for modification
and subsequent server reload. """
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
return extra_files
def read_html_for_injection(path):
""" Open an HTML file at the given path and return the contents
as a string. If the file does not exist, we raise an exception.
"""
# TODO: In the future, consider adding some caching here. However,
    # beware of the page reload / refresh UX implications.
with open(path) as file:
return file.read()
def _render_markdown(file_path, **kwargs):
""" Given a `file_path` render the Markdown and return the result of `render_template`.
"""
global NAV_MENU, PROJECT_LOGO, PDF_GENERATION_ENABLED
default_template = 'document'
with open(file_path, 'r', encoding='utf-8') as f:
md = markdown.Markdown(extensions=mdextensions)
md.page_root = os.path.dirname(file_path)
md.page_file = file_path
markup = Markup(md.convert(f.read()))
# Fetch the template defined in the metadata.
template = md.Meta.get('template', None)
template = template[0] if template else default_template
if not template:
raise Exception('no template found for document')
template = f'{template}.html'
# Load any HTML to be injected from the meta-data.
injections = md.Meta.get('inject', [])
injections = [os.path.join(md.page_root, file) for file in injections]
injections = [read_html_for_injection(file) for file in injections]
# Render it out with all the prepared data.
return render_template(template,
content=markup,
nav_menu=NAV_MENU,
project_logo=PROJECT_LOGO,
pdf_enabled=PDF_GENERATION_ENABLED,
injections=injections,
**md.Meta,
**kwargs)
def configure_flask(app, root_dir):
""" Setup the flask application within this scope. """
@app.before_first_request
def build_navigation_cache():
""" Build an in-memory cache of document meta-data.
NOTE: The design choice is made to crash the application if any
of the markdown files cannot be opened and parsed. In the
future when it becomes more stable, this will probably change.
"""
# This is called each time the server restarts.
global NAV_MENU
meta_cache = build_meta_cache(root_dir)
# Build the nav menu data-structure.
NAV_MENU = build_nav_menu(meta_cache)
# Store the reference to the function that rebuilds the navigation cache.
app.build_navigation_cache = build_navigation_cache
@app.template_filter('gravatar')
def gravatar(email, size=100, rating='g', default='retro', use_ssl=False):
""" Return a gravatar link for a given email address. """
url = "https://secure.gravatar.com/avatar/" if use_ssl else "http://www.gravatar.com/avatar/"
email = email.strip().lower().encode('utf-8')
hash_email = hashlib.md5(email).hexdigest()
return f'{url}{hash_email}?s={size}&r={rating}&d={default}'
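    # Example template usage (the hash placeholder stands for the md5 of the email):
    #   {{ 'user@example.com' | gravatar(size=40) }}
    #   -> http://www.gravatar.com/avatar/<md5-of-email>?s=40&r=g&d=retro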
@app.template_filter()
def url_unquote(url):
""" Removes encoding around a URL. """
return urllib.parse.unquote(url)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route("/print_header")
def print_header():
""" Render the template for the header used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_header.html', project_logo=PROJECT_LOGO)
@app.route("/print_footer")
def print_footer():
""" Render the template for the footer used when printing with WKPDFTOHTML. """
global PROJECT_LOGO
return render_template('print_footer.html', project_logo=PROJECT_LOGO)
@app.errorhandler(404)
def page_not_found(e):
global NAV_MENU, PROJECT_LOGO
return render_template('404.html', nav_menu=NAV_MENU, project_logo=PROJECT_LOGO), 404
@app.route("/w/<path:page>")
def wiki(page):
""" Render the page. """
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' in [ext.lower() for ext in os.path.splitext(file_path)]:
return _render_markdown(file_path, current_page=page)
else:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
@app.route("/")
@app.route("/w/")
def homepage():
return wiki('home.md')
@app.route("/pdf/<path:page>")
def wiki_pdf(page):
file_path = os.path.abspath(os.path.join(root_dir, page))
if not os.path.isfile(file_path):
abort(404)
if '.md' not in [ext.lower() for ext in os.path.splitext(file_path)]:
return send_from_directory(os.path.dirname(file_path), os.path.basename(file_path))
# Configure the different paths.
pdf_temp = f'{tempfile.mktemp()}.pdf'
input_url = url_for('wiki', page=page, _external=True)
header_url = url_for('print_header', _external=True)
footer_url = url_for('print_footer', _external=True)
args = f'{WKHTMLTOPDF_BINARY} --header-html {header_url} --footer-html {footer_url} \
--print-media-type --header-spacing 2 {input_url} {pdf_temp}'
# Invoke WkHTMLtoPDF
result = subprocess.check_output(args, shell=True)
if not result:
pass
# Write the newly generated temp pdf into a response.
with open(pdf_temp, 'rb') as f:
binary_pdf = f.read()
target_file_name = page.replace("/", "_").replace("\\", "_")
response = make_response(binary_pdf)
response.headers['Content-Type'] = 'application/pdf'
# response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf'
response.headers['Content-Disposition'] = f'inline; filename={target_file_name}.pdf'
# Delete the temp file and return the response.
os.remove(pdf_temp)
return response
def generate_static_pdf(app, root_dir, output_dir, nav_filter=None):
""" Generate a static PDF directory for the documentation in `root_dir`
into `output_dir`.
"""
global PORT_NUMBER
# Find all markdown document paths that are in the nav.
documents = build_meta_cache(root_dir)
markdown_docs_urls = ['pdf/' + file.replace('\\', '/') for file in documents.keys()]
    # Generate URL-to-file pairs.
pairs = [(f'http://localhost:{PORT_NUMBER}/{url}',
f'{os.path.join(output_dir, *os.path.split(url))}.pdf')
for url in markdown_docs_urls]
# Download each pair.
for source, target in pairs:
os.makedirs(os.path.dirname(target), exist_ok=True)
print(f'Source: {source} \n Target: {target}')
urllib.request.urlretrieve(source, target)
# Helper function to check whether a URL includes a domain (network location).
def is_absolute(url):
    """ Returns True if the passed URL string is an absolute URL (i.e. it has a
    network location), False if not.
    """
links = urlparse(url)
return bool(links.netloc)
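# For example: is_absolute('https://example.com/logo.png') -> True (has a network
# location), while is_absolute('img/logo.png') -> False (relative reference).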
def generate_static_html(app, root_dir, output_dir):
""" Generate a static HTML site for the documentation in `root_dir`
into `output_dir`.
"""
from flask_frozen import Freezer, MissingURLGeneratorWarning
import warnings
warnings.filterwarnings("ignore", category=MissingURLGeneratorWarning)
# Update the flask config.
app.config['FREEZER_RELATIVE_URLS'] = True
app.config['FREEZER_IGNORE_MIMETYPE_WARNINGS'] = True
app.config['FREEZER_DESTINATION'] = output_dir
# Create the freezer app. Make it use specific URLs.
freezer = Freezer(app, with_no_argument_rules=False, log_url_for=False)
# Register a generator that passes ALL files in the docs directory into the
# `wiki` flask route.
@freezer.register_generator
def wiki():
all_docs = [file.replace(f'{root_dir}', '/w').replace(f'{os.path.sep}', '/')
for file in glob.iglob(f'{root_dir}/**/*', recursive=True)
if os.path.isfile(file)]
for doc in all_docs:
yield doc
# Save all the URLs using the correct extension and MIME type.
freezer.freeze()
# For each `.md` file in the output directory:
for markdown_file in glob.iglob(f'{output_dir}/**/*.md', recursive=True):
        # Rewrite all relative links to other `.md` files to `.html`.
output = ''
with open(markdown_file, 'r', encoding="utf-8") as f:
html = f.read()
def _href_replace(m):
href = m.group()
if is_absolute(href[6:-1]):
return href
return href.replace('.md', '.html')
output = re.sub('href="(.*md)"', _href_replace, html)
# Rename the file from `.md` to HTML.
with open(markdown_file[:-3] + '.html', 'w', encoding="utf-8") as f:
f.write(output)
# Delete the Markdown file.
os.remove(markdown_file)
def load_project_logo(logo_file=None):
""" Attempt to load the project logo from the specified path.
If this fails, return None. If this succeeds, convert it to a data-uri.
"""
if not logo_file:
return None
if not os.path.exists(logo_file):
return None
with open(logo_file, 'rb') as fp:
mime = 'image/png'
data64 = base64.b64encode(fp.read()).decode('utf-8')
preview_uri = u'data:%s;base64,%s' % (mime, data64)
return preview_uri
def check_pdf_generation_cap():
""" Check to see if we can use PDF generation by attempting to use the binary. """
global WKHTMLTOPDF_BINARY
retcode = subprocess.call(f'{WKHTMLTOPDF_BINARY} --version',
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return retcode == 0
def copy_local_project(force=False):
""" Copy the sample docs and style into the local working directory.
    Note: with force=True this will overwrite anything currently in those folders.
"""
source_root = os.path.dirname(__file__)
target_root = os.getcwd()
targets = ['docs', 'style', 'logo.png']
pairs = [(os.path.join(source_root, path), os.path.join(target_root, path))
for path in targets]
for source, target in pairs:
if os.path.isdir(source):
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
shutil.rmtree(target)
shutil.copytree(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copytree(source, target)
else:
if os.path.exists(target):
if force:
                    print(f'Deleting existing {target} and replacing it with {source}')
os.remove(target)
shutil.copyfile(source, target)
else:
print(f'Warning: {target} already exists.')
else:
print(f'Copying: {source} -> {target}')
shutil.copyfile(source, target)
def find_references(document_path):
""" Search through the markdown 'document_path' and make a list of referenced files
with paths that are relative to the directory containing the `document_path`.
"""
# Open the file to search.
with open(document_path, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
# Render as HTML.
md = markdown.Markdown(extensions=mdextensions)
document_dir = os.path.dirname(document_path)
md.page_root = document_dir
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(md.convert(markdown_raw_data), 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
# For each entry in the `tags_to_search` table, extract the tag attribute value.
references = set()
for k, v in tags_to_search.items():
for tag in soup.find_all(k):
val = tag.get(v)
if val:
references.add(val)
# Normalise the referenced assets (to take into account relative paths).
references = [os.path.join(document_dir, urllib.request.url2pathname(ref)) for ref in references]
# Make unique.
return set(references)
def has_nav(markdown_text):
""" Returns True if the passed string of text contains navbar metadata.
Returns False if it does not.
"""
expression = re.compile(r'(?=\n|)nav:\s+\w+(?=\n |)')
return True if expression.search(markdown_text) else False
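# For example (callers lower-case the text first):
#   has_nav('title: home\nnav: home>intro\n')  -> True
#   has_nav('just a paragraph of text')        -> False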
def find_orphans(files):
""" Searches all files and folders recursively in the given path for image and video assets
that are unused by markdown files.
"""
    # Find all references made by each markdown file.
pages = {}
for file in files:
if file.endswith('.md'):
pages[file] = find_references(file)
    # Remove the markdown documents that have navbar metadata.
md_with_nav = []
for file in files:
if file.endswith('.md'):
with open(file, encoding='utf-8') as f:
if has_nav(f.read().lower()):
md_with_nav.append(file)
files = [x for x in files if x not in md_with_nav]
# Create a flat list of all references in the markdown files
all_references = []
for i in pages.values():
all_references += [k for k in i]
# Output unused assets
return [i for i in files if i not in all_references]
class DocumentLinks:
""" A helper class to process the `<a href.../>` links from a single
markdown document that is rendered using our own renderer.
"""
def __init__(self, md_file):
""" Open a Markdown document and find all links in `<a href .../>`.
"""
# Store important information about this document.
self.md_file = md_file
self.md_dir = os.path.dirname(md_file)
# Read in Markdown and generate HTML with our parser.
with open(md_file, 'r', encoding='utf-8') as f:
markdown_raw_data = f.read()
md = markdown.Markdown(extensions=mdextensions)
md.page_root = self.md_dir
html = md.convert(markdown_raw_data)
# Interpret with the BeautifulSoup HTML scraping library.
soup = BeautifulSoup(html, 'html.parser')
tags_to_search = {
'img': 'src',
'a': 'href',
'video': 'src',
'table': 'source',
'embed': 'src',
}
self.references = set()
for k, v in tags_to_search.items():
links = soup.find_all(k)
for link in links:
if link.get('href'):
                    val = link.get(v)
                    if val:
                        self.references.add(val)
@property
def web_links(self):
""" Generate a list of web links from our cached links.
"""
return [link for link in self.references if is_absolute(link)]
@property
def relative_links(self):
""" Generate a list of relative file system links from our cached links.
This converts from a web path to a path on disk then normalises the path to the current directory.
"""
def _norm(path):
return os.path.join(self.md_dir, urllib.request.url2pathname(path))
return [_norm(link) for link in self.references if not is_absolute(link)]
@staticmethod
def validate_url(address):
""" Returns `True` if page at address returns with status code 200 (ok) otherwise returns `False`.
"""
try:
request = requests.head(address)
return request.status_code, address
except requests.exceptions.RequestException:
return False, address
def detect_broken_links(self, process_pool):
""" Go through all the `web_links` and the `relative_links` and report
        which are broken (i.e. do not resolve to HTTP 200 OK or a file on disk).
"""
result = process_pool.map(self.validate_url, self.web_links)
for response, url in result:
            if response != 200:
                yield url + ' Status: ' + responses.get(response, "Exception")
for file in self.relative_links:
if not os.path.exists(file):
yield file
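# Minimal usage sketch for DocumentLinks (hypothetical document path; defined for
# illustration only and never called at import time).
def _example_report_broken_links(md_path='docs/home.md'):
    """ Print every broken link found in a single Markdown document. """
    with Pool(processes=4) as pool:
        for broken in DocumentLinks(md_path).detect_broken_links(pool):
            print(broken)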
def generate_metadata(path):
""" Add relevant metadata to the top of the markdown file at the passed path.
Title is drawn from the filename, Date from the last modified timestamp, Version defaults at 1.0.0,
Nav is generated from the filepath, and Authors are generated from the git contributors (if applicable) and
are otherwise left blank.
Warning: Does not check if there is existing metadata.
"""
s = subprocess.getoutput(f"git log -p {path}")
lines = s.split(os.linesep)
    authors = set([re.search(r'<(.*)>', line).group(1) for line in lines if 'Author:' in line])
file_status = os.stat(path)
nav_path = os.path.sep.join(path.split(os.path.sep)[1:])
metadata = {
'title': ' '.join(
path
.split('.')[0]
.split(os.path.sep)[-1]
.replace('_', ' ')
.replace('-', ' ')
.title()
.split()
),
'desc': '',
'date': datetime.datetime.utcfromtimestamp(file_status.st_mtime).strftime('%Y/%m/%d'),
'version': '1.0.0',
'template': '',
'nav': nav_path.replace(os.path.sep, '>').title().split('.')[0],
'percent': '100',
'authors': ' '.join(authors),
}
result = ""
for key in metadata.keys():
result += ('{}:{}{}\n'.format(key, '\t' if len(key) > 6 else '\t\t', metadata[key]))
with open(path, 'r+', encoding='utf-8') as f:
content = f.read()
f.seek(0, 0)
f.write(result)
f.write(content)
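# For a file such as docs/user_guide/getting_started.md (hypothetical path), the
# block prepended by generate_metadata looks roughly like:
#     title:    Getting Started
#     desc:
#     date:     2019/01/01
#     version:  1.0.0
#     template:
#     nav:      User_Guide>Getting_Started
#     percent:  100
#     authors:  someone@example.com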
class ReloadHandler(PatternMatchingEventHandler):
""" Rebuild the document metadata / navigation cache when markdown files are updated
in the documents directory. """
def __init__(self, app):
super(ReloadHandler, self).__init__(patterns=['*.md'], ignore_directories=False, case_sensitive=False)
self.flask_app = app
def on_any_event(self, event):
self.flask_app.build_navigation_cache()
global CMD_ARGS, NAV_MENU, PROJECT_LOGO, WKHTMLTOPDF_BINARY, PDF_GENERATION_ENABLED, PORT_NUMBER
CMD_ARGS = None
NAV_MENU = {}
PROJECT_LOGO = None
WKHTMLTOPDF_BINARY = None
PDF_GENERATION_ENABLED = False
def main():
""" Application entrypoint. """
global PORT_NUMBER
PORT_NUMBER = 5000
# Parse the command line arguments.
parser = argparse.ArgumentParser(description='docnado: Lightweight tool for rendering \
Markdown documentation with different templates.')
parser.add_argument('--html', action='store', dest='html_output_dir',
help='Generate a static site from the server and output to the \
specified directory.')
parser.add_argument('--pdf', action='store', dest='pdf_output_dir',
help='Generate static PDFs from the server and output to the \
specified directory.')
parser.add_argument('--nav-limit', action='store', dest='nav_limit',
default=None,
help='Include certain document trees only based on a comma separated \
list of nav strings. e.g. Tooling,Document')
parser.add_argument('--new', action="store_true", dest='new_project',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Does not overwrite existing files.')
parser.add_argument('--new-force', action="store_true", dest='new_project_force',
default=False,
help='Copy the `docs` and `styles` folder into the working directory \
and output a config file that addresses them. Force deletion of existing files.')
parser.add_argument('--dirs', action="store_true", dest='show_dirs',
default=False,
help='Display the different directories the software is using \
to search for documentation and styles.')
parser.add_argument('--generate-meta', action="store", dest='generate_meta',
default=False,
help='Generate metadata for markdown files in the specified directory.')
parser.add_argument('--find-orphans', action="store_true", dest='find_orphans',
default=False,
help='Identify unused media assets (orphans)')
parser.add_argument('--find-broken-links', action="store_true", dest='find_broken_links',
default=False,
help='Identify broken external links.')
parser.add_argument('--port', action="store", dest='new_port_number',
default=False,
help='Specify a port for the docnado server')
parser.add_argument('--host', action="store", dest='set_host',
default=False,
help='Set the docnado development server to listen on IP addresses.')
# Import the command line args and make them application global.
global CMD_ARGS
args = parser.parse_args()
CMD_ARGS = args
# Load config from the environment and validate it.
global PROJECT_LOGO, PDF_GENERATION_ENABLED, NAV_MENU, WKHTMLTOPDF_BINARY
TRUE = 'TRUE'
FALSE = 'FALSE'
flask_debug = os.environ.get('DN_FLASK_DEBUG', FALSE) == TRUE
watch_changes = os.environ.get('DN_RELOAD_ON_CHANGES', TRUE) == TRUE
WKHTMLTOPDF_BINARY = ('wkhtmltopdf_0.12.5.exe' if platform.system() == 'Windows' else 'wkhtmltopdf')
PDF_GENERATION_ENABLED = check_pdf_generation_cap()
dir_documents = os.environ.get('DN_DOCS_DIR', os.path.join(os.getcwd(), 'docs'))
dir_style = os.environ.get('DN_STYLE_DIR', os.path.join(os.getcwd(), 'style'))
logo_location = os.environ.get('DN_PROJECT_LOGO', os.path.join(os.getcwd(), 'logo.png'))
# If `style` folder does not exist, use the one in site-packages.
if not os.path.exists(dir_style) and not os.path.isdir(dir_style):
dir_style = os.path.join(os.path.dirname(__file__), 'style')
# Attempt to load the project logo into a base64 data uri.
PROJECT_LOGO = load_project_logo(logo_location)
# Compute the static and template directories.
dir_static = os.path.join(dir_style, 'static')
dir_templates = os.path.join(dir_style, 'templates')
# If the user is asking to create a new project.
if args.new_project:
copy_local_project()
sys.exit()
if args.new_project_force:
copy_local_project(force=True)
return 0
if args.new_port_number:
PORT_NUMBER = int(args.new_port_number)
if args.generate_meta:
doc_files = glob.iglob(args.generate_meta + '/**/*.md', recursive=True)
for i in doc_files:
generate_metadata(i)
return 0
if args.find_orphans:
# Find all the assets in the directory/subdirectories recursively and append their file path to a list.
files = glob.glob((dir_documents + '/**/*.*'), recursive=True)
files = [f for f in files if not os.path.isdir(f)]
orphans = find_orphans(files)
if orphans:
print(f'{len(orphans)} Unused assets (orphans):\n\t' + '\n\t'.join(orphans))
return -1
return 0
if args.find_broken_links:
process_pool = Pool(processes=10)
md_files = glob.glob((dir_documents + '/**/*.md'), recursive=True)
md_reports = tuple((md, list(DocumentLinks(md).detect_broken_links(process_pool))) for md in md_files)
num_broken = 0
for file, report in md_reports:
if report:
num_broken += len(report)
print(f'{file}\n\t' + '\n\t'.join(report))
return -1 if num_broken else 0
if args.show_dirs:
print('The following directories are being used: ')
print('\t', f'Documents -> {dir_documents}')
print('\t', f'Logo -> {logo_location}')
print('\t', f'Style -> {dir_style}')
print('\t', f' Static -> {dir_static}')
print('\t', f' Templates -> {dir_templates}')
sys.exit()
if not os.path.exists(dir_documents) and not os.path.isdir(dir_documents):
print(f'Error: Documents directory "{dir_documents}" does not exist. \
Create one called `docs` and fill it with your documentation.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_static) and not os.path.isdir(dir_static):
print(f'Error: Static directory "{dir_static}" does not exist.', file=sys.stderr)
sys.exit(-1)
if not os.path.exists(dir_templates) and not os.path.isdir(dir_templates):
print(f'Error: Templates directory "{dir_templates}" does not exist.', file=sys.stderr)
sys.exit(-1)
# Create the server.
app = Flask(__name__,
static_url_path='',
template_folder=dir_templates,
static_folder=dir_static)
# Attach routes and filters.
configure_flask(app, dir_documents)
# Output PDF files.
if args.pdf_output_dir:
if not check_pdf_generation_cap():
print(f'Error: PDF generation requires WkHTMLtoPDF.', file=sys.stderr)
sys.exit(-1)
def gen_pdfs():
time.sleep(2)
generate_static_pdf(
app, dir_documents, os.path.join(os.getcwd(), args.pdf_output_dir)
)
time.sleep(5)
os.kill(os.getpid(), signal.SIGTERM)
t1 = threading.Thread(target=gen_pdfs)
t1.start()
app.run(debug=flask_debug, threaded=True, port=PORT_NUMBER)
sys.exit()
# Output a static site.
if args.html_output_dir:
PDF_GENERATION_ENABLED = False
try:
generate_static_html(app, dir_documents, os.path.join(os.getcwd(), args.html_output_dir))
index_html = """ <!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="0; url=./w/">
</head>
<body>
</body>
</html>"""
with open(os.path.join(os.getcwd(), args.html_output_dir, 'index.html'), 'w') as f:
f.write(index_html)
except Exception:
traceback.print_exc(file=sys.stderr)
sys.exit(-1)
sys.exit()
# Watch for any changes in the docs or style directories.
dn_watch_files = []
observer = None
if watch_changes:
observer = Observer()
observer.schedule(ReloadHandler(app), path=dir_documents, recursive=True)
observer.start()
dn_watch_files = build_reload_files_list([__name__, dir_style])
# Run the server.
if args.set_host:
try:
            print('Attempting to set the development server to listen on public IP address: ' + args.set_host)
print('WARNING: The Docnado development environment is intended to be used as a development tool ONLY, '
'and is not recommended for use in a production environment.')
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files, host=args.set_host)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
else:
try:
app.run(debug=flask_debug, port=PORT_NUMBER, extra_files=dn_watch_files)
except OSError as e:
print(e)
print(f'Error initialising server.')
except KeyboardInterrupt:
pass
finally:
if observer:
observer.stop()
observer.join()
# if running docnado directly, boot the app
if __name__ == "__main__":
main()
| en | 0.751834 | docnado.py A rapid documentation tool that will blow you away. Embed image, video, youtube, csv or file download links by extending the typical image tag pattern. #  or  If the link has "DOWNLOAD" in the alt text, treat it as a download. Otherwise, see if its a YouTube video. Otherwise, see if its a csv that can be turned into a table, otherwise if the link cannot be parsed as a video, it will always be treated as an image. Get the source and parts from the matched groups: src, parts Given a YouTube URL, return the ID component. https://stackoverflow.com/questions/4705996 Return a DOM element that embeds a YouTube video. Return a DOM element that embeds a PDF document using an embed. # width * 1.4142 (aspect ratio of a4) Return a video element Return an image element # Set the title if present. # Set the attributes on the element, if enabled. # Set the 'alt' attribute with whatever is left from `handleAttributes`. Create card layers used to make a download button. # Returns a human readable string representation of bytes # Get information required for card. # If its a pptx, extract the thumbnail previews. # NOTE: This works, but is is removed until we support other # file types, which for now is not a priority. # preview_uri = None # import zipfile # if (file_path.endswith('pptx')): # with zipfile.ZipFile(file_path) as zipper: # with zipper.open('docProps/thumbnail.jpeg', 'r') as fp: # mime = 'image/jpeg' # data64 = base64.b64encode(fp.read()).decode('utf-8') # preview_uri = u'data:%s;base64,%s' % (mime, data64) # Card and structure. # Add preview image. # if preview_uri: # img = ET.SubElement(header, 'img') # img.set('src', preview_uri) # Filename link heading. # Title element from the "quote marks" part. # File size span at the bottom. Determine if the ALT text [] part of the link says 'INJECT'. Load the HTML document specified in the link, parse it to HTML elements and return it. # Find the path to the HTML document, relative to the current markdown page. # Helper function. # Split arg string on double pipe. Joins them to undo automattic splitting from the markdown. # Parse into dictionary of key-value pairs based on the '=' notation. # Take the template renderer and give it our string, and named args. # Capture the output as a string. # Feed that string to the XML parser. Determine if the ALT text [] part of the link says 'DOWNLOAD'. Use the URL extension to render the link. Process hash headers with an offset to control the type of heading DOM element that is generated. Adds checklist class to list element. Adapted from: `markdown_checklist.extension` # Remove the `video`, `iframe`, `aside`, and `table` elements as block elements. Markdown `Extension` that adds our new components and overrides some that we are not using. Configure markdown by disabling elements and replacing them with others. # Add checklist processing extension based on: 'markdown_checklist.extension'. # Remove default patterns. # Create a new one and insert into pipeline. # Remove line headers. # Swap hash headers for one that can change the DOM h1, h2 level. # https://python-markdown.github.io/extensions/ Recursively search for Markdown files and build a cache of `Meta` from metadata in the Markdown. :param root: str: The path to search for files from. # If a nav filter is set, exclude relevant documents. # This takes the comma separated string supplied to `nav_limit` # and excludes certain documents if they are NOT in this list. 
Given a cache of Markdown `Meta` data, compile a structure that can be used to generate the NAV menu. This uses the `nav: Assembly>Bench>Part` variable at the top of the Markdown file. # Pre-sort the nav-items alphabetically by nav-string. This will get overridden with the arange() # function, but this avoids-un arranged items moving round between page refreshes due to Dicts being # unordered. # Sort by the last part of the nav string for each page. Given a list of directories, return a list of files to watch for modification and subsequent server reload. Open an HTML file at the given path and return the contents as a string. If the file does not exist, we raise an exception. # TODO: In the future, consider adding some caching here. However, # beware of reloading / refereshing the page UX implications. Given a `file_path` render the Markdown and return the result of `render_template`. # Fetch the template defined in the metadata. # Load any HTML to be injected from the meta-data. # Render it out with all the prepared data. Setup the flask application within this scope. Build an in-memory cache of document meta-data. NOTE: The design choice is made to crash the application if any of the markdown files cannot be opened and parsed. In the future when it becomes more stable, this will probably change. # This is called each time the server restarts. # Build the nav menu data-structure. # Store the reference to the function that rebuilds the navigation cache. Return a gravatar link for a given email address. Removes encoding around a URL. Render the template for the header used when printing with WKPDFTOHTML. Render the template for the footer used when printing with WKPDFTOHTML. Render the page. # Configure the different paths. # Invoke WkHTMLtoPDF # Write the newly generated temp pdf into a response. # response.headers['Content-Disposition'] = f'attachment; filename={target_file_name}.pdf' # Delete the temp file and return the response. Generate a static PDF directory for the documentation in `root_dir` into `output_dir`. # Find all markdown document paths that are in the nav. # Generate URl to file pairs. # Download each pair. # Helper function to return the domain if present. Returns True if the passed url string is an absolute path. False if not Generate a static HTML site for the documentation in `root_dir` into `output_dir`. # Update the flask config. # Create the freezer app. Make it use specific URLs. # Register a generator that passes ALL files in the docs directory into the # `wiki` flask route. # Save all the URLs using the correct extension and MIME type. # For each `.md` file in the output directory: # Rewrite all relative links to other `.md` files to `.html.` # Rename the file from `.md` to HTML. # Delete the Markdown file. Attempt to load the project logo from the specified path. If this fails, return None. If this succeeds, convert it to a data-uri. Check to see if we can use PDF generation by attempting to use the binary. Copy the sample docs and style into the local working directory. Note: This will overwrite anything currently in those folders. Search through the markdown 'document_path' and make a list of referenced files with paths that are relative to the directory containing the `document_path`. # Open the file to search. # Render as HTML. # Interpret with the BeautifulSoup HTML scraping library. # For each entry in the `tags_to_search` table, extract the tag attribute value. # Normalise the referenced assets (to take into account relative paths). # Make unique. 
Returns True if the passed string of text contains navbar metadata. Returns False if it does not. Searches all files and folders recursively in the given path for image and video assets that are unused by markdown files. # Find all references in # Remove the markdown documents that have a navbar metadata. # Create a flat list of all references in the markdown files # Output unused assets A helper class to process the `<a href.../>` links from a single markdown document that is rendered using our own renderer. Open a Markdown document and find all links in `<a href .../>`. # Store important information about this document. # Read in Markdown and generate HTML with our parser. # Interpret with the BeautifulSoup HTML scraping library. Generate a list of web links from our cached links. Generate a list of relative file system links from our cached links. This converts from a web path to a path on disk then normalises the path to the current directory. Returns `True` if page at address returns with status code 200 (ok) otherwise returns `False`. Go through all the `web_links` and the `relative_links` and report which are broken (i.e. do not resolve to HTTP200OK or a file on disk). Add relevant metadata to the top of the markdown file at the passed path. Title is drawn from the filename, Date from the last modified timestamp, Version defaults at 1.0.0, Nav is generated from the filepath, and Authors are generated from the git contributors (if applicable) and are otherwise left blank. Warning: Does not check if there is existing metadata. Rebuild the document metadata / navigation cache when markdown files are updated in the documents directory. Application entrypoint. # Parse the command line arguments. # Import the command line args and make them application global. # Load config from the environment and validate it. # If `style` folder does not exist, use the one in site-packages. # Attempt to load the project logo into a base64 data uri. # Compute the static and template directories. # If the user is asking to create a new project. # Find all the assets in the directory/subdirectories recursively and append their file path to a list. # Create the server. # Attach routes and filters. # Output PDF files. # Output a static site. <!DOCTYPE html> <html> <head> <meta http-equiv="refresh" content="0; url=./w/"> </head> <body> </body> </html> # Watch for any changes in the docs or style directories. # Run the server. # if running brainerd directly, boot the app | 2.641512 | 3 |
modulo-3/aulas/modulos e pacotes/uteis.py | Luis-Felipe-N/curso-em-video-python | 0 | 9596 | <filename>modulo-3/aulas/modulos e pacotes/uteis.py
def fatorial(n):
f = 1
while n != 0:
f *= n
n -= 1
return f
def dobro(n):
n *= 2
return n
def triplo(n):
n *= 3
return n
| <filename>modulo-3/aulas/modulos e pacotes/uteis.py
def fatorial(n):
f = 1
while n != 0:
f *= n
n -= 1
return f
def dobro(n):
n *= 2
return n
def triplo(n):
n *= 3
return n
| none | 1 | 3.248291 | 3 |
|
server/src/oscarbluelight/tests/offer/test_benefit_percentage.py | MaximBrewer/sebe | 8 | 9597 | <reponame>MaximBrewer/sebe
from decimal import Decimal as D
from django.core import exceptions
from django.test import TestCase
from oscar.test import factories
from oscar.test.basket import add_product, add_products
from django_redis import get_redis_connection
from oscarbluelight.offer.models import (
Range,
Benefit,
BluelightCountCondition,
BluelightValueCondition,
BluelightPercentageDiscountBenefit,
)
from unittest import mock
class TestAPercentageDiscountAppliedWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightCountCondition(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=2,
)
self.benefit = BluelightPercentageDiscountBenefit(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_with_no_discountable_products(self):
product = factories.create_product(is_discountable=False)
add_product(self.basket, D("12.00"), 2, product=product)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(2, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("12.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(2 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("12.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_obeys_max_discount_setting(self):
self.benefit.max_discount = D("5.00")
self.benefit.save()
add_product(self.basket, D("12.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("5.00"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_records_reason_for_discount_no_voucher(self):
self.offer.name = "My Offer Name"
self.offer.description = "My Offer Description"
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = None
add_product(self.basket, D("5.00"))
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("1.00"))
self.assertEqual(descrs[0].offer_name, "My Offer Name")
self.assertEqual(descrs[0].offer_description, "My Offer Description")
self.assertIsNone(descrs[0].voucher_name)
self.assertIsNone(descrs[0].voucher_code)
def test_records_reason_for_discount_with_voucher(self):
voucher = mock.Mock()
voucher.name = "My Voucher"
voucher.code = "SWEETDEAL"
self.offer.name = "Offer for Voucher"
self.offer.description = ""
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = voucher
add_product(self.basket, D("5.00"))
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("1.00"))
self.assertEqual(descrs[0].offer_name, "Offer for Voucher")
self.assertEqual(descrs[0].offer_description, "")
self.assertEqual(descrs[0].voucher_name, "My Voucher")
self.assertEqual(descrs[0].voucher_code, "SWEETDEAL")
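# The expected discounts in these tests follow quantity * unit_price * (benefit value / 100),
# e.g. 2 * 12.00 * 0.20 == 4.80, optionally clamped by the benefit's max_discount.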
class TestAPercentageDiscountWithMaxItemsSetAppliedWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightCountCondition(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=2,
)
self.benefit = BluelightPercentageDiscountBenefit(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("12.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_products(self.basket, [(D("12.00"), 2), (D("20.00"), 2)])
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("12.00") * D("0.2"), result.discount)
# Should only consume the condition products
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(2, self.basket.num_items_without_discount)
class TestAPercentageDiscountWithMultipleApplicationsWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
self.range_mattresses = Range.objects.create(name="Mattresses")
self.range_slippers = Range.objects.create(name="Slippers")
self.mattress = factories.create_product(title="Mattress", price=D("2999.00"))
self.slipper1 = factories.create_product(title="Slipper", price=D("78.00"))
self.slipper2 = factories.create_product(title="Slipper", price=D("79.00"))
self.range_mattresses.add_product(self.mattress)
self.range_slippers.add_product(self.slipper1)
self.range_slippers.add_product(self.slipper2)
self.condition = BluelightCountCondition.objects.create(
range=self.range_mattresses,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=1,
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=self.range_slippers,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=D("100.00"),
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_basket_which_matches_multiple_lines_multiple_times(
self,
):
# Add two different lines to the basket
self.basket.add_product(self.mattress, 2)
self.basket.add_product(self.slipper1, 1)
self.basket.add_product(self.slipper2, 1)
# Apply once
self.assertTrue(self.condition.proxy().is_satisfied(self.offer, self.basket))
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertTrue(result.is_successful)
self.assertEqual(result.discount, D("78.00"))
self.assertEqual(self.basket.num_items_with_discount, 2)
self.assertEqual(self.basket.num_items_without_discount, 2)
# Apply second time
self.assertTrue(self.condition.proxy().is_satisfied(self.offer, self.basket))
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertTrue(result.is_successful)
self.assertEqual(result.discount, D("79.00"))
self.assertEqual(self.basket.num_items_with_discount, 4)
self.assertEqual(self.basket.num_items_without_discount, 0)
# Can't apply a third time because the condition is no longer satisfied
self.assertFalse(self.condition.proxy().is_satisfied(self.offer, self.basket))
class TestAPercentageDiscountAppliedWithValueCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightValueCondition.objects.create(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightValueCondition",
value=D("10.00"),
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("5.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(2 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition_but_matches_on_boundary(
self,
):
add_product(self.basket, D("5.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("4.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("4.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
class TestAPercentageDiscountWithMaxItemsSetAppliedWithValueCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightValueCondition.objects.create(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightValueCondition",
value=D("10.00"),
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("5.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition_but_matches_on_boundary(
self,
):
add_product(self.basket, D("5.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(1, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("4.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("4.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
class TestAPercentageDiscountBenefit(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
def test_requires_a_benefit_value(self):
rng = Range.objects.create(name="", includes_all_products=True)
benefit = Benefit.objects.create(
range=rng,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
)
with self.assertRaises(exceptions.ValidationError):
benefit.clean()
| from decimal import Decimal as D
from django.core import exceptions
from django.test import TestCase
from oscar.test import factories
from oscar.test.basket import add_product, add_products
from django_redis import get_redis_connection
from oscarbluelight.offer.models import (
Range,
Benefit,
BluelightCountCondition,
BluelightValueCondition,
BluelightPercentageDiscountBenefit,
)
from unittest import mock
class TestAPercentageDiscountAppliedWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightCountCondition(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=2,
)
self.benefit = BluelightPercentageDiscountBenefit(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_with_no_discountable_products(self):
product = factories.create_product(is_discountable=False)
add_product(self.basket, D("12.00"), 2, product=product)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(2, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("12.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(2 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("12.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_obeys_max_discount_setting(self):
self.benefit.max_discount = D("5.00")
self.benefit.save()
add_product(self.basket, D("12.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("5.00"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_records_reason_for_discount_no_voucher(self):
self.offer.name = "My Offer Name"
self.offer.description = "My Offer Description"
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = None
add_product(self.basket, D("5.00"))
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("1.00"))
self.assertEqual(descrs[0].offer_name, "My Offer Name")
self.assertEqual(descrs[0].offer_description, "My Offer Description")
self.assertIsNone(descrs[0].voucher_name)
self.assertIsNone(descrs[0].voucher_code)
def test_records_reason_for_discount_with_voucher(self):
voucher = mock.Mock()
voucher.name = "My Voucher"
voucher.code = "SWEETDEAL"
self.offer.name = "Offer for Voucher"
self.offer.description = ""
self.offer.get_voucher = mock.Mock()
self.offer.get_voucher.return_value = voucher
add_product(self.basket, D("5.00"))
# Apply benefit twice to simulate how Applicator will actually do it
self.benefit.apply(self.basket, self.condition, self.offer)
self.benefit.apply(self.basket, self.condition, self.offer)
line = self.basket.all_lines()[0]
descrs = line.get_discount_descriptions()
self.assertEqual(len(descrs), 1)
self.assertEqual(descrs[0].amount, D("1.00"))
self.assertEqual(descrs[0].offer_name, "Offer for Voucher")
self.assertEqual(descrs[0].offer_description, "")
self.assertEqual(descrs[0].voucher_name, "My Voucher")
self.assertEqual(descrs[0].voucher_code, "SWEETDEAL")
class TestAPercentageDiscountWithMaxItemsSetAppliedWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightCountCondition(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=2,
)
self.benefit = BluelightPercentageDiscountBenefit(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("12.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("12.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_products(self.basket, [(D("12.00"), 2), (D("20.00"), 2)])
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("12.00") * D("0.2"), result.discount)
# Should only consume the condition products
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(2, self.basket.num_items_without_discount)
class TestAPercentageDiscountWithMultipleApplicationsWithCountCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
self.range_mattresses = Range.objects.create(name="Mattresses")
self.range_slippers = Range.objects.create(name="Slippers")
self.mattress = factories.create_product(title="Mattress", price=D("2999.00"))
self.slipper1 = factories.create_product(title="Slipper", price=D("78.00"))
self.slipper2 = factories.create_product(title="Slipper", price=D("79.00"))
self.range_mattresses.add_product(self.mattress)
self.range_slippers.add_product(self.slipper1)
self.range_slippers.add_product(self.slipper2)
self.condition = BluelightCountCondition.objects.create(
range=self.range_mattresses,
proxy_class="oscarbluelight.offer.conditions.BluelightCountCondition",
value=1,
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=self.range_slippers,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=D("100.00"),
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_basket_which_matches_multiple_lines_multiple_times(
self,
):
# Add two different lines to the basket
self.basket.add_product(self.mattress, 2)
self.basket.add_product(self.slipper1, 1)
self.basket.add_product(self.slipper2, 1)
# Apply once
self.assertTrue(self.condition.proxy().is_satisfied(self.offer, self.basket))
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertTrue(result.is_successful)
self.assertEqual(result.discount, D("78.00"))
self.assertEqual(self.basket.num_items_with_discount, 2)
self.assertEqual(self.basket.num_items_without_discount, 2)
# Apply second time
self.assertTrue(self.condition.proxy().is_satisfied(self.offer, self.basket))
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertTrue(result.is_successful)
self.assertEqual(result.discount, D("79.00"))
self.assertEqual(self.basket.num_items_with_discount, 4)
self.assertEqual(self.basket.num_items_without_discount, 0)
# Can't apply a third time because the condition is no longer satisfied
self.assertFalse(self.condition.proxy().is_satisfied(self.offer, self.basket))
class TestAPercentageDiscountAppliedWithValueCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightValueCondition.objects.create(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightValueCondition",
value=D("10.00"),
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("5.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(2 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition_but_matches_on_boundary(
self,
):
add_product(self.basket, D("5.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("4.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(3 * D("4.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
class TestAPercentageDiscountWithMaxItemsSetAppliedWithValueCondition(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
range = Range.objects.create(name="All products", includes_all_products=True)
self.condition = BluelightValueCondition.objects.create(
range=range,
proxy_class="oscarbluelight.offer.conditions.BluelightValueCondition",
value=D("10.00"),
)
self.benefit = BluelightPercentageDiscountBenefit.objects.create(
range=range,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
value=20,
max_affected_items=1,
)
self.offer = mock.Mock()
self.basket = factories.create_basket(empty=True)
def test_applies_correctly_to_empty_basket(self):
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(D("0.00"), result.discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
add_product(self.basket, D("5.00"), 2)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition_but_matches_on_boundary(
self,
):
add_product(self.basket, D("5.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("5.00") * D("0.2"), result.discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(1, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
add_product(self.basket, D("4.00"), 3)
result = self.benefit.apply(self.basket, self.condition, self.offer)
self.assertEqual(1 * D("4.00") * D("0.2"), result.discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
class TestAPercentageDiscountBenefit(TestCase):
def setUp(self):
# Flush the cache
conn = get_redis_connection("redis")
conn.flushall()
def test_requires_a_benefit_value(self):
rng = Range.objects.create(name="", includes_all_products=True)
benefit = Benefit.objects.create(
range=rng,
proxy_class="oscarbluelight.offer.benefits.BluelightPercentageDiscountBenefit",
)
with self.assertRaises(exceptions.ValidationError):
benefit.clean() | en | 0.79443 | # Flush the cache # Apply benefit twice to simulate how Applicator will actually do it # Apply benefit twice to simulate how Applicator will actually do it # Flush the cache # Should only consume the condition products # Flush the cache # Add two different lines to the basket # Apply once # Apply second time # Can't apply a third time because the condition is no longer satisfied # Flush the cache # Flush the cache # Flush the cache | 2.206006 | 2 |
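The assertions in the percentage-discount tests above all reduce to the same arithmetic: the benefit takes `value` percent off the unit price of each affected item, the number of affected items may be capped by max_affected_items, and the total may be capped by max_discount. A minimal sketch of that arithmetic — not the actual BluelightPercentageDiscountBenefit implementation; the helper name and signature here are illustrative only:

from decimal import Decimal as D

def percentage_discount(unit_price, quantity, value,
                        max_affected_items=None, max_discount=None):
    # Discount `value` percent of the unit price for each affected item.
    affected = quantity if max_affected_items is None else min(quantity, max_affected_items)
    discount = affected * unit_price * (D(value) / D("100"))
    if max_discount is not None:
        # Cap the total discount, as test_obeys_max_discount_setting expects.
        discount = min(discount, max_discount)
    return discount

# Mirrors the expectations asserted in the tests above:
assert percentage_discount(D("5.00"), 2, 20) == 2 * D("5.00") * D("0.2")
assert percentage_discount(D("5.00"), 2, 20, max_affected_items=1) == 1 * D("5.00") * D("0.2")
assert percentage_discount(D("12.00"), 3, 20, max_discount=D("5.00")) == D("5.00")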
CLCC/ex8.py | adstr123/LPTHW | 0 | 9598 | # Moving around directories with pushd & popd
# You can save directories to go back to later. These can be built up in a stack.
#pushd i/like/icecream
# current stack: ~temp/i/like/icecream
#pushd i/like
# current stack: ~temp/i/like ~temp/i/like/icecream
#popd
# PS ~temp/i/like
#popd
# PS ~temp/i/like/icecream
# You can also add a directory as an argument to a pushd command to also immediately change to that directory | # Moving around directories with pushd & popd
# You can save directories to go back to later. These can be built up in a stack.
#pushd i/like/icecream
# current stack: ~temp/i/like/icecream
#pushd i/like
# current stack: ~temp/i/like ~temp/i/like/icecream
#popd
# PS ~temp/i/like
#popd
# PS ~temp/i/like/icecream
# You can also add a directory as an argument to a pushd command to also immediately change to that directory | en | 0.851204 | # Moving around directories with pushd & popd # You can save directories to go back to later. These can be built up in a stack. #pushd i/like/icecream # current stack: ~temp/i/like/icecream #pushd i/like # current stack: ~temp/i/like ~temp/i/like/icecream #popd # PS ~temp/i/like #popd # PS ~temp/i/like/icecream # You can also add a directory as an argument to a pushd command to also immediately change to that directory | 1.842425 | 2
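The notes in CLCC/ex8.py above describe pushd and popd as a stack of saved directories. A small, hypothetical Python equivalent of that behaviour (not part of the exercise file; os.getcwd and os.chdir stand in for the shell's directory handling):

import os

_dir_stack = []

def pushd(path):
    # Remember where we are, then change into the new directory.
    _dir_stack.append(os.getcwd())
    os.chdir(path)

def popd():
    # Return to the most recently saved directory (last in, first out).
    os.chdir(_dir_stack.pop())

# Usage sketch: pushd("i/like/icecream") saves the current directory and moves
# into the new one; a later popd() returns to wherever pushd was called from.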
mkt/search/tests/test_filters.py | clouserw/zamboni | 0 | 9599 | <gh_stars>0
# -*- coding: utf-8 -*-
import json
from nose.tools import eq_, ok_
from rest_framework.exceptions import ParseError
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.test.utils import override_settings
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.features import FeatureProfile
from mkt.search.filters import (DeviceTypeFilter, ProfileFilter,
PublicAppsFilter, PublicSearchFormFilter,
RegionFilter, SearchQueryFilter, SortingFilter,
ValidAppsFilter)
from mkt.search.forms import TARAKO_CATEGORIES_MAPPING
from mkt.search.views import SearchView
from mkt.site.tests import TestCase
from mkt.webapps.indexers import WebappIndexer
class FilterTestsBase(TestCase):
def setUp(self):
super(FilterTestsBase, self).setUp()
self.req = RequestFactory().get('/')
self.req.user = AnonymousUser()
self.view_class = SearchView
def _filter(self, req=None, data=None):
req = req or RequestFactory().get('/', data=data or {})
req.user = AnonymousUser()
queryset = WebappIndexer.search()
for filter_class in self.filter_classes:
queryset = filter_class().filter_queryset(req, queryset,
self.view_class)
return queryset.to_dict()
class TestQueryFilter(FilterTestsBase):
filter_classes = [SearchQueryFilter]
def test_q(self):
qs = self._filter(data={'q': 'search terms'})
# Spot check a few queries.
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name': {'query': 'search terms', 'boost': 4,
'slop': 1, 'type': 'phrase'}}}
in should)
ok_({'prefix': {'name': {'boost': 1.5, 'value': 'search terms'}}}
in should)
ok_({'match': {'name_english': {'query': 'search terms',
'boost': 2.5}}}
in should)
ok_({'match': {'description_english': {'query': 'search terms',
'boost': 0.6,
'analyzer': 'english_analyzer',
'type': 'phrase'}}}
in should)
def test_fuzzy_single_word(self):
qs = self._filter(data={'q': 'term'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'fuzzy': {'tags': {'prefix_length': 1, 'value': 'term'}}}
in should)
def test_no_fuzzy_multi_word(self):
qs = self._filter(data={'q': 'search terms'})
qs_str = json.dumps(qs)
ok_('fuzzy' not in qs_str)
@override_settings(ES_USE_PLUGINS=True)
def test_polish_analyzer(self):
"""
Test that the polish analyzer is included correctly since it is an
exception to the rest b/c it is a plugin.
"""
with self.activate(locale='pl'):
qs = self._filter(data={'q': u'próba'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name_polish': {'query': u'pr\xf3ba',
'boost': 2.5}}}
in should)
ok_({'match': {'description_polish': {'query': u'pr\xf3ba',
'boost': 0.6,
'analyzer': 'polish',
'type': 'phrase'}}}
in should)
class TestFormFilter(FilterTestsBase):
filter_classes = [PublicSearchFormFilter]
def test_category(self):
qs = self._filter(data={'cat': 'games'})
ok_({'terms': {'category': ['games']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tag(self):
qs = self._filter(data={'tag': 'tarako'})
ok_({'term': {'tags': 'tarako'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tarako_categories(self):
qs = self._filter(data={'cat': 'tarako-lifestyle'})
ok_({'terms':
{'category': TARAKO_CATEGORIES_MAPPING['tarako-lifestyle']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-games'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-games']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-tools'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-tools']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type(self):
qs = self._filter(data={'app_type': ['hosted']})
ok_({'terms': {'app_type': [1]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type_packaged(self):
"""Test packaged also includes privileged."""
qs = self._filter(data={'app_type': ['packaged']})
ok_({'terms': {'app_type': [2, 3]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_manifest_url(self):
url = 'http://hy.fr/manifest.webapp'
qs = self._filter(data={'manifest_url': url})
ok_({'term': {'manifest_url': url}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline(self):
"""Ensure we are filtering by offline-capable apps."""
qs = self._filter(data={'offline': 'True'})
ok_({'term': {'is_offline': True}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_online(self):
"""Ensure we are filtering by apps that require online access."""
qs = self._filter(data={'offline': 'False'})
ok_({'term': {'is_offline': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline_and_online(self):
"""Ensure we are not filtering by offline/online by default."""
# Pass any form values other than 'offline' to create the dict.
qs = self._filter(data={'cat': 'games'})
ok_({'term': {'is_offline': True}}
not in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_offline': False}}
not in qs['query']['filtered']['filter']['bool']['must'])
def test_languages(self):
qs = self._filter(data={'languages': 'fr'})
ok_({'terms': {'supported_locales': ['fr']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'languages': 'ar,en-US'})
ok_({'terms': {'supported_locales': ['ar', 'en-US']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_author(self):
qs = self._filter(data={'author': 'Mozilla LABS'})
ok_({'term': {'author.raw': u'mozilla labs'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_installs_allowed_from(self):
qs = self._filter(data={'installs_allowed_from': '*'})
ok_({'term': {'installs_allowed_from': u'*'}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test that we don't filter by this field if not provided.
qs = self._filter()
ok_('installs_allowed_from' not in json.dumps(qs),
"Unexpected 'installs_allowed_from' in query")
def test_premium_types(self):
def ptype(p):
return mkt.ADDON_PREMIUM_API_LOOKUP.get(p)
# Test a single premium type.
qs = self._filter(data={'premium_types': ['free']})
ok_({'terms': {'premium_type': [ptype('free')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test many premium types.
qs = self._filter(data={'premium_types': ['free', 'free-inapp']})
ok_({'terms': {'premium_type': [ptype('free'), ptype('free-inapp')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test a non-existent premium type.
with self.assertRaises(ParseError):
self._filter(data={'premium_types': ['free', 'platinum']})
def test_device(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': DEVICE_CHOICES_IDS['desktop']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_no_device_with_device_type(self):
"""Test that providing a device type w/o device doesn't filter."""
qs = self._filter(data={'dev': '', 'device': 'firefoxos'})
ok_('filtered' not in qs['query'].keys())
class TestPublicAppsFilter(FilterTestsBase):
filter_classes = [PublicAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'term': {'status': mkt.STATUS_PUBLIC}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestValidAppsFilter(FilterTestsBase):
filter_classes = [ValidAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'terms': {'status': mkt.VALID_STATUSES}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestDeviceTypeFilter(FilterTestsBase):
filter_classes = [DeviceTypeFilter]
def test_no_filters(self):
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_mobile(self):
self.req.MOBILE = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_gaia(self):
self.req.GAIA = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tablet(self):
self.req.TABLET = True
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_device_in_querystring(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': 1}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'mobile'})
ok_({'term': {'device': 2}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'tablet'})
ok_({'term': {'device': 3}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'firefoxos'})
ok_({'term': {'device': 4}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestRegionFilter(FilterTestsBase):
filter_classes = [RegionFilter]
def test_no_region_default(self):
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.RESTOFWORLD.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_region(self):
self.req.REGION = mkt.regions.BRA
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.BRA.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestProfileFilter(FilterTestsBase):
filter_classes = [ProfileFilter]
def profile_qs(self, disabled_features=None):
if disabled_features is None:
disabled_features = {}
profile = FeatureProfile().fromkeys(FeatureProfile(), True)
for feature in disabled_features:
profile[feature] = False
return {'pro': profile.to_signature(), 'dev': 'firefoxos'}
def test_filter_all_features_present(self):
qs = self._filter(data=self.profile_qs())
ok_('filtered' not in qs['query'].keys())
def test_filter_one_feature_present(self):
qs = self._filter(data=self.profile_qs(disabled_features=['sms']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_filter_one_feature_present_desktop(self):
data = self.profile_qs(disabled_features=['sms'])
data['dev'] = 'desktop'
qs = self._filter(data=data)
ok_('filtered' not in qs['query'].keys())
def test_filter_multiple_features_present(self):
qs = self._filter(
data=self.profile_qs(disabled_features=['sms', 'apps']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
ok_({'term': {'features.has_apps': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestSortingFilter(FilterTestsBase):
filter_classes = [SortingFilter]
def test_sort(self):
for api_sort, es_sort in SortingFilter.DEFAULT_SORTING.items():
qs = self._filter(data={'sort': [api_sort]})
if es_sort.startswith('-'):
ok_({es_sort[1:]: {'order': 'desc'}} in qs['sort'], qs)
else:
eq_([es_sort], qs['sort'], qs)
def test_sort_multiple(self):
qs = self._filter(data={'sort': ['rating', 'created']})
ok_({'bayesian_rating': {'order': 'desc'}} in qs['sort'])
ok_({'created': {'order': 'desc'}} in qs['sort'])
def test_sort_regional(self):
"""Popularity and trending use regional sorting for mature regions."""
req = RequestFactory().get('/')
req.REGION = mkt.regions.BRA
# Default empty query searches use popularity.
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Popularity.
req = RequestFactory().get('/', data={'sort': ['popularity']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Trending.
req = RequestFactory().get('/', data={'sort': ['trending']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'trending_%s' % mkt.regions.BRA.id: {'order': 'desc'}}
in qs['sort'])
class TestCombinedFilter(FilterTestsBase):
"""
Basic test to ensure that when filters are combined they result in the
expected query structure.
"""
filter_classes = [SearchQueryFilter, PublicSearchFormFilter,
PublicAppsFilter, SortingFilter]
def test_combined(self):
qs = self._filter(data={'q': 'test', 'cat': 'games',
'sort': 'trending'})
ok_(qs['query']['filtered']['query']['function_score'])
ok_(qs['query']['filtered']['filter'])
must = qs['query']['filtered']['filter']['bool']['must']
ok_({'terms': {'category': ['games']}} in must)
ok_({'term': {'status': 4}} in must)
ok_({'term': {'is_disabled': False}} in must)
ok_({'trending': {'order': 'desc'}} in qs['sort'])
query = qs['query']['filtered']['query']
ok_({'field_value_factor': {'field': 'boost'}}
in query['function_score']['functions'])
ok_({'match': {'name_english': {'boost': 2.5, 'query': u'test'}}}
in query['function_score']['query']['bool']['should'])
| # -*- coding: utf-8 -*-
import json
from nose.tools import eq_, ok_
from rest_framework.exceptions import ParseError
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.test.utils import override_settings
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.features import FeatureProfile
from mkt.search.filters import (DeviceTypeFilter, ProfileFilter,
PublicAppsFilter, PublicSearchFormFilter,
RegionFilter, SearchQueryFilter, SortingFilter,
ValidAppsFilter)
from mkt.search.forms import TARAKO_CATEGORIES_MAPPING
from mkt.search.views import SearchView
from mkt.site.tests import TestCase
from mkt.webapps.indexers import WebappIndexer
class FilterTestsBase(TestCase):
def setUp(self):
super(FilterTestsBase, self).setUp()
self.req = RequestFactory().get('/')
self.req.user = AnonymousUser()
self.view_class = SearchView
def _filter(self, req=None, data=None):
req = req or RequestFactory().get('/', data=data or {})
req.user = AnonymousUser()
queryset = WebappIndexer.search()
for filter_class in self.filter_classes:
queryset = filter_class().filter_queryset(req, queryset,
self.view_class)
return queryset.to_dict()
class TestQueryFilter(FilterTestsBase):
filter_classes = [SearchQueryFilter]
def test_q(self):
qs = self._filter(data={'q': 'search terms'})
# Spot check a few queries.
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name': {'query': 'search terms', 'boost': 4,
'slop': 1, 'type': 'phrase'}}}
in should)
ok_({'prefix': {'name': {'boost': 1.5, 'value': 'search terms'}}}
in should)
ok_({'match': {'name_english': {'query': 'search terms',
'boost': 2.5}}}
in should)
ok_({'match': {'description_english': {'query': 'search terms',
'boost': 0.6,
'analyzer': 'english_analyzer',
'type': 'phrase'}}}
in should)
def test_fuzzy_single_word(self):
qs = self._filter(data={'q': 'term'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'fuzzy': {'tags': {'prefix_length': 1, 'value': 'term'}}}
in should)
def test_no_fuzzy_multi_word(self):
qs = self._filter(data={'q': 'search terms'})
qs_str = json.dumps(qs)
ok_('fuzzy' not in qs_str)
@override_settings(ES_USE_PLUGINS=True)
def test_polish_analyzer(self):
"""
Test that the polish analyzer is included correctly since it is an
exception to the rest b/c it is a plugin.
"""
with self.activate(locale='pl'):
qs = self._filter(data={'q': u'próba'})
should = (qs['query']['function_score']['query']['bool']['should'])
ok_({'match': {'name_polish': {'query': u'pr\xf3ba',
'boost': 2.5}}}
in should)
ok_({'match': {'description_polish': {'query': u'pr\xf3ba',
'boost': 0.6,
'analyzer': 'polish',
'type': 'phrase'}}}
in should)
class TestFormFilter(FilterTestsBase):
filter_classes = [PublicSearchFormFilter]
def test_category(self):
qs = self._filter(data={'cat': 'games'})
ok_({'terms': {'category': ['games']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tag(self):
qs = self._filter(data={'tag': 'tarako'})
ok_({'term': {'tags': 'tarako'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tarako_categories(self):
qs = self._filter(data={'cat': 'tarako-lifestyle'})
ok_({'terms':
{'category': TARAKO_CATEGORIES_MAPPING['tarako-lifestyle']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-games'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-games']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'cat': 'tarako-tools'})
ok_({'terms': {'category': TARAKO_CATEGORIES_MAPPING['tarako-tools']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type(self):
qs = self._filter(data={'app_type': ['hosted']})
ok_({'terms': {'app_type': [1]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_app_type_packaged(self):
"""Test packaged also includes privileged."""
qs = self._filter(data={'app_type': ['packaged']})
ok_({'terms': {'app_type': [2, 3]}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_manifest_url(self):
url = 'http://hy.fr/manifest.webapp'
qs = self._filter(data={'manifest_url': url})
ok_({'term': {'manifest_url': url}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline(self):
"""Ensure we are filtering by offline-capable apps."""
qs = self._filter(data={'offline': 'True'})
ok_({'term': {'is_offline': True}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_online(self):
"""Ensure we are filtering by apps that require online access."""
qs = self._filter(data={'offline': 'False'})
ok_({'term': {'is_offline': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_offline_and_online(self):
"""Ensure we are not filtering by offline/online by default."""
# Pass any form values other than 'offline' to create the dict.
qs = self._filter(data={'cat': 'games'})
ok_({'term': {'is_offline': True}}
not in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_offline': False}}
not in qs['query']['filtered']['filter']['bool']['must'])
def test_languages(self):
qs = self._filter(data={'languages': 'fr'})
ok_({'terms': {'supported_locales': ['fr']}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'languages': 'ar,en-US'})
ok_({'terms': {'supported_locales': ['ar', 'en-US']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_author(self):
qs = self._filter(data={'author': 'Mozilla LABS'})
ok_({'term': {'author.raw': u'mozilla labs'}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_installs_allowed_from(self):
qs = self._filter(data={'installs_allowed_from': '*'})
ok_({'term': {'installs_allowed_from': u'*'}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test that we don't filter by this field if not provided.
qs = self._filter()
ok_('installs_allowed_from' not in json.dumps(qs),
"Unexpected 'installs_allowed_from' in query")
def test_premium_types(self):
def ptype(p):
return mkt.ADDON_PREMIUM_API_LOOKUP.get(p)
# Test a single premium type.
qs = self._filter(data={'premium_types': ['free']})
ok_({'terms': {'premium_type': [ptype('free')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test many premium types.
qs = self._filter(data={'premium_types': ['free', 'free-inapp']})
ok_({'terms': {'premium_type': [ptype('free'), ptype('free-inapp')]}}
in qs['query']['filtered']['filter']['bool']['must'])
# Test a non-existent premium type.
with self.assertRaises(ParseError):
self._filter(data={'premium_types': ['free', 'platinum']})
def test_device(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': DEVICE_CHOICES_IDS['desktop']}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_no_device_with_device_type(self):
"""Test that providing a device type w/o device doesn't filter."""
qs = self._filter(data={'dev': '', 'device': 'firefoxos'})
ok_('filtered' not in qs['query'].keys())
class TestPublicAppsFilter(FilterTestsBase):
filter_classes = [PublicAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'term': {'status': mkt.STATUS_PUBLIC}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestValidAppsFilter(FilterTestsBase):
filter_classes = [ValidAppsFilter]
def test_status(self):
qs = self._filter(self.req)
ok_({'terms': {'status': mkt.VALID_STATUSES}}
in qs['query']['filtered']['filter']['bool']['must'])
ok_({'term': {'is_disabled': False}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestDeviceTypeFilter(FilterTestsBase):
filter_classes = [DeviceTypeFilter]
def test_no_filters(self):
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_mobile(self):
self.req.MOBILE = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_gaia(self):
self.req.GAIA = True
qs = self._filter(self.req)
ok_({'term': {'uses_flash': False}}
in qs['query']['filtered']['filter']['bool']['must'])
def test_tablet(self):
self.req.TABLET = True
qs = self._filter(self.req)
ok_('filtered' not in qs['query'].keys())
def test_device_in_querystring(self):
qs = self._filter(data={'dev': 'desktop'})
ok_({'term': {'device': 1}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'mobile'})
ok_({'term': {'device': 2}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'android', 'device': 'tablet'})
ok_({'term': {'device': 3}}
in qs['query']['filtered']['filter']['bool']['must'])
qs = self._filter(data={'dev': 'firefoxos'})
ok_({'term': {'device': 4}}
in qs['query']['filtered']['filter']['bool']['must'])
class TestRegionFilter(FilterTestsBase):
filter_classes = [RegionFilter]
def test_no_region_default(self):
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.RESTOFWORLD.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_region(self):
self.req.REGION = mkt.regions.BRA
qs = self._filter(self.req)
ok_({'term': {'region_exclusions': mkt.regions.BRA.id}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestProfileFilter(FilterTestsBase):
filter_classes = [ProfileFilter]
def profile_qs(self, disabled_features=None):
if disabled_features is None:
disabled_features = {}
profile = FeatureProfile().fromkeys(FeatureProfile(), True)
for feature in disabled_features:
profile[feature] = False
return {'pro': profile.to_signature(), 'dev': 'firefoxos'}
def test_filter_all_features_present(self):
qs = self._filter(data=self.profile_qs())
ok_('filtered' not in qs['query'].keys())
def test_filter_one_feature_present(self):
qs = self._filter(data=self.profile_qs(disabled_features=['sms']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
def test_filter_one_feature_present_desktop(self):
data = self.profile_qs(disabled_features=['sms'])
data['dev'] = 'desktop'
qs = self._filter(data=data)
ok_('filtered' not in qs['query'].keys())
def test_filter_multiple_features_present(self):
qs = self._filter(
data=self.profile_qs(disabled_features=['sms', 'apps']))
ok_({'term': {'features.has_sms': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
ok_({'term': {'features.has_apps': True}}
in qs['query']['filtered']['filter']['bool']['must_not'])
class TestSortingFilter(FilterTestsBase):
filter_classes = [SortingFilter]
def test_sort(self):
for api_sort, es_sort in SortingFilter.DEFAULT_SORTING.items():
qs = self._filter(data={'sort': [api_sort]})
if es_sort.startswith('-'):
ok_({es_sort[1:]: {'order': 'desc'}} in qs['sort'], qs)
else:
eq_([es_sort], qs['sort'], qs)
def test_sort_multiple(self):
qs = self._filter(data={'sort': ['rating', 'created']})
ok_({'bayesian_rating': {'order': 'desc'}} in qs['sort'])
ok_({'created': {'order': 'desc'}} in qs['sort'])
def test_sort_regional(self):
"""Popularity and trending use regional sorting for mature regions."""
req = RequestFactory().get('/')
req.REGION = mkt.regions.BRA
# Default empty query searches use popularity.
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Popularity.
req = RequestFactory().get('/', data={'sort': ['popularity']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'popularity_%s'
% mkt.regions.BRA.id: {'order': 'desc'}} in qs['sort'])
# Trending.
req = RequestFactory().get('/', data={'sort': ['trending']})
req.REGION = mkt.regions.BRA
qs = self._filter(req)
ok_({'trending_%s' % mkt.regions.BRA.id: {'order': 'desc'}}
in qs['sort'])
class TestCombinedFilter(FilterTestsBase):
"""
Basic test to ensure that when filters are combined they result in the
expected query structure.
"""
filter_classes = [SearchQueryFilter, PublicSearchFormFilter,
PublicAppsFilter, SortingFilter]
def test_combined(self):
qs = self._filter(data={'q': 'test', 'cat': 'games',
'sort': 'trending'})
ok_(qs['query']['filtered']['query']['function_score'])
ok_(qs['query']['filtered']['filter'])
must = qs['query']['filtered']['filter']['bool']['must']
ok_({'terms': {'category': ['games']}} in must)
ok_({'term': {'status': 4}} in must)
ok_({'term': {'is_disabled': False}} in must)
ok_({'trending': {'order': 'desc'}} in qs['sort'])
query = qs['query']['filtered']['query']
ok_({'field_value_factor': {'field': 'boost'}}
in query['function_score']['functions'])
ok_({'match': {'name_english': {'boost': 2.5, 'query': u'test'}}}
in query['function_score']['query']['bool']['should']) | en | 0.929766 | # -*- coding: utf-8 -*- # Spot check a few queries. Test that the polish analyzer is included correctly since it is an exception to the rest b/c it is a plugin. Test packaged also includes privileged. Ensure we are filtering by offline-capable apps. Ensure we are filtering by apps that require online access. Ensure we are not filtering by offline/online by default. # Pass any form values other than 'offline' to create the dict. # Test that we don't filter by this field if not provided. # Test a single premium type. # Test many premium types. # Test a non-existent premium type. Test that providing a device type w/o device doesn't filter. Popularity and trending use regional sorting for mature regions. # Default empty query searches use popularity. # Popularity. # Trending. Basic test to ensure that when filters are combined they result in the expected query structure. | 1.866415 | 2 |
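The test harness in mkt/search/tests/test_filters.py above builds its Elasticsearch query by chaining filter_queryset() across a list of filter classes. A stripped-down, hypothetical version of that pattern using plain dicts — the class names and the dict-based "queryset" are stand-ins for illustration, not the real mkt or elasticsearch-dsl objects:

class TermFilter(object):
    # Adds one {'term': {field: value}} clause under filtered/filter/bool/must.
    field = None
    value = None

    def filter_queryset(self, request, query, view):
        bool_filter = query.setdefault('filtered', {}).setdefault(
            'filter', {}).setdefault('bool', {})
        bool_filter.setdefault('must', []).append({'term': {self.field: self.value}})
        return query


class PublicStatusFilter(TermFilter):
    field = 'status'
    value = 4  # same status value the tests above assert for public apps


class OnlyEnabledFilter(TermFilter):
    field = 'is_disabled'
    value = False


def apply_filters(request, filter_classes, view=None):
    # Same chaining shape as FilterTestsBase._filter: each filter narrows the query.
    query = {}
    for filter_class in filter_classes:
        query = filter_class().filter_queryset(request, query, view)
    return query


# apply_filters(None, [PublicStatusFilter, OnlyEnabledFilter]) returns
# {'filtered': {'filter': {'bool': {'must': [{'term': {'status': 4}},
#                                            {'term': {'is_disabled': False}}]}}}}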