code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import configparser
import json
import os
import typing
from ..version import __version__ as version
from .globals import CONFIG
from . import config
# Public API of this module. save_autosave and restore_autosave are public
# functions defined below but were missing from the original tuple, so
# 'from ... import *' silently hid them.
__all__ = ('NoConfig', 'load', 'new', 'autosave', 'save', 'load_save',
           'delete_autosave', 'save_autosave', 'restore_autosave')
class NoConfig(Exception):
    """Raised when the item layout configuration is not available."""
def load(ltype: str) -> dict:
    '''
    Load item/dungeon layout.

    Args:
        ltype: 'Items' or 'Dungeons'
    Returns:
        dict: layout in format {identifier: (column, row)}
    Raises:
        NoConfig, configparser.Error: if no item layout is available
    '''
    inp = configparser.ConfigParser(allow_no_value=True)
    try:
        fid = open(os.path.join(
            config.config_directory(), CONFIG['layout']), 'r')
    except FileNotFoundError as err:
        raise NoConfig() from err
    try:
        inp.read_file(fid)
    finally:
        fid.close()
    if ltype not in inp:
        raise NoConfig()
    try:
        if inp['version']['version'] != version:
            raise NoConfig()
    except (KeyError, configparser.NoSectionError,
            configparser.NoOptionError) as err:
        # A layout file without version info is only acceptable for the
        # baseline release, mirroring load_save(). The original comparison
        # here was 'version != version', a tautology that never raised.
        if version != '1.0.0':
            raise NoConfig() from err
    layout = {}
    for item in inp[ltype]:
        if not inp[ltype][item]:
            # blank entry: item has no position assigned
            continue
        try:
            sep = tuple(int(c) for c in inp[ltype][item].split(','))
        except ValueError as err:
            raise NoConfig() from err
        if len(sep) != 2:
            # positions are strictly (column, row) pairs
            raise NoConfig()
        layout[item] = sep
    return layout
def new(layouts: typing.Mapping[
        str, typing.Mapping[str, typing.Sequence[int]]]):
    '''
    Create new layout file.

    Args:
        layouts: layout in format {identifier: (column, row)}
    '''
    out = configparser.ConfigParser(allow_no_value=True)
    # Write both layout sections in fixed order; each value is a
    # comma-separated coordinate pair.
    for section in ('Items', 'Dungeons'):
        out.add_section(section)
        for identifier in layouts[section]:
            out[section][identifier] = ', '.join(
                str(coord) for coord in layouts[section][identifier])
    with open(os.path.join(
            config.config_directory(), CONFIG['layout']), 'w') as fid:
        out.write(fid)
def autosave(ltype: str, tracker) -> None:
    '''
    Perform autosave.

    Args:
        ltype: 'Items' or 'Dungeons', 'Hints' or starting with 'Maps,'
        tracker: tracker providing info
    '''
    target = os.path.join(config.config_directory(), CONFIG['autosave'])
    # Merge this tracker's state into the existing autosave so other
    # trackers' sections survive.
    data = load_save()
    data[ltype] = tracker.store()
    data['version'] = version
    with open(target, 'w') as fid:
        json.dump(data, fid)
def save(trackers: dict, filepath: str) -> None:
    '''
    Save current item setup.

    Args:
        trackers: tracker providing info
        filepath: savefile path
    '''
    to_store = {}
    for dtype, tracker in trackers.items():
        try:
            to_store[dtype] = tracker.store()
        except AttributeError:
            # Tracker has no store() of its own; collect state from each
            # of its GUI elements instead.
            to_store[dtype] = {}
            for element in tracker.gui:
                to_store[dtype][element.identifier] = element.store()
    to_store['version'] = version
    with open(filepath, 'w') as fid:
        json.dump(to_store, fid)
def load_save(filepath: typing.Optional[str] = None) -> dict:
    '''
    Load save file.

    Args:
        filepath: full path to file to load; defaults to the autosave file
    Returns:
        dict: save data (empty if missing autosave, unreadable or outdated)
    Raises:
        FileNotFoundError: if file doesn't exist (unless it's the autosave)
    '''
    # Resolve the autosave path lazily. The original computed it once in
    # the def-time default argument, freezing config_directory() at import.
    # (Its '-> None or dict' annotation also evaluated to just dict.)
    autosavefile = os.path.join(config.config_directory(), CONFIG['autosave'])
    if filepath is None:
        filepath = autosavefile
    try:
        fid = open(filepath, 'r')
    except FileNotFoundError:
        # A missing autosave just means "no saved state yet".
        if filepath == autosavefile:
            return {}
        raise
    try:
        data = json.load(fid)
    except json.JSONDecodeError:
        data = {}
    finally:
        fid.close()
    try:
        if data['version'] != version:
            data = {}
    except KeyError:
        # Saves without version info are only accepted from release 1.0.0.
        if version != '1.0.0':
            data = {}
    return data
def delete_autosave() -> None:
    '''
    Delete autosave file.
    '''
    target = os.path.join(config.config_directory(), CONFIG['autosave'])
    try:
        os.remove(target)
    except FileNotFoundError:
        # Nothing to delete; that's fine.
        pass
def save_autosave(filepath: str) -> None:
    '''
    Copy autosave into dedicated file.

    Args:
        filepath: savefile path
    '''
    source = os.path.join(config.config_directory(), CONFIG['autosave'])
    with open(source, 'r') as infid, open(filepath, 'w') as outfid:
        outfid.write(infid.read())
def restore_autosave(filepath: str) -> None:
    '''
    Copy dedicated file into autosave.

    Args:
        filepath: savefile path
    '''
    target = os.path.join(config.config_directory(), CONFIG['autosave'])
    with open(filepath, 'r') as infid, open(target, 'w') as outfid:
        outfid.write(infid.read())
from .State import State
from .Region import Region
from .Entrance import Entrance
from .Location import Location, LocationFactory
from .LocationList import business_scrubs
from .DungeonList import create_dungeons
from .Rules import set_rules, set_shop_rules
from .Item import Item
from .RuleParser import parse_rule_string
from .SettingsList import get_setting_info
import logging
import copy
import io
import json
import random
class World(object):
    """A single randomizer world.

    Holds the region/entrance/location graph, the item pool, dungeon
    state and the logic State, plus an attribute for every seed setting
    (copied into the instance namespace from the settings object).
    """

    def __init__(self, settings):
        self.shuffle = 'vanilla'
        self.dungeons = []
        self.regions = []
        self.itempool = []
        self.state = State(self)
        # lazily-filled caches for name -> object lookups
        self._cached_locations = None
        self._entrance_cache = {}
        self._region_cache = {}
        self._location_cache = {}
        self.required_locations = []
        self.shop_prices = {}
        self.scrub_prices = {}
        self.light_arrow_location = None

        # dump settings directly into world's namespace
        # this gives the world an attribute for every setting listed in Settings.py
        self.settings = settings
        self.__dict__.update(settings.__dict__)

        # evaluate settings (important for logic, nice for spoiler)
        if self.big_poe_count_random:
            self.big_poe_count = random.randint(1, 10)
        if self.starting_tod == 'random':
            setting_info = get_setting_info('starting_tod')
            choices = [ch for ch in setting_info.args_params['choices'] if ch not in ['default', 'random']]
            self.starting_tod = random.choice(choices)

        # rename a few attributes...
        self.keysanity = self.shuffle_smallkeys != 'dungeon'
        self.check_beatable_only = not self.all_reachable

        # trials that can be skipped will be decided later
        self.skipped_trials = {
            'Forest': False,
            'Fire': False,
            'Water': False,
            'Spirit': False,
            'Shadow': False,
            'Light': False
        }

        # dungeon forms will be decided later
        self.dungeon_mq = {
            'Deku Tree': False,
            'Dodongos Cavern': False,
            'Jabu Jabus Belly': False,
            'Bottom of the Well': False,
            'Ice Cavern': False,
            'Gerudo Training Grounds': False,
            'Forest Temple': False,
            'Fire Temple': False,
            'Water Temple': False,
            'Spirit Temple': False,
            'Shadow Temple': False,
            'Ganons Castle': False
        }

        self.can_take_damage = True

    def copy(self):
        """Return a copy of this world with its own region graph,
        dungeons, item pool and state."""
        new_world = World(self.settings)
        new_world.skipped_trials = copy.copy(self.skipped_trials)
        new_world.dungeon_mq = copy.copy(self.dungeon_mq)
        new_world.big_poe_count = copy.copy(self.big_poe_count)
        new_world.can_take_damage = self.can_take_damage
        new_world.shop_prices = copy.copy(self.shop_prices)
        # NOTE(review): scrub_prices is not copied here — confirm the copy
        # re-derives it (e.g. via set_scrub_prices) before it is used.
        new_world.id = self.id
        new_world.regions = [region.copy(new_world) for region in self.regions]
        # re-resolve exit targets against the copied regions
        for region in new_world.regions:
            for exit in region.exits:
                exit.connect(new_world.get_region(exit.connected_region))
        new_world.dungeons = [dungeon.copy(new_world) for dungeon in self.dungeons]
        new_world.itempool = [item.copy(new_world) for item in self.itempool]
        new_world.state = self.state.copy(new_world)
        return new_world

    def load_regions_from_json(self, file_path):
        """Parse a region-definition JSON file (allowing '#' comments)
        and append the resulting Region objects to self.regions."""
        json_string = ""
        with io.open(file_path, 'r') as file:
            for line in file.readlines():
                # strip '#' comments; the file is not strict JSON
                json_string += line.split('#')[0].replace('\n', ' ')
        region_json = json.loads(json_string)

        for region in region_json:
            new_region = Region(region['region_name'])
            new_region.world = self
            if 'dungeon' in region:
                new_region.dungeon = region['dungeon']
            if 'locations' in region:
                for location, rule in region['locations'].items():
                    new_location = LocationFactory(location)
                    new_location.parent_region = new_region
                    if self.logic_rules != 'none':
                        # compile the rule string into an access predicate
                        new_location.access_rule = parse_rule_string(rule, self)
                    new_location.world = self
                    new_region.locations.append(new_location)
            if 'exits' in region:
                for exit, rule in region['exits'].items():
                    new_exit = Entrance('%s -> %s' % (new_region.name, exit), new_region)
                    new_exit.connected_region = exit
                    if self.logic_rules != 'none':
                        new_exit.access_rule = parse_rule_string(rule, self)
                    new_region.exits.append(new_exit)
            self.regions.append(new_region)

    def initialize_entrances(self):
        """Resolve every exit's connected_region name to its Region."""
        for region in self.regions:
            for exit in region.exits:
                exit.connect(self.get_region(exit.connected_region))

    def initialize_regions(self):
        """Point every region and location back at this world."""
        for region in self.regions:
            region.world = self
            for location in region.locations:
                location.world = self

    def initialize_items(self):
        """Point every pooled, placed and dungeon item back at this world."""
        for item in self.itempool:
            item.world = self
        for region in self.regions:
            for location in region.locations:
                if location.item != None:
                    location.item.world = self
        for item in [item for dungeon in self.dungeons for item in dungeon.all_items]:
            item.world = self

    def random_shop_prices(self):
        """Assign random prices (multiples of 5, beta-distributed) to the
        shop slots selected by the shopsanity setting."""
        # assumes the last character of a shop location name identifies
        # its slot, and these slots are the shuffleable ones — TODO confirm
        shop_item_indexes = ['7', '5', '8', '6']
        self.shop_prices = {}
        for region in self.regions:
            if self.shopsanity == 'random':
                shop_item_count = random.randint(0,4)
            else:
                shop_item_count = int(self.shopsanity)

            for location in region.locations:
                if location.type == 'Shop':
                    if location.name[-1:] in shop_item_indexes[:shop_item_count]:
                        self.shop_prices[location.name] = int(random.betavariate(1.5, 2) * 60) * 5

    def set_scrub_prices(self):
        """Assign prices to business scrubs according to shuffle_scrubs."""
        # Get Deku Scrub Locations
        scrub_locations = [location for location in self.get_locations() if 'Deku Scrub' in location.name]
        scrub_dictionary = {}
        for location in scrub_locations:
            if location.default not in scrub_dictionary:
                scrub_dictionary[location.default] = []
            scrub_dictionary[location.default].append(location)

        # Loop through each type of scrub.
        for (scrub_item, default_price, text_id, text_replacement) in business_scrubs:
            price = default_price
            if self.shuffle_scrubs == 'low':
                price = 10
            elif self.shuffle_scrubs == 'random':
                # this is a random value between 0-99
                # average value is ~33 rupees
                price = int(random.betavariate(1, 2) * 99)

            # Set price in the dictionary as well as the location.
            self.scrub_prices[scrub_item] = price
            if scrub_item in scrub_dictionary:
                for location in scrub_dictionary[scrub_item]:
                    location.price = price
                    if location.item is not None:
                        location.item.price = price

    def get_region(self, regionname):
        """Return the Region for a name (or pass a Region through).

        Raises:
            KeyError: if no region by that name exists.
        """
        if isinstance(regionname, Region):
            return regionname
        try:
            return self._region_cache[regionname]
        except KeyError:
            # linear scan on cache miss; result is memoized
            for region in self.regions:
                if region.name == regionname:
                    self._region_cache[regionname] = region
                    return region
            raise KeyError('No such region %s' % regionname)

    def get_entrance(self, entrance):
        """Return the Entrance for a name (or pass an Entrance through).

        Raises:
            KeyError: if no entrance by that name exists.
        """
        if isinstance(entrance, Entrance):
            return entrance
        try:
            return self._entrance_cache[entrance]
        except KeyError:
            for region in self.regions:
                for exit in region.exits:
                    if exit.name == entrance:
                        self._entrance_cache[entrance] = exit
                        return exit
            raise KeyError('No such entrance %s' % entrance)

    def get_location(self, location):
        """Return the Location for a name (or pass a Location through).

        Raises:
            KeyError: if no location by that name exists.
        """
        if isinstance(location, Location):
            return location
        try:
            return self._location_cache[location]
        except KeyError:
            for region in self.regions:
                for r_location in region.locations:
                    if r_location.name == location:
                        self._location_cache[location] = r_location
                        return r_location
        raise KeyError('No such location %s' % location)

    def get_items(self):
        """Return all items: placed ones plus the remaining pool."""
        return [loc.item for loc in self.get_filled_locations()] + self.itempool

    # get a list of items that should stay in their proper dungeon
    def get_restricted_dungeon_items(self):
        itempool = []
        if self.shuffle_mapcompass == 'dungeon':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.dungeon_items])
        if self.shuffle_smallkeys == 'dungeon':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.small_keys])
        if self.shuffle_bosskeys == 'dungeon':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.boss_key])

        for item in itempool:
            item.world = self
        return itempool

    # get a list of items that don't have to be in their proper dungeon
    def get_unrestricted_dungeon_items(self):
        itempool = []
        if self.shuffle_mapcompass == 'keysanity':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.dungeon_items])
        if self.shuffle_smallkeys == 'keysanity':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.small_keys])
        if self.shuffle_bosskeys == 'keysanity':
            itempool.extend([item for dungeon in self.dungeons for item in dungeon.boss_key])

        for item in itempool:
            item.world = self
        return itempool

    def find_items(self, item):
        """Return every location currently holding the named item."""
        return [location for location in self.get_locations() if location.item is not None and location.item.name == item]

    def push_item(self, location, item):
        """Place item at location, syncing prices and back-references.

        Raises:
            RuntimeError: if the location's item rule rejects the item.
        """
        if not isinstance(location, Location):
            location = self.get_location(location)

        # This check should never be false normally, but is here as a sanity check
        if location.can_fill_fast(item):
            location.item = item
            item.location = location
            # an explicit location price overrides the item's, then both
            # are kept in sync
            item.price = location.price if location.price is not None else item.price
            location.price = item.price

            logging.getLogger('').debug('Placed %s [World %d] at %s [World %d]', item, item.world.id if hasattr(item, 'world') else -1, location, location.world.id if hasattr(location, 'world') else -1)
        else:
            raise RuntimeError('Cannot assign item %s to location %s.' % (item, location))

    def get_locations(self):
        """Return all locations in the world (cached after first call)."""
        if self._cached_locations is None:
            self._cached_locations = []
            for region in self.regions:
                self._cached_locations.extend(region.locations)
        return self._cached_locations

    def get_unfilled_locations(self):
        """Return locations that do not hold an item yet."""
        return [location for location in self.get_locations() if location.item is None]

    def get_filled_locations(self):
        """Return locations that already hold an item."""
        return [location for location in self.get_locations() if location.item is not None]

    def get_reachable_locations(self, state=None):
        """Return locations reachable under state (default: own state)."""
        if state is None:
            state = self.state
        return [location for location in self.get_locations() if state.can_reach(location)]

    def get_placeable_locations(self, state=None):
        """Return empty locations reachable under state."""
        if state is None:
            state = self.state
        return [location for location in self.get_locations() if location.item is None and state.can_reach(location)]

    def unlocks_new_location(self, item):
        """Return True if collecting item makes some currently
        unreachable, unfilled location reachable."""
        temp_state = self.state.copy()
        temp_state.clear_cached_unreachable()
        temp_state.collect(item)

        for location in self.get_unfilled_locations():
            if temp_state.can_reach(location) and not self.state.can_reach(location):
                return True

        return False

    def has_beaten_game(self, state):
        """The game is beaten once the Triforce has been collected."""
        return state.has('Triforce')

    # Useless areas are areas that have contain no items that could ever
    # be used to complete the seed. Unfortunately this is very difficult
    # to calculate since that involves trying every possible path and item
    # set collected to know this. To simplify this we instead just get areas
    # that don't have any items that could ever be required in any seed.
    # We further cull this list with woth info. This is an overestimate of
    # the true list of possible useless areas, but this will generate a
    # reasonably sized list of areas that fit this property.
    def update_useless_areas(self, spoiler):
        """Populate self.empty_areas with hint areas that contain no
        possibly-required items, for use by barren hints."""
        areas = {}
        # Link's Pocket and None are not real areas
        excluded_areas = [None, "Link's Pocket"]
        for location in self.get_locations():
            # We exclude event and locked locations. This means that medallions
            # and stones are not considered here. This is not really an accurate
            # way of doing this, but it's the only way to allow dungeons to appear.
            # So barren hints do not include these dungeon rewards.
            if location.hint in excluded_areas or \
               location.locked or \
               location.item is None or \
               location.item.type == "Event":
                continue

            area = location.hint

            # Build the area list and their items
            if area not in areas:
                areas[area] = {
                    'locations': [],
                }
            areas[area]['locations'].append(location)

        # Generate area list meta data
        for area,area_info in areas.items():
            # whether an area is a dungeon is calculated to prevent too many
            # dungeon barren hints since they are quite powerful. The area
            # names don't quite match the internal dungeon names so we need to
            # check if any location in the area has a dungeon.
            area_info['dungeon'] = False
            for location in area_info['locations']:
                if location.parent_region.dungeon is not None:
                    area_info['dungeon'] = True
                    break
            # Weight the area's chance of being chosen based on its size.
            # Small areas are more likely to barren, so we apply this weight
            # to make all areas have a more uniform chance of being chosen
            area_info['weight'] = len(area_info['locations'])

        # these are items that can never be required but are still considered major items
        exclude_item_list = [
            'Double Defense',
            'Ice Arrows',
            'Serenade of Water',
            'Prelude of Light',
            'Biggoron Sword',
        ]

        if self.damage_multiplier != 'ohko' and self.damage_multiplier != 'quadruple' and self.shuffle_scrubs == 'off':
            # nayru's love may be required to prevent forced damage
            exclude_item_list.append('Nayrus Love')

        if self.hints != 'agony':
            # Stone of Agony only required if it's used for hints
            exclude_item_list.append('Stone of Agony')

        # The idea here is that if an item shows up in woth, then the only way
        # that another copy of that major item could ever be required is if it
        # is a progressive item. Normally this applies to things like bows, bombs
        # bombchus, bottles, slingshot, magic and ocarina. However if plentiful
        # item pool is enabled this could be applied to any item.
        duplicate_item_woth = {}
        woth_loc = [location for world_woth in spoiler.required_locations.values() for location in world_woth]
        for world in spoiler.worlds:
            duplicate_item_woth[world.id] = {}
        for location in woth_loc:
            if not location.item.special.get('progressive', False):
                # Progressive items may need multiple copies to make progression
                # so we can't make this culling for those kinds of items.
                duplicate_item_woth[location.item.world.id][location.item.name] = location
            if 'Bottle' in location.item.name and \
               location.item.name not in ['Bottle with Letter', 'Bottle with Big Poe']:
                # Bottles can have many names but they are all generally the same in logic
                # The problem is that Ruto's Letter and Big Poe might not be usuable as a
                # Bottle immediately, so they might need to use a regular bottle in
                # addition to that one. Conversely finding a bottle might mean you still
                # need ruto's letter or big poe. So to work with this, we ignore those
                # two special bottles as being bottles
                duplicate_item_woth[location.item.world.id]['Bottle'] = location

        # generate the empty area list
        self.empty_areas = {}

        for area,area_info in areas.items():
            useless_area = True
            for location in area_info['locations']:
                if location.item.majoritem:
                    if (location.item.name in exclude_item_list):
                        continue

                    if 'Bottle' in location.item.name and location.item.name not in ['Bottle with Letter', 'Bottle with Big Poe']:
                        dupe_location = duplicate_item_woth[location.item.world.id].get('Bottle', location)
                    else:
                        dupe_location = duplicate_item_woth[location.item.world.id].get(location.item.name, location)

                    # a non-woth duplicate of a woth item can be culled
                    if (dupe_location.world.id != location.world.id or dupe_location.name != location.name):
                        continue

                    useless_area = False
                    break
            if useless_area:
                self.empty_areas[area] = area_info
import collections
import logging
from .Location import DisableType
def set_rules(world):
    """Attach access and item rules to the world's locations.

    Covers: Ganon's Triforce-only slot, song placement restrictions,
    shop slot rules and wallet price gates, town-shop tunic bans, a
    special-case Forest Temple MQ chest, and user-disabled locations.

    Args:
        world: the World whose locations are being configured
    """
    logger = logging.getLogger('')

    # ganon can only carry triforce
    world.get_location('Ganon').item_rule = lambda location, item: item.name == 'Triforce'

    # these are default save&quit points and always accessible
    world.get_region('Links House').can_reach = lambda state: True

    for location in world.get_locations():
        if not world.shuffle_song_items:
            if location.type == 'Song':
                if not world.start_with_fast_travel:
                    # song slots only take songs from the same world
                    add_item_rule(location, lambda location, item: item.type == 'Song' and item.world.id == location.world.id)
                else:
                    # allow junk items, but songs must still have matching world
                    add_item_rule(location, lambda location, item: item.type != 'Song' or (item.type == 'Song' and item.world.id == location.world.id))
            else:
                add_item_rule(location, lambda location, item: item.type != 'Song')

        if location.type == 'Shop':
            if location.name in world.shop_prices:
                # priced (shuffled) slot: no shop items, gated by wallet size
                add_item_rule(location, lambda location, item: item.type != 'Shop')
                location.price = world.shop_prices[location.name]
                if location.price > 200:
                    set_rule(location, lambda state: state.has('Progressive Wallet', 2))
                elif location.price > 99:
                    set_rule(location, lambda state: state.has('Progressive Wallet'))
            else:
                add_item_rule(location, lambda location, item: item.type == 'Shop' and item.world.id == location.world.id)

                if location.parent_region.name in ['Castle Town Bombchu Shop', 'Castle Town Potion Shop', 'Castle Town Bazaar']:
                    if not world.check_beatable_only:
                        forbid_item(location, 'Buy Goron Tunic')
                        forbid_item(location, 'Buy Zora Tunic')
        elif not 'Deku Scrub' in location.name:
            add_item_rule(location, lambda location, item: item.type != 'Shop')

        if location.name == 'Forest Temple MQ First Chest' and world.shuffle_bosskeys == 'dungeon' and world.shuffle_smallkeys == 'dungeon' and world.tokensanity == 'off':
            # This location needs to be a small key. Make sure the boss key isn't placed here.
            forbid_item(location, 'Boss Key (Forest Temple)')

    for location in world.disabled_locations:
        try:
            world.get_location(location).disabled = DisableType.PENDING
        except KeyError:
            # World.get_location raises KeyError for unknown names; the
            # original bare 'except:' also swallowed unrelated errors.
            logger.debug('Tried to disable location that does not exist: %s' % location)
def set_rule(spot, rule):
    """Replace the spot's access rule outright with the given predicate."""
    spot.access_rule = rule
def add_rule(spot, rule, combine='and'):
    """Combine a new access rule with the spot's existing one.

    combine='and' (default) requires both rules; 'or' requires either.
    """
    previous = spot.access_rule
    if combine == 'or':
        combined = lambda state: rule(state) or previous(state)
    else:
        combined = lambda state: rule(state) and previous(state)
    spot.access_rule = combined
def add_item_rule(spot, rule, combine='and'):
    """Combine a new item rule with the spot's existing one.

    combine='and' (default) requires both rules; 'or' requires either.
    """
    previous = spot.item_rule
    if combine == 'or':
        combined = lambda location, item: rule(location, item) or previous(location, item)
    else:
        combined = lambda location, item: rule(location, item) and previous(location, item)
    spot.item_rule = combined
def forbid_item(location, item_name):
    """Disallow placing the named item at this location, on top of any
    existing item rule."""
    previous = location.item_rule
    location.item_rule = (
        lambda loc, item: item.name != item_name and previous(loc, item))
def item_in_locations(state, item, locations):
    """Return True if any of the given locations holds the named item
    according to the state."""
    return any(state.item_name(location) == item for location in locations)
# This function should be ran once after the shop items are placed in the world.
# It should be ran before other items are placed in the world so that logic has
# the correct checks for them. This is safe to do since every shop is still
# accessible when all items are obtained and every shop item is not.
# This function should also be called when a world is copied if the original world
# had called this function because the world.copy does not copy the rules
def set_shop_rules(world):
    """Attach wallet, age and prerequisite rules to placed shop items.

    Args:
        world: the World whose filled shop locations are being configured
    """
    for location in world.get_filled_locations():
        if location.item.type == 'Shop':

            # Add wallet requirements
            if location.item.name in ['Buy Arrows (50)', 'Buy Fish', 'Buy Goron Tunic', 'Buy Bombchu (20)', 'Buy Bombs (30)']:
                add_rule(location, lambda state: state.has('Progressive Wallet'))
            elif location.item.name in ['Buy Zora Tunic', 'Buy Blue Fire']:
                add_rule(location, lambda state: state.has('Progressive Wallet', 2))

            # Add adult only checks
            if location.item.name in ['Buy Goron Tunic', 'Buy Zora Tunic']:
                if location.parent_region.name == 'Goron Shop':
                    # Goron Shop additionally needs a way to open the shop
                    add_rule(
                        location,
                        lambda state: state.is_adult() and (state.has_explosives() or state.has('Progressive Strength Upgrade') or state.has_bow()))
                elif location.parent_region.name == 'Zora Shop':
                    add_rule(location, lambda state: state.can_reach('Zoras Domain Frozen -> Zora Shop', 'Entrance'))
                elif location.parent_region.name in ['Castle Town Bombchu Shop', 'Castle Town Potion Shop', 'Castle Town Bazaar']:
                    # tunics in the child-only town shops are never obtainable
                    set_rule(location, lambda state: False)
                else:
                    add_rule(location, lambda state: state.is_adult())

            # Add item prerequisite checks
            if location.item.name in ['Buy Blue Fire',
                                      'Buy Blue Potion',
                                      'Buy Bottle Bug',
                                      'Buy Fish',
                                      'Buy Green Potion',
                                      'Buy Poe',
                                      'Buy Red Potion [30]',
                                      'Buy Red Potion [40]',
                                      'Buy Red Potion [50]',
                                      'Buy Fairy\'s Spirit']:
                # bottled goods require an empty bottle
                add_rule(location, lambda state: state.has_bottle())
            if location.item.name in ['Buy Bombchu (10)', 'Buy Bombchu (20)', 'Buy Bombchu (5)']:
                add_rule(location, lambda state: state.has_bombchus_item())
import random
def link_entrances(world):
    """Wire up all entrances for the given world.

    Connects the mandatory (never-shuffled) entrances, the per-dungeon
    layout selected in world.dungeon_mq (Master Quest vs vanilla), and —
    for an unshuffled world — the default overworld and dungeon
    connections.

    Args:
        world: the World whose entrance graph is being built

    Raises:
        NotImplementedError: if world.shuffle is anything but 'vanilla'
    """
    # setup mandatory connections
    for exitname, regionname in mandatory_connections:
        connect_simple(world, exitname, regionname)

    # Each dungeon has a Master Quest and a vanilla layout; connect the
    # variant selected in world.dungeon_mq. Table-driven to replace the
    # original nine copy-pasted if/else blocks.
    # NOTE(review): these short keys ('DT', ...) must match the keys used
    # by the World implementation's dungeon_mq dict — confirm against the
    # World in use (World.py in this package uses full dungeon names).
    dungeon_variants = (
        ('DT', DT_MQ_connections, DT_vanilla_connections),
        ('DC', DC_MQ_connections, DC_vanilla_connections),
        ('JB', JB_MQ_connections, JB_vanilla_connections),
        ('FoT', FoT_MQ_connections, FoT_vanilla_connections),
        ('FiT', FiT_MQ_connections, FiT_vanilla_connections),
        ('WT', WT_MQ_connections, WT_vanilla_connections),
        ('GTG', GTG_MQ_connections, GTG_vanilla_connections),
        ('SpT', SpT_MQ_connections, SpT_vanilla_connections),
        ('ShT', ShT_MQ_connections, ShT_vanilla_connections),
    )
    for dungeon, mq_connections, vanilla_connections in dungeon_variants:
        connections = (
            mq_connections if world.dungeon_mq[dungeon]
            else vanilla_connections)
        for exitname, regionname in connections:
            connect_simple(world, exitname, regionname)

    # if we do not shuffle, set default connections
    if world.shuffle == 'vanilla':
        for exitname, regionname in default_connections:
            connect_simple(world, exitname, regionname)
        for exitname, regionname in default_dungeon_connections:
            connect_simple(world, exitname, regionname)
    else:
        raise NotImplementedError('Shuffling not supported yet')
def connect_simple(world, exitname, regionname):
    """Connect the named entrance directly to the named region."""
    target = world.get_region(regionname)
    world.get_entrance(exitname).connect(target)
def connect_entrance(world, entrancename, exitname):
    """Connect an entrance to a region, or to an exit's parent region.

    Args:
        world: the World whose entrance graph is modified
        entrancename: name of the entrance to (re)connect
        exitname: name of a region, or of an exit whose parent region and
            exit id are used
    """
    entrance = world.get_entrance(entrancename)

    # check if we got an entrance or a region to connect to
    try:
        region = world.get_region(exitname)
        exit = None
    except (RuntimeError, KeyError):
        # World.get_region in this package raises KeyError for unknown
        # regions; the original only caught RuntimeError, so the
        # exit-name fallback below was unreachable.
        exit = world.get_entrance(exitname)
        region = exit.parent_region

    # if this was already connected somewhere, remove the backreference
    if entrance.connected_region is not None:
        entrance.connected_region.entrances.remove(entrance)

    target = exit_ids[exit.name][0] if exit is not None else exit_ids.get(region.name, None)
    addresses = door_addresses[entrance.name][0]

    entrance.connect(region, addresses, target)
def connect_exit(world, exitname, entrancename):
    """Hook the named exit up to the region containing the named entrance."""
    entrance = world.get_entrance(entrancename)
    exit = world.get_entrance(exitname)

    # if this was already connected somewhere, remove the backreference
    old_region = exit.connected_region
    if old_region is not None:
        old_region.entrances.remove(exit)

    exit.connect(entrance.parent_region, door_addresses[entrance.name][1],
                 exit_ids[exit.name][1])
def connect_random(world, exitlist, targetlist, two_way=False):
    """Randomly pair each exit with a target and connect them.

    targetlist is copied before shuffling, so the caller's sequence is
    left untouched.
    """
    shuffled = list(targetlist)
    random.shuffle(shuffled)
    for exit, target in zip(exitlist, shuffled):
        if two_way:
            connect_two_way(world, exit, target)
        else:
            connect_entrance(world, exit, target)
def connect_doors(world, doors, targets):
    """Randomly pair doors with targets.

    Works in place: both lists are shuffled and then emptied one pair
    at a time as the connections are made.
    """
    random.shuffle(doors)
    random.shuffle(targets)
    while doors:
        connect_entrance(world, doors.pop(), targets.pop())
# these are connections that cannot be shuffled and always exist. They link together separate parts of the world we need to divide into regions
# Each entry is an (exit name, destination region name) pair consumed by connect_simple().
mandatory_connections = [('Adult Forest Warp Pad', 'Forest Temple Entry Area'),
                         ('Child Forest Warp Pad', 'Sacred Forest Meadow'),
                         ('Temple Warp Pad', 'Temple of Time'),
                         ('Crater Warp Pad', 'Death Mountain Crater Central'),
                         ('Lake Warp Pad', 'Lake Hylia'),
                         ('Graveyard Warp Pad', 'Shadow Temple Warp Region'),
                         ('Colossus Warp Pad', 'Desert Colossus'),
                         ('Lost Woods', 'Lost Woods'),
                         ('Lost Woods Front', 'Kokiri Forest'),
                         ('Woods to Goron City', 'Goron City Woods Warp'),
                         ('Goron City to Woods', 'Lost Woods'),
                         ('Goron City from Woods', 'Goron City'),
                         ('Goron City Bomb Wall', 'Goron City Woods Warp'),
                         ('Lost Woods Dive Warp', 'Zora River Child'),
                         ('Zora River Dive Warp', 'Lost Woods'),
                         ('Meadow Entrance', 'Sacred Forest Meadow Entryway'),
                         ('Meadow Exit', 'Lost Woods'),
                         ('Meadow Gate', 'Sacred Forest Meadow'),
                         ('Meadow Gate Exit', 'Sacred Forest Meadow Entryway'),
                         ('Adult Meadow Access', 'Forest Temple Entry Area'),
                         ('Adult Meadow Exit', 'Lost Woods'),
                         ('Lost Woods Bridge', 'Lost Woods Bridge'),
                         ('Kokiri Forest Entrance', 'Kokiri Forest'),
                         ('Field to Forest', 'Lost Woods Bridge'),
                         ('Forest Exit', 'Hyrule Field'),
                         ('Field to Lake', 'Lake Hylia'),
                         ('Lake Hylia Dive Warp', 'Zoras Domain'),
                         ('Zoras Domain Dive Warp', 'Lake Hylia'),
                         ('Lake Exit', 'Hyrule Field'),
                         ('Field to Valley', 'Gerudo Valley'),
                         ('Valley Exit', 'Hyrule Field'),
                         ('Valley River', 'Lake Hylia'),
                         ('Bridge Crossing', 'Gerudo Valley Far Side'),
                         ('Fortress Entrance', 'Gerudo Fortress'),
                         ('Haunted Wasteland Entrance', 'Haunted Wasteland'),
                         ('Haunted Wasteland Crossing', 'Desert Colossus'),
                         ('Field to Castle Town', 'Castle Town'),
                         ('Castle Town Exit', 'Hyrule Field'),
                         ('Hyrule Castle Grounds', 'Hyrule Castle Grounds'),
                         ('Hyrule Castle Grounds Exit', 'Castle Town'),
                         ('Hyrule Castle Garden', 'Hyrule Castle Garden'),
                         ('Hyrule Castle Garden Exit', 'Hyrule Castle Grounds'),
                         ('Ganons Castle Grounds', 'Ganons Castle Grounds'),
                         ('Ganons Castle Grounds Exit', 'Castle Town'),
                         ('Field to Kakariko', 'Kakariko Village'),
                         ('Kakariko Exit', 'Hyrule Field'),
                         ('Graveyard Entrance', 'Graveyard'),
                         ('Graveyard Exit', 'Kakariko Village'),
                         ('Drop to Graveyard', 'Graveyard'),
                         ('Death Mountain Entrance', 'Death Mountain'),
                         ('Death Mountain Exit', 'Kakariko Village'),
                         ('Goron City Entrance', 'Goron City'),
                         ('Goron City Exit', 'Death Mountain'),
                         ('Darunias Chamber', 'Darunias Chamber'),
                         ('Darunias Chamber Exit', 'Goron City'),
                         ('Mountain Crater Entrance', 'Death Mountain Crater Upper'),
                         ('Crater Exit', 'Death Mountain'),
                         ('Crater Hover Boots', 'Death Mountain Crater Lower'),
                         ('Crater Ascent', 'Death Mountain Crater Upper'),
                         ('Crater Scarecrow', 'Death Mountain Crater Central'),
                         ('Crater Bridge', 'Death Mountain Crater Central'),
                         ('Crater Bridge Reverse', 'Death Mountain Crater Lower'),
                         ('Crater to City', 'Goron City'),
                         ('Crater Access', 'Death Mountain Crater Lower'),
                         ('Dodongos Cavern Rocks', 'Dodongos Cavern Entryway'),
                         ('Mountain Access from Behind Rock', 'Death Mountain'),
                         ('Field to Zora River', 'Zora River Front'),
                         ('Zora River Exit', 'Hyrule Field'),
                         ('Zora River Rocks', 'Zora River Child'),
                         ('Zora River Downstream', 'Zora River Front'),
                         ('Zora River Child to Shared', 'Zora River Shared'),
                         ('Zora River Adult to Shared', 'Zora River Shared'),
                         ('Zora River Waterfall', 'Zoras Domain'),
                         ('Zoras Domain Exit', 'Zora River Child'),
                         ('Behind King Zora', 'Zoras Fountain'),
                         ('Zoras Fountain Exit', 'Zoras Domain'),
                         ('Zora River Adult', 'Zora River Adult'),
                         ('Zoras Domain Adult Access', 'Zoras Domain Frozen'),
                         ('Zoras Fountain Adult Access', 'Outside Ice Cavern'),
                         # NOTE(review): 'Rance' looks like a typo for 'Ranch', but this
                         # string must match the entrance name defined elsewhere in the
                         # project — verify all uses before renaming.
                         ('Lon Lon Rance Entrance', 'Lon Lon Ranch'),
                         ('Lon Lon Exit', 'Hyrule Field'),
                         ('Ganons Castle Deku Scrubs', 'Ganons Castle Deku Scrubs'),
                         ('Ganons Castle Forest Trial', 'Ganons Castle Forest Trial'),
                         ('Ganons Castle Fire Trial', 'Ganons Castle Fire Trial'),
                         ('Ganons Castle Water Trial', 'Ganons Castle Water Trial'),
                         ('Ganons Castle Shadow Trial', 'Ganons Castle Shadow Trial'),
                         ('Ganons Castle Spirit Trial', 'Ganons Castle Spirit Trial'),
                         ('Ganons Castle Light Trial', 'Ganons Castle Light Trial'),
                         ('Ganons Castle Tower', 'Ganons Castle Tower')
                        ]
# Dungeon-interior (exit name, region name) connections. For each dungeon the
# vanilla or Master Quest (MQ) list is selected by the world.dungeon_mq flags
# (see the connect_simple() calls above).
DT_vanilla_connections = [('Deku Tree Slingshot Passage', 'Deku Tree Slingshot Room'),
                          ('Deku Tree Slingshot Exit', 'Deku Tree Lobby'),
                          ('Deku Tree Basement Path', 'Deku Tree Boss Room'),
                          ('Deku Tree Basement Vines', 'Deku Tree Lobby')
                         ]
DT_MQ_connections = [('Deku Tree Compass Passage', 'Deku Tree Compass Room'),
                     ('Deku Tree Compass Exit', 'Deku Tree Lobby'),
                     ('Deku Tree Basement Path', 'Deku Tree Boss Room'),
                     ('Deku Tree Basement Vines', 'Deku Tree Lobby')
                    ]
DC_vanilla_connections = [('Dodongos Cavern Lobby', 'Dodongos Cavern Lobby'),
                          ('Dodongos Cavern Retreat', 'Dodongos Cavern Beginning'),
                          ('Dodongos Cavern Left Door', 'Dodongos Cavern Climb'),
                          ('Dodongos Cavern Bridge Fall', 'Dodongos Cavern Lobby'),
                          ('Dodongos Cavern Slingshot Target', 'Dodongos Cavern Far Bridge'),
                          ('Dodongos Cavern Bridge Fall 2', 'Dodongos Cavern Lobby'),
                          ('Dodongos Cavern Bomb Drop', 'Dodongos Cavern Boss Area'),
                          ('Dodongos Cavern Exit Skull', 'Dodongos Cavern Lobby')
                         ]
DC_MQ_connections = [('Dodongos Cavern Lobby', 'Dodongos Cavern Lobby'),
                     ('Dodongos Cavern Bomb Drop', 'Dodongos Cavern Boss Area')
                    ]
JB_vanilla_connections = [('Jabu Jabus Belly Ceiling Switch', 'Jabu Jabus Belly Main'),
                          ('Jabu Jabus Belly Retreat', 'Jabu Jabus Belly Beginning'),
                          ('Jabu Jabus Belly Tentacles', 'Jabu Jabus Belly Depths'),
                          ('Jabu Jabus Belly Elevator', 'Jabu Jabus Belly Main'),
                          ('Jabu Jabus Belly Octopus', 'Jabu Jabus Belly Boss Area'),
                          ('Jabu Jabus Belly Final Backtrack', 'Jabu Jabus Belly Main')
                         ]
JB_MQ_connections = [('Jabu Jabus Belly Cow Switch', 'Jabu Jabus Belly Main'),
                     ('Jabu Jabus Belly Retreat', 'Jabu Jabus Belly Beginning'),
                     ('Jabu Jabus Belly Tentacle Access', 'Jabu Jabus Belly Depths'),
                     ('Jabu Jabus Belly Elevator', 'Jabu Jabus Belly Main'),
                     ('Jabu Jabus Belly Octopus', 'Jabu Jabus Belly Boss Area'),
                     ('Jabu Jabus Belly Final Backtrack', 'Jabu Jabus Belly Main')
                    ]
FoT_vanilla_connections = [('Forest Temple Song of Time Block', 'Forest Temple NW Outdoors'),
                           ('Forest Temple Lobby Eyeball Switch', 'Forest Temple NE Outdoors'),
                           ('Forest Temple Lobby Locked Door', 'Forest Temple Block Push Room'),
                           ('Forest Temple Through Map Room', 'Forest Temple NE Outdoors'),
                           ('Forest Temple Well Connection', 'Forest Temple NW Outdoors'),
                           ('Forest Temple Outside to Lobby', 'Forest Temple Lobby'),
                           ('Forest Temple Scarecrows Song', 'Forest Temple Falling Room'),
                           ('Forest Temple Falling Room Exit', 'Forest Temple NE Outdoors'),
                           ('Forest Temple Elevator', 'Forest Temple Boss Region'),
                           ('Forest Temple Outside Backdoor', 'Forest Temple Outside Upper Ledge'),
                           ('Forest Temple Twisted Hall', 'Forest Temple Bow Region'),
                           ('Forest Temple Straightened Hall', 'Forest Temple Straightened Hall'),
                           ('Forest Temple Boss Key Chest Drop', 'Forest Temple Outside Upper Ledge'),
                           ('Forest Temple Outside Ledge Drop', 'Forest Temple NW Outdoors'),
                           ('Forest Temple Drop to Falling Room', 'Forest Temple Falling Room')
                          ]
FoT_MQ_connections = [('Forest Temple Lobby Locked Door', 'Forest Temple Central Area'),
                      ('Forest Temple West Eye Switch', 'Forest Temple NW Outdoors'),
                      ('Forest Temple East Eye Switch', 'Forest Temple NE Outdoors'),
                      ('Forest Temple Block Puzzle Solve', 'Forest Temple After Block Puzzle'),
                      ('Forest Temple Crystal Switch Jump', 'Forest Temple Outdoor Ledge'),
                      ('Forest Temple Drop to NW Outdoors', 'Forest Temple NW Outdoors'),
                      ('Forest Temple Well Connection', 'Forest Temple NE Outdoors'),
                      ('Forest Temple Webs', 'Forest Temple Outdoors Top Ledges'),
                      ('Forest Temple Climb to Top Ledges', 'Forest Temple Outdoors Top Ledges'),
                      ('Forest Temple Longshot to NE Outdoors Ledge', 'Forest Temple NE Outdoors Ledge'),
                      ('Forest Temple Top Drop to NE Outdoors', 'Forest Temple NE Outdoors'),
                      ('Forest Temple Drop to NE Outdoors', 'Forest Temple NE Outdoors'),
                      ('Forest Temple Song of Time Block Climb', 'Forest Temple Falling Room'),
                      ('Forest Temple Twisted Hall', 'Forest Temple Bow Region'),
                      ('Forest Temple Drop to Falling Room', 'Forest Temple Falling Room'),
                      ('Forest Temple Falling Room Exit', 'Forest Temple NE Outdoors Ledge'),
                      ('Forest Temple Elevator', 'Forest Temple Boss Region')
                     ]
FiT_vanilla_connections = [('Fire Temple Early Climb', 'Fire Temple Middle'),
                           ('Fire Temple Fire Maze Escape', 'Fire Temple Upper')
                          ]
FiT_MQ_connections = [('Fire Temple Boss Door', 'Fire Boss Room'),
                      ('Fire Temple Lower Locked Door', 'Fire Lower Locked Door'),
                      ('Fire Temple Hammer Statue', 'Fire Big Lava Room'),
                      ('Fire Temple Early Climb', 'Fire Lower Maze'),
                      ('Fire Temple Maze Climb', 'Fire Upper Maze'),
                      ('Fire Temple Maze Escape', 'Fire Temple Upper')
                     ]
WT_vanilla_connections = [('Water Temple Central Pillar', 'Water Temple Middle Water Level'),
                          ('Water Temple Upper Locked Door', 'Water Temple Dark Link Region')
                         ]
WT_MQ_connections = [('Water Temple Water Level Switch', 'Water Temple Lowered Water Levels'),
                     ('Water Temple Locked Door', 'Water Temple Dark Link Region'),
                     ('Water Temple Basement Gates Switch', 'Water Temple Basement Gated Areas')
                    ]
GTG_vanilla_connections = [('Gerudo Training Ground Left Silver Rupees', 'Gerudo Training Grounds Heavy Block Room'),
                           ('Gerudo Training Ground Beamos', 'Gerudo Training Grounds Lava Room'),
                           ('Gerudo Training Ground Central Door', 'Gerudo Training Grounds Central Maze'),
                           ('Gerudo Training Grounds Right Locked Doors', 'Gerudo Training Grounds Central Maze Right'),
                           ('Gerudo Training Grounds Maze Exit', 'Gerudo Training Grounds Lava Room'),
                           ('Gerudo Training Grounds Maze Ledge', 'Gerudo Training Grounds Central Maze Right'),
                           ('Gerudo Training Grounds Right Hookshot Target', 'Gerudo Training Grounds Hammer Room'),
                           ('Gerudo Training Grounds Hammer Target', 'Gerudo Training Grounds Eye Statue Lower'),
                           ('Gerudo Training Grounds Hammer Room Clear', 'Gerudo Training Grounds Lava Room'),
                           ('Gerudo Training Grounds Eye Statue Exit', 'Gerudo Training Grounds Hammer Room'),
                           ('Gerudo Training Grounds Eye Statue Drop', 'Gerudo Training Grounds Eye Statue Lower'),
                           ('Gerudo Training Grounds Hidden Hookshot Target', 'Gerudo Training Grounds Eye Statue Upper')
                          ]
GTG_MQ_connections = [('Gerudo Training Grounds Left Door', 'Gerudo Training Grounds Left Side'),
                      ('Gerudo Training Grounds Right Door', 'Gerudo Training Grounds Right Side'),
                      ('Gerudo Training Grounds Longshot Target', 'Gerudo Training Grounds Stalfos Room'),
                      ('Gerudo Training Grounds Song of Time Block', 'Gerudo Training Grounds Back Areas'),
                      ('Gerudo Training Grounds Rusted Switch', 'Gerudo Training Grounds Central Maze Right'),
                      ('Gerudo Training Grounds Loop Around', 'Gerudo Training Grounds Right Side')
                     ]
SpT_vanilla_connections = [('Spirit Temple Crawl Passage', 'Child Spirit Temple'),
                           ('Spirit Temple Silver Block', 'Early Adult Spirit Temple'),
                           ('Child Spirit Temple Climb', 'Child Spirit Temple Climb'),
                           ('Child Spirit Temple Passthrough', 'Spirit Temple Central Chamber'),
                           ('Adult Spirit Temple Passthrough', 'Spirit Temple Central Chamber'),
                           ('Spirit Temple Middle Child Door', 'Child Spirit Temple Climb'),
                           ('Spirit Temple to Hands', 'Spirit Temple Outdoor Hands'),
                           ('Spirit Temple Central Locked Door', 'Spirit Temple Beyond Central Locked Door'),
                           ('Spirit Temple Final Locked Door', 'Spirit Temple Beyond Final Locked Door'),
                          ]
SpT_MQ_connections = [('Spirit Temple Crawl Passage', 'Child Spirit Temple'),
                      ('Spirit Temple Ceiling Passage', 'Adult Spirit Temple'),
                      ('Child Spirit Temple to Shared', 'Spirit Temple Shared'),
                      ('Adult Spirit Temple to Shared', 'Spirit Temple Shared'),
                      ('Adult Spirit Temple Descent', 'Lower Adult Spirit Temple'),
                      ('Spirit Temple Climbable Wall', 'Spirit Temple Boss Area'),
                      ('Mirror Shield Exit', 'Mirror Shield Hand'),
                      ('Silver Gauntlets Exit', 'Silver Gauntlets Hand')
                     ]
ShT_vanilla_connections = [('Shadow Temple First Pit', 'Shadow Temple First Beamos'),
                           ('Shadow Temple Bomb Wall', 'Shadow Temple Huge Pit'),
                           ('Shadow Temple Hookshot Target', 'Shadow Temple Wind Tunnel'),
                           ('Shadow Temple Boat', 'Shadow Temple Beyond Boat')
                          ]
ShT_MQ_connections = [('Shadow Temple First Pit', 'Shadow Temple First Beamos'),
                      ('Shadow Temple Beginning Locked Door', 'Shadow Temple Dead Hand Area'),
                      ('Shadow Temple Bomb Wall', 'Shadow Temple Huge Pit'),
                      ('Shadow Temple Hookshot Target', 'Shadow Temple Wind Tunnel'),
                      ('Shadow Temple Boat', 'Shadow Temple Beyond Boat'),
                      ('Shadow Temple Longshot Target', 'Shadow Temple Invisible Maze')
                     ]
# non-shuffled entrance links
# (entrance name, destination region name) pairs used when world.shuffle == 'vanilla'.
default_connections = [('Links House Exit', 'Kokiri Forest'),
                       ('Links House', 'Links House'),
                       ('Mido House Exit', 'Kokiri Forest'),
                       ('Mido House', 'Mido House'),
                       ('Saria House Exit', 'Kokiri Forest'),
                       ('Saria House', 'Saria House'),
                       ('House of Twins Exit', 'Kokiri Forest'),
                       ('House of Twins', 'House of Twins'),
                       ('Know It All House Exit', 'Kokiri Forest'),
                       ('Know It All House', 'Know It All House'),
                       ('Kokiri Shop Exit', 'Kokiri Forest'),
                       ('Kokiri Shop', 'Kokiri Shop'),
                       ('Lake Hylia Lab', 'Lake Hylia Lab'),
                       ('Fishing Hole', 'Fishing Hole'),
                       ('Colossus Fairy', 'Colossus Fairy'),
                       ('Temple of Time', 'Temple of Time'),
                       ('Temple of Time Exit', 'Castle Town'),
                       ('Door of Time', 'Beyond Door of Time'),
                       ('Emerge as Adult', 'Temple of Time'),
                       ('Hyrule Castle Fairy', 'Hyrule Castle Fairy'),
                       ('Ganons Castle Fairy', 'Ganons Castle Fairy'),
                       ('Castle Town Rupee Room', 'Castle Town Rupee Room'),
                       ('Castle Town Bazaar', 'Castle Town Bazaar'),
                       ('Castle Town Mask Shop', 'Castle Town Mask Shop'),
                       ('Castle Town Shooting Gallery', 'Castle Town Shooting Gallery'),
                       ('Castle Town Bombchu Bowling', 'Castle Town Bombchu Bowling'),
                       ('Castle Town Potion Shop', 'Castle Town Potion Shop'),
                       ('Castle Town Treasure Chest Game', 'Castle Town Treasure Chest Game'),
                       ('Castle Town Bombchu Shop', 'Castle Town Bombchu Shop'),
                       ('Castle Town Dog Lady', 'Castle Town Dog Lady'),
                       ('Castle Town Man in Green House', 'Castle Town Man in Green House'),
                       ('Carpenter Boss House', 'Carpenter Boss House'),
                       ('House of Skulltula', 'House of Skulltula'),
                       ('Impas House', 'Impas House'),
                       ('Impas House Back', 'Impas House Back'),
                       ('Windmill', 'Windmill'),
                       ('Kakariko Bazaar', 'Kakariko Bazaar'),
                       ('Kakariko Shooting Gallery', 'Kakariko Shooting Gallery'),
                       ('Kakariko Potion Shop Front', 'Kakariko Potion Shop Front'),
                       ('Kakariko Potion Shop Back', 'Kakariko Potion Shop Back'),
                       ('Odd Medicine Building', 'Odd Medicine Building'),
                       ('Shield Grave', 'Shield Grave'),
                       ('Heart Piece Grave', 'Heart Piece Grave'),
                       ('Composer Grave', 'Composer Grave'),
                       ('Dampes Grave', 'Dampes Grave'),
                       ('Crater Fairy', 'Crater Fairy'),
                       ('Mountain Summit Fairy', 'Mountain Summit Fairy'),
                       ('Dampes House', 'Dampes House'),
                       ('Talon House', 'Talon House'),
                       ('Ingo Barn', 'Ingo Barn'),
                       ('Lon Lon Corner Tower', 'Lon Lon Corner Tower'),
                       ('Zora Shop Child Access', 'Zora Shop'),
                       ('Goron Shop', 'Goron Shop'),
                       ('Zoras Fountain Fairy', 'Zoras Fountain Fairy'),
                       ('Kokiri Forest Storms Grotto', 'Kokiri Forest Storms Grotto'),
                       ('Lost Woods Generic Grotto', 'Lost Woods Generic Grotto'),
                       ('Deku Theater', 'Deku Theater'),
                       ('Lost Woods Sales Grotto', 'Lost Woods Sales Grotto'),
                       ('Meadow Fairy Grotto', 'Meadow Fairy Grotto'),
                       ('Front of Meadow Grotto', 'Front of Meadow Grotto'),
                       ('Lon Lon Grotto', 'Lon Lon Grotto'),
                       ('Remote Southern Grotto', 'Remote Southern Grotto'),
                       ('Field Near Lake Outside Fence Grotto', 'Field Near Lake Outside Fence Grotto'),
                       ('Field Near Lake Inside Fence Grotto', 'Field Near Lake Inside Fence Grotto'),
                       ('Field Valley Grotto', 'Field Valley Grotto'),
                       ('Field West Castle Town Grotto', 'Field West Castle Town Grotto'),
                       ('Field Far West Castle Town Grotto', 'Field Far West Castle Town Grotto'),
                       ('Field Kakariko Grotto', 'Field Kakariko Grotto'),
                       ('Kakariko Bombable Grotto', 'Kakariko Bombable Grotto'),
                       ('Kakariko Back Grotto', 'Kakariko Back Grotto'),
                       ('Mountain Bombable Grotto', 'Mountain Bombable Grotto'),
                       ('Mountain Storms Grotto', 'Mountain Storms Grotto'),
                       ('Top of Crater Grotto', 'Top of Crater Grotto'),
                       ('Field North Lon Lon Grotto', 'Field North Lon Lon Grotto'),
                       ('Castle Storms Grotto', 'Castle Storms Grotto'),
                       ('Zora River Plateau Open Grotto', 'Zora River Plateau Open Grotto'),
                       ('Zora River Plateau Bombable Grotto', 'Zora River Plateau Bombable Grotto'),
                       ('Lake Hylia Grotto', 'Lake Hylia Grotto'),
                       ('Meadow Storms Grotto Child Access', 'Meadow Storms Grotto'),
                       ('Meadow Storms Grotto Adult Access', 'Meadow Storms Grotto'),
                       ('Gerudo Valley Storms Grotto','Gerudo Valley Storms Grotto'),
                       ('Desert Colossus Grotto','Desert Colossus Grotto'),
                       ('Goron City Grotto', 'Goron City Grotto'),
                       ('DMC Hammer Grotto', 'DMC Hammer Grotto'),
                       ('Zora River Storms Grotto', 'Zora River Storms Grotto'),
                       ('Zora Shop Adult Access', 'Zora Shop'),
                      ]
# non shuffled dungeons
# Dungeon entrance/exit links used when dungeon entrances are not shuffled.
default_dungeon_connections = [('Deku Tree', 'Deku Tree Lobby'),
                               ('Deku Tree Exit', 'Kokiri Forest'),
                               ('Dodongos Cavern', 'Dodongos Cavern Beginning'),
                               ('Dodongos Cavern Exit', 'Dodongos Cavern Entryway'),
                               ('Jabu Jabus Belly', 'Jabu Jabus Belly Beginning'),
                               ('Jabu Jabus Belly Exit', 'Zoras Fountain'),
                               ('Forest Temple Entrance', 'Forest Temple Lobby'),
                               ('Forest Temple Exit', 'Forest Temple Entry Area'),
                               ('Bottom of the Well', 'Bottom of the Well'),
                               ('Bottom of the Well Exit', 'Kakariko Village'),
                               ('Fire Temple Entrance', 'Fire Temple Lower'),
                               ('Fire Temple Exit', 'Death Mountain Crater Central'),
                               ('Ice Cavern Entrance', 'Ice Cavern'),
                               ('Ice Cavern Exit', 'Outside Ice Cavern'),
                               ('Water Temple Entrance', 'Water Temple Lobby'),
                               ('Water Temple Exit', 'Lake Hylia'),
                               ('Shadow Temple Entrance', 'Shadow Temple Beginning'),
                               ('Shadow Temple Exit', 'Shadow Temple Warp Region'),
                               ('Gerudo Training Grounds Entrance', 'Gerudo Training Grounds Lobby'),
                               ('Gerudo Training Grounds Exit', 'Gerudo Fortress'),
                               ('Spirit Temple Entrance', 'Spirit Temple Lobby'),
                               ('Spirit Temple Exit', 'Desert Colossus'),
                               ('Rainbow Bridge', 'Ganons Castle Lobby'),
                               ('Ganons Castle Exit', 'Ganons Castle Grounds')
                              ]
from .LocationList import location_table
from enum import Enum
class Location(object):
    """A single item check: a spot in some region that can hold one item.

    Reachability and fill eligibility are delegated to the pluggable
    ``access_rule`` and ``item_rule`` callables, which default to
    always-permissive lambdas.
    """

    def __init__(self, name='', address=None, address2=None, default=None,
                 type='Chest', scene=None, hint='Termina', parent=None):
        # Identity and placement metadata.
        self.name = name
        self.parent_region = parent
        self.item = None
        self.address = address
        self.address2 = address2
        self.default = default
        self.type = type
        self.scene = scene
        self.hint = hint
        self.spot_type = 'Location'
        # Search bookkeeping used by the reachability solver.
        self.recursion_count = 0
        self.staleness_count = 0
        # Permissive defaults: reachable by any state, accepts any item.
        self.access_rule = lambda state: True
        self.item_rule = lambda location, item: True
        self.locked = False
        self.price = None
        self.minor_only = False
        self.world = None
        self.disabled = DisableType.ENABLED

    def copy(self, new_region):
        """Clone this location into *new_region*, duplicating any held item."""
        clone = Location(self.name, self.address, self.address2, self.default,
                         self.type, self.scene, self.hint, new_region)
        clone.world = new_region.world
        if self.item:
            clone.item = self.item.copy(new_region.world)
            clone.item.location = clone
        # Carry over the mutable placement state.
        for attr in ('spot_type', 'access_rule', 'item_rule', 'locked',
                     'minor_only', 'disabled'):
            setattr(clone, attr, getattr(self, attr))
        return clone

    def can_fill(self, state, item, check_access=True):
        """True when *item* may be placed here (optionally checking access)."""
        if self.minor_only and item.majoritem:
            return False
        if self.is_disabled():
            return False
        if not self.can_fill_fast(item):
            return False
        return (not check_access) or state.can_reach(self)

    def can_fill_fast(self, item):
        """Cheap fill check: region policy plus this location's item rule."""
        return self.parent_region.can_fill(item) and self.item_rule(self, item)

    def can_reach(self, state):
        """True when *state* can reach both this location and its region."""
        if self.is_disabled():
            return False
        if not self.access_rule(state):
            return False
        return bool(state.can_reach(self.parent_region))

    def is_disabled(self):
        """True when disabled outright, or pending-disabled and locked."""
        if self.disabled == DisableType.DISABLED:
            return True
        return self.disabled == DisableType.PENDING and self.locked

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        return '%s' % self.name
def LocationFactory(locations, world=None):
    """Build Location objects for the given location name(s).

    Args:
        locations: a single location name or an iterable of names.
        world: unused here; kept for call-site compatibility.

    Returns:
        A single Location when *locations* was a string, otherwise a list
        of Locations in input order.

    Raises:
        KeyError: if a name is not present in location_table.
    """
    ret = []
    singleton = False
    if isinstance(locations, str):
        locations = [locations]
        singleton = True
    for location in locations:
        if location not in location_table:
            # Bug fix: previously the name was passed as a second KeyError
            # argument ('Unknown Location: %s', location) instead of being
            # interpolated into the message.
            raise KeyError('Unknown Location: %s' % location)
        type, scene, default, hint, addresses = location_table[location]
        if addresses is None:
            addresses = (None, None)
        address, address2 = addresses
        # NOTE(review): the accumulator list itself is passed as the parent
        # region here; looks suspicious but is preserved as-is — verify
        # against callers before changing.
        ret.append(Location(location, address, address2, default, type,
                            scene, hint, ret))
    if singleton:
        return ret[0]
    return ret
class DisableType(Enum):
    """Tri-state disable flag for a Location (see Location.is_disabled)."""
    # Location is active.
    ENABLED = 0
    # Disable takes effect only while the location is also locked.
    PENDING = 1
    # Location is fully disabled.
    DISABLED = 2
import random
class Hint(object):
    """One hint: an identifier, display text and hint category."""

    # Class-level defaults kept for backward compatibility; __init__ always
    # sets instance attributes that shadow them.
    name = ""
    text = ""
    type = ""

    def __init__(self, name, text, type, choice=None):
        """
        Args:
            name: hint identifier (key into hintTable).
            text: either the final text, or a sequence of text variants.
            type: hint category (e.g. 'item', 'location').
            choice: optional index into *text* when it is a sequence;
                None picks a variant at random.
        """
        self.name = name
        self.type = type
        if isinstance(text, str):
            self.text = text
        elif choice is None:  # idiom fix: was 'choice == None'
            self.text = random.choice(text)
        else:
            self.text = text[choice]
def getHint(name, clearer_hint=False):
    """Look up *name* in hintTable and build the corresponding Hint.

    With clearer_hint=True the unambiguous text is preferred; when no
    clear text exists, the first flavour text is pinned instead of a
    random one.
    """
    text_options, clear_text, hint_type = hintTable[name]
    if not clearer_hint:
        return Hint(name, text_options, hint_type)
    if clear_text is None:
        return Hint(name, text_options, hint_type, 0)
    return Hint(name, clear_text, hint_type)
def getHintGroup(group, world):
    """Collect every non-excluded hint whose type matches *group*."""
    selected = []
    for name in hintTable:
        hint = getHint(name, world.clearer_hints)
        # 10 Big Poes needs no dedicated hint when 3 or fewer are required,
        # so it is demoted to a plain location hint.
        if name == '10 Big Poes' and world.big_poe_count <= 3:
            hint.type = 'location'
        if hint.type == group and name not in hintExclusions(world):
            selected.append(hint)
    return selected
#table of hints; format is name: (hint text, clear hint text, type of hint). Some special characters are interpreted as in-game text commands:
# ^ is a box break
# & is a new line
# @ will print the player name
# # sets color to white (currently only used for dungeon reward hints).
# Maps hint name -> (text options, clear text, hint type).  Text options may
# be a single string or a list of candidate phrasings (one is chosen at
# random); clear text may be None when no unambiguous wording exists and the
# first text option is used instead (see getHint).
hintTable = {
'Magic Meter': (["mystic training", "pixie dust", "a green rectangle"], "a Magic Meter", 'item'),
'Double Defense': (["a white outline", "damage decrease", "strengthened love"], "Double Defense", 'item'),
'Slingshot': (["a seed shooter", "a rubberband", "a child's catapult"], "a Slingshot", 'item'),
'Boomerang': (["a banana", "a stun stick"], "the Boomerang", 'item'),
'Bow': (["an archery enabler", "a danger dart launcher"], "a Bow", 'item'),
'Bomb Bag': (["an explosive container", "a blast bag"], "a Bomb Bag", 'item'),
'Progressive Hookshot': (["Dampe's keepsake", "the Grapple Beam", "the BOING! chain"], "a Hookshot", 'item'),
'Progressive Strength Upgrade': (["power gloves", "metal mittens", "the heavy lifty"], "a Strength Upgrade", 'item'),
'Progressive Scale': (["a deeper dive", "a piece of Zora"], "a Zora Scale", 'item'),
'Hammer': (["the dragon smasher", "the metal mallet", "the heavy hitter"], "the Megaton Hammer", 'item'),
'Iron Boots': (["sink shoes", "clank cleats"], "the Iron Boots", 'item'),
'Hover Boots': (["butter boots", "sacred slippers", "spacewalkers"], "the Hover Boots", 'item'),
'Kokiri Sword': (["a butter knife", "a starter slasher", "a switchblade"], "the Kokiri Sword", 'item'),
'Biggoron Sword': (["the biggest blade", "a colossal cleaver"], "the Biggoron Sword", 'item'),
'Master Sword': (["evil's bane"], "the Master Sword", 'item'),
'Deku Shield': (["a wooden ward", "a burnable barrier"], "a Deku Shield", 'item'),
'Hylian Shield': (["a steel safeguard", "Like Like's metal meal"], "a Hylian Shield", 'item'),
'Mirror Shield': (["the reflective rampart", "Medusa's weakness", "a silvered surface"], "the Mirror Shield", 'item'),
'Farores Wind': (["teleportation", "a relocation rune", "a green ball", "a green gust"], "Farore's Wind", 'item'),
'Nayrus Love': (["a safe space", "an impregnable aura", "a blue barrier", "a blue crystal"], "Nayru's Love", 'item'),
'Dins Fire': (["an inferno", "a heat wave", "a red ball"], "Din's Fire", 'item'),
'Fire Arrows': (["the furnace firearm", "the burning bolts", "a magma missile"], "the Fire Arrows", 'item'),
'Ice Arrows': (["the refrigerator rocket", "the frostbite bolts", "an iceberg maker"], "the Ice Arrows", 'item'),
'Light Arrows': (["the shining shot", "the luminous launcher", "Ganondorf's bane", "the lighting bolts"], "the Light Arrows", 'item'),
'Lens of Truth': (["a lie detector", "a ghost tracker", "true sight", "a detective's tool"], "the Lens of Truth", 'item'),
'Ocarina': (["a flute", "a music maker"], "an Ocarina", 'item'),
'Goron Tunic': (["ruby robes", "fireproof fabric", "cooking clothes"], "a Goron Tunic", 'item'),
'Zora Tunic': (["a sapphire suit", "scuba gear", "a swimsuit"], "a Zora Tunic", 'item'),
'Epona': (["a horse", "a four legged friend"], "Epona", 'item'),
'Zeldas Lullaby': (["a song of royal slumber", "a triforce tune"], "Zelda's Lullaby", 'item'),
'Eponas Song': (["an equestrian etude", "Malon's melody", "a ranch song"], "Epona's Song", 'item'),
'Sarias Song': (["a song of dancing Gorons", "Saria's phone number"], "Saria's Song", 'item'),
'Suns Song': (["Sunny Day", "the ReDead's bane", "the Gibdo's bane"], "the Sun's Song", 'item'),
'Song of Time': (["a song 7 years long", "the tune of ages"], "the Song of Time", 'item'),
'Song of Storms': (["Rain Dance", "a thunderstorm tune", "windmill acceleration"], "the Song of Storms", 'item'),
'Minuet of Forest': (["the song of tall trees", "an arboreal anthem", "a green spark trail"], "the Minuet of Forest", 'item'),
'Bolero of Fire': (["a song of lethal lava", "a red spark trail", "a volcanic verse"], "the Bolero of Fire", 'item'),
'Serenade of Water': (["a song of a damp ditch", "a blue spark trail", "the lake's lyric"], "the Serenade of Water", 'item'),
'Requiem of Spirit': (["a song of sandy statues", "an orange spark trail", "the desert ditty"], "the Requiem of Spirit", 'item'),
'Nocturne of Shadow': (["a song of spooky spirits", "a graveyard boogie", "a haunted hymn", "a purple spark trail"], "the Nocturne of Shadow", 'item'),
'Prelude of Light': (["a luminous prologue melody", "a yellow spark trail", "the temple traveler"], "the Prelude of Light", 'item'),
'Bottle': (["a glass container", "an empty jar", "encased air"], "a Bottle", 'item'),
'Bottle with Letter': (["a call for help", "the note that Mweeps", "an SOS call", "a fishy stationery"], "Ruto's Letter", 'item'),
'Bottle with Milk': (["cow juice", "a white liquid", "a baby's breakfast"], "a Milk Bottle", 'item'),
'Bottle with Red Potion': (["a vitality vial", "a red liquid"], "a Red Potion Bottle", 'item'),
'Bottle with Green Potion': (["a magic mixture", "a green liquid"], "a Green Potion Bottle", 'item'),
'Bottle with Blue Potion': (["an ailment antidote", "a blue liquid"], "a Blue Potion Bottle", 'item'),
'Bottle with Fairy': (["an imprisoned fairy", "an extra life", "Navi's cousin"], "a Fairy Bottle", 'item'),
'Bottle with Fish': (["an aquarium", "a deity's snack"], "a Fish Bottle", 'item'),
'Bottle with Blue Fire': (["a conflagration canteen", "an icemelt jar"], "a Blue Fire Bottle", 'item'),
'Bottle with Bugs': (["an insectarium", "Skulltula finders"], "a Bug Bottle", 'item'),
'Bottle with Poe': (["a spooky ghost", "a face in the jar"], "a Poe Bottle", 'item'),
'Bottle with Big Poe': (["the spookiest ghost", "a sidequest spirit"], "a Big Poe Bottle", 'item'),
'Stone of Agony': (["the shake stone", "the Rumble Pak (TM)"], "the Stone of Agony", 'item'),
'Gerudo Membership Card': (["a girl club membership", "a desert tribe's pass"], "the Gerudo Card", 'item'),
'Progressive Wallet': (["a mo' money holder", "a gem purse", "a portable bank"], "a Wallet", 'item'),
'Deku Stick Capacity': (["a lumber rack", "more flammable twigs"], "Deku Stick Capacity", 'item'),
'Deku Nut Capacity': (["more nuts", "flashbang storage"], "Deku Nut Capacity", 'item'),
'Heart Container': (["a lot of love", "a Valentine's gift", "a boss's organ"], "a Heart Container", 'item'),
'Piece of Heart': (["a little love", "a broken heart"], "a Piece of Heart", 'item'),
'Piece of Heart (Treasure Chest Game)': ("a victory valentine", "a Piece of Heart", 'item'),
'Recovery Heart': (["a free heal", "a hearty meal", "a Band-Aid"], "a Recovery Heart", 'item'),
'Rupee (Treasure Chest Game)': ("the dollar of defeat", 'a Green Rupee', 'item'),
'Deku Stick (1)': ("a breakable branch", 'a Deku Stick', 'item'),
'Rupee (1)': (["a unique coin", "a penny", "a green gem"], "a Green Rupee", 'item'),
'Rupees (5)': (["a common coin", "a blue gem"], "a Blue Rupee", 'item'),
'Rupees (20)': (["couch cash", "a red gem"], "a Red Rupee", 'item'),
'Rupees (50)': (["big bucks", "a purple gem", "wealth"], "a Purple Rupee", 'item'),
'Rupees (200)': (["a juicy jackpot", "a yellow gem", "a giant gem", "great wealth"], "a Huge Rupee", 'item'),
'Weird Egg': (["a chicken dilemma"], "the Weird Egg", 'item'),
'Zeldas Letter': (["an autograph", "royal stationery", "royal snail mail"], "Zelda's Letter", 'item'),
'Pocket Egg': (["a Cucco container", "a Cucco, eventually", "a fowl youth"], "the Pocket Egg", 'item'),
'Pocket Cucco': (["a little clucker"], "the Pocket Cucco", 'item'),
'Cojiro': (["a cerulean capon"], "Cojiro", 'item'),
'Odd Mushroom': (["a powder ingredient"], "an Odd Mushroom", 'item'),
'Odd Potion': (["Granny's goodies"], "an Odd Potion", 'item'),
'Poachers Saw': (["a tree killer"], "the Poacher's Saw", 'item'),
'Broken Sword': (["a shattered slicer"], "the Broken Sword", 'item'),
'Prescription': (["a pill pamphlet", "a doctor's note"], "the Prescription", 'item'),
'Eyeball Frog': (["a perceiving polliwog"], "the Eyeball Frog", 'item'),
'Eyedrops': (["a vision vial"], "the Eyedrops", 'item'),
'Claim Check': (["a three day wait"], "the Claim Check", 'item'),
'Map': (["a dungeon atlas", "blueprints"], "a Map", 'item'),
'Compass': (["a treasure tracker", "a magnetic needle"], "a Compass", 'item'),
'BossKey': (["a master of unlocking", "a dungeon's master pass"], "a Boss Key", 'item'),
'SmallKey': (["a tool for unlocking", "a dungeon pass", "a lock remover", "a lockpick"], "a Small Key", 'item'),
'FortressSmallKey': (["a get out of jail free card"], "a Jail Key", 'item'),
'KeyError': (["something mysterious", "an unknown treasure"], "An Error (Please Report This)", 'item'),
'Arrows (5)': (["a few danger darts", "a few sharp shafts"], "Arrows (5 pieces)", 'item'),
'Arrows (10)': (["some danger darts", "some sharp shafts"], "Arrows (10 pieces)", 'item'),
'Arrows (30)': (["plenty of danger darts", "plenty of sharp shafts"], "Arrows (30 pieces)", 'item'),
'Bombs (5)': (["a few explosives", "a few blast balls"], "Bombs (5 pieces)", 'item'),
'Bombs (10)': (["some explosives", "some blast balls"], "Bombs (10 pieces)", 'item'),
'Bombs (20)': (["lots-o-explosives", "plenty of blast balls"], "Bombs (20 pieces)", 'item'),
'Ice Trap': (["a gift from Ganon", "a chilling discovery", "frosty fun"], "an Ice Trap", 'item'),
'Magic Bean': (["wizardly legumes"], "a Magic Bean", 'item'),
'Bombchus': (["mice bombs", "proximity mice", "wall crawlers", "trail blazers"], "Bombchus", 'item'),
'Bombchus (5)': (["a few mice bombs", "a few proximity mice", "a few wall crawlers", "a few trail blazers"], "Bombchus (5 pieces)", 'item'),
'Bombchus (10)': (["some mice bombs", "some proximity mice", "some wall crawlers", "some trail blazers"], "Bombchus (10 pieces)", 'item'),
'Bombchus (20)': (["plenty of mice bombs", "plenty of proximity mice", "plenty of wall crawlers", "plenty of trail blazers"], "Bombchus (20 pieces)", 'item'),
'Deku Nuts (5)': (["some nuts", "some flashbangs", "some scrub spit"], "Deku Nuts (5 pieces)", 'item'),
'Deku Nuts (10)': (["lots-o-nuts", "plenty of flashbangs", "plenty of scrub spit"], "Deku Nuts (10 pieces)", 'item'),
'Deku Seeds (30)': (["catapult ammo", "lots-o-seeds"], "Deku Seeds (30 pieces)", 'item'),
'Gold Skulltula Token': (["proof of destruction", "an arachnid chip", "spider remains", "one percent of a curse"], "a Gold Skulltula Token", 'item'),
'10 Big Poes': (["#Big Poes# leads to", "#ghost hunters# will be rewarded with"], None, 'alwaysLocation'),
'Deku Theater Skull Mask': ("the #Skull Mask# yields", None, 'location'),
'Deku Theater Mask of Truth': ("the #Mask of Truth# yields", None, 'alwaysLocation'),
'20 Gold Skulltula Reward': ("slaying #20 Gold Skulltulas# reveals", None, 'location'),
'30 Gold Skulltula Reward': ("slaying #30 Gold Skulltulas# reveals", None, 'alwaysLocation'),
'40 Gold Skulltula Reward': ("slaying #40 Gold Skulltulas# reveals", None, 'alwaysLocation'),
'50 Gold Skulltula Reward': ("slaying #50 Gold Skulltulas# reveals", None, 'alwaysLocation'),
'Ocarina of Time': ("the #treasure thrown by Princess Zelda# is", None, 'alwaysLocation'),
'Song from Ocarina of Time': ("the #Ocarina of Time# teaches", None, 'alwaysLocation'),
'Biggoron': ("#Biggoron# crafts", None, 'alwaysLocation'),
'Frog Ocarina Game': (["an #amphibian feast# yields", "the #croaking choir's magnum opus# awards", "the #froggy finale# yields"], "the final reward from the #Frogs of Zora's River# is", 'alwaysLocation'),
'Child Fishing': ("#fishing in youth# bestows", None, 'location'),
'Adult Fishing': ("#fishing in maturity# bestows", None, 'location'),
'Treasure Chest Game': (["#gambling# grants", "there is a #1/32 chance# to win"], "the #treasure chest game# grants", 'location'),
'Darunias Joy': ("#Darunia's dance# leads to", None, 'location'),
'Horseback Archery 1500 Points': ("mastery of #horseback archery# grants", "scoring 1500 in #horseback archery# grants", 'location'),
'Lake Hylia Sun': ("staring into #the sun# grants", "shooting #the sun# grants", 'location'),
'Heart Piece Grave Chest': ("playing #Sun's Song# in a grave spawns", None, 'location'),
'Goron City Leftmost Maze Chest': ("in #Goron City# the hammer unlocks", None, 'location'),
'GS Hyrule Castle Grotto': ("a #storm near the castle# reveals", None, 'location'),
'GS Hyrule Field Near Gerudo Valley': ("buried near #the valley# a spider holds", None, 'location'),
'GS Zora\'s Fountain Hidden Cave': ("a spider high above the #icy waters# holds", None, 'location'),
'Forest Temple Floormaster Chest': ("deep in #the forest#, shadows guard a chest containing", "a Floormaster in #Forest Temple# guards", 'location'),
'Fire Temple Scarecrow Chest': ("high in the #Fire Temple#, Pierre hid", None, 'location'),
'Fire Temple Megaton Hammer Chest': ("high in the #Fire Temple#, Flare Dancers hid", None, 'location'),
'Fire Temple MQ West Tower Top Chest': ("high in the #Fire Temple#, Flare Dancers hid", None, 'location'),
'Water Temple River Chest': ("deep under #the lake#, beyond the currents, hides", "the #Water Temple River Chest# holds", 'location'),
'Water Temple Boss Key Chest': ("deep under #the lake#, the gilded chest contains", "the #Water Temple Gilded Chest# holds", 'location'),
'Water Temple MQ Boss Key Chest': ("deep under #the lake#, the gilded chest contains", "the #Water Temple Gilded Chest# holds", 'location'),
'Water Temple MQ Freestanding Key': ("deep under #the lake#, the apparent key is really", None, 'location'),
'GS Water Temple MQ North Basement': ("deep under #the lake#, the locked spider holds", None, 'location'),
'Gerudo Training Grounds Underwater Silver Rupee Chest': ("those who seek #sunken silver rupees# will find", None, 'location'),
'Gerudo Training Grounds MQ Underwater Silver Rupee Chest': ("those who seek #sunken silver rupees# will find", None, 'location'),
'Gerudo Training Grounds Maze Path Final Chest': ("the final prize of #the thieves\' training# is", None, 'location'),
'Gerudo Training Grounds MQ Ice Arrows Chest': ("the final prize of #the thieves\' training# is", None, 'location'),
'Bottom of the Well Defeat Boss': ("#Dead Hand# holds", "#Dead Hand# in the well holds", 'location'),
'Bottom of the Well MQ Compass Chest': ("#Dead Hand# holds", "#Dead Hand# in the well holds", 'location'),
'Silver Gauntlets Chest': ("upon the #Colossus's right hand# is", None, 'location'),
'Mirror Shield Chest': ("upon the #Colossus's left hand# is", None, 'location'),
'Spirit Temple MQ Child Center Chest': ("within #the Colossus# a temporal paradox yields", None, 'location'),
'Spirit Temple MQ Lower Adult Right Chest': ("within #the Colossus# a symphony yields", None, 'location'),
'GS Spirit Temple MQ Lower Adult Right': ("within #the Colossus# a spider's symphony yields", None, 'location'),
'Shadow Temple Hidden Floormaster Chest': (["shadows in an #invisible maze# guard", "after a free #boat ride# comes"], None, 'location'),
'Shadow Temple MQ Bomb Flower Chest': (["shadows in an #invisible maze# guard", "after a free #boat ride# comes"], None, 'location'),
'Haunted Wasteland Structure Chest': (["deep in the #Wasteland# is", "beneath #the sands#, flames reveal"], None, 'location'),
'Composer Grave Chest': (["in the #Composers' Grave#, darkness hides", "the #Composer Brothers# hid"], None, 'location'),
'Song from Composer Grave': (["in the #Composers' Grave#, ReDead guard", "the #Composer Brothers# wrote"], None, 'location'),
'Sheik Forest Song': ("deep in #the forest# Sheik teaches", None, 'location'),
'Sheik at Temple': ("Sheik waits at a #monument to time# to teach", None, 'location'),
'Sheik in Crater': ("the #crater's melody# is", None, 'location'),
'Sheik in Ice Cavern': ("the #frozen cavern# echoes with", None, 'location'),
'Sheik in Kakariko': ("a #ravaged village# mourns with", None, 'location'),
'Sheik at Colossus': ("a hero ventures beyond #the Wasteland# to learn", None, 'location'),
'Zoras Fountain Bottom Freestanding PoH': ("under the #icy waters# lies", None, 'location'),
'Colossus Freestanding PoH': ("riding a #beanstalk in the desert# leads to", None, 'location'),
'DM Crater Volcano Freestanding PoH': ("riding a #beanstalk in the crater# leads to", None, 'location'),
'Goron City Pot Freestanding PoH': ("spinning #Goron pottery# contains", None, 'location'),
'Deku Tree MQ After Spinning Log Chest': ("within #a tree#, a temporal stone contains", None, 'location'),
'GS Jabu Jabu MQ Invisible Enemies Room': ("in the #belly of a deity#, a spider surrounded by shadows holds", None, 'location'),
'1001': ("Ganondorf 2020!", None, 'junkHint'),
'1002': ("They say that monarchy is a terrible system of governance.", None, 'junkHint'),
'1003': ("They say that Zelda is a poor leader.", None, 'junkHint'),
'1004': ("These hints can be quite useful. This is an exception.", None, 'junkHint'),
'1006': ("They say that all the Zora drowned in Wind Waker.", None, 'junkHint'),
'1007': ("They say that PJ64 is a terrible emulator.", None, 'junkHint'),
'1008': ("'Member when Ganon was a blue pig?^I 'member.", None, 'junkHint'),
'1009': ("One who does not have Triforce can't go in.", None, 'junkHint'),
'1010': ("Save your future, end the Happy Mask Salesman.", None, 'junkHint'),
'1012': ("I'm stoned. Get it?", None, 'junkHint'),
'1013': ("Hoot! Hoot! Would you like me to repeat that?", None, 'junkHint'),
'1014': ("Gorons are stupid. They eat rocks.", None, 'junkHint'),
'1015': ("They say that Lon Lon Ranch prospered under Ingo.", None, 'junkHint'),
'1016': ("The single rupee is a unique item.", None, 'junkHint'),
'1017': ("Without the Lens of Truth, the Treasure Chest Mini-Game is a 1 out of 32 chance.^Good luck!", None, 'junkHint'),
'1018': ("Use bombs wisely.", None, 'junkHint'),
'1021': ("I found you, faker!", None, 'junkHint'),
'1022': ("You're comparing yourself to me?^Ha! You're not even good enough to be my fake.", None, 'junkHint'),
'1023': ("I'll make you eat those words.", None, 'junkHint'),
'1024': ("What happened to Sheik?", None, 'junkHint'),
'1025': ("L2P @.", None, 'junkHint'),
'1026': ("I heard @ isn't very good at Zelda.", None, 'junkHint'),
'1027': ("I'm Lonk from Pennsylvania.", None, 'junkHint'),
'1028': ("I bet you'd like to have more bombs.", None, 'junkHint'),
'1029': ("When all else fails, use Fire.", None, 'junkHint'),
'1030': ("Here's a hint, @. Don't be bad.", None, 'junkHint'),
'1031': ("Game Over. Return of Ganon.", None, 'junkHint'),
'1032': ("May the way of the Hero lead to the Triforce.", None, 'junkHint'),
'1033': ("Can't find an item? Scan an Amiibo.", None, 'junkHint'),
'1034': ("They say this game has just a few glitches.", None, 'junkHint'),
'1035': ("BRRING BRRING This is Ulrira. Wrong number?", None, 'junkHint'),
'1036': ("Tingle Tingle Kooloo Limpah", None, 'junkHint'),
'1037': ("L is real 2041", None, 'junkHint'),
'1038': ("They say that Ganondorf will appear in the next Mario Tennis.", None, 'junkHint'),
'1039': ("Medigoron sells the earliest Breath of the Wild demo.", None, 'junkHint'),
'1040': ("There's a reason why I am special inquisitor!", None, 'junkHint'),
'1041': ("You were almost a @ sandwich.", None, 'junkHint'),
'1042': ("I'm a helpful hint Gossip Stone!^See, I'm helping.", None, 'junkHint'),
'1043': ("Dear @, please come to the castle. I've baked a cake for you.&&Yours truly, princess Zelda.", None, 'junkHint'),
'1044': ("They say all toasters toast toast.", None, 'junkHint'),
'1045': ("They say that Okami is the best Zelda game.", None, 'junkHint'),
'1046': ("They say that quest guidance can be found at a talking rock.", None, 'junkHint'),
'1047': ("They say that the final item you're looking for can be found somewhere in Hyrule.", None, 'junkHint'),
'1048': ("Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.^Mweep.", None, 'junkHint'),
'1049': ("They say that Barinade fears Deku Nuts.", None, 'junkHint'),
'1050': ("They say that Flare Dancers do not fear Goron-crafted blades.", None, 'junkHint'),
'1051': ("They say that Morpha is easily trapped in a corner.", None, 'junkHint'),
'1052': ("They say that Bongo Bongo really hates the cold.", None, 'junkHint'),
'1053': ("They say that crouch stabs mimic the effects of your last attack.", None, 'junkHint'),
'1054': ("They say that bombing the hole Volvagia last flew into can be rewarding.", None, 'junkHint'),
'1055': ("They say that invisible ghosts can be exposed with Deku Nuts.", None, 'junkHint'),
'1056': ("They say that the real Phantom Ganon is bright and loud.", None, 'junkHint'),
'1057': ("They say that walking backwards is very fast.", None, 'junkHint'),
'1058': ("They say that leaping above the Castle Town entrance enriches most children.", None, 'junkHint'),
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx handy marker for how long one line should be in a text box
'Deku Tree': ("an ancient tree", "Deku Tree", 'dungeon'),
'Dodongos Cavern': ("an immense cavern", "Dodongo's Cavern", 'dungeon'),
'Jabu Jabus Belly': ("the belly of a deity", "Jabu Jabu's Belly", 'dungeon'),
'Forest Temple': ("a deep forest", "Forest Temple", 'dungeon'),
'Fire Temple': ("a high mountain", "Fire Temple", 'dungeon'),
'Water Temple': ("a vast lake", "Water Temple", 'dungeon'),
'Shadow Temple': ("the house of the dead", "Shadow Temple", 'dungeon'),
'Spirit Temple': ("the goddess of the sand", "Spirit Temple", 'dungeon'),
'Ice Cavern': ("a frozen maze", "Ice Cavern", 'dungeon'),
'Bottom of the Well': ("a shadow\'s prison", "Bottom of the Well", 'dungeon'),
'Gerudo Training Grounds': ("the test of thieves", "Gerudo Training Grounds", 'dungeon'),
'Ganons Castle': ("a conquered citadel", "Ganon's Castle", 'dungeon'),
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx handy marker for how long one line should be in a text box
'Queen Gohma': ("One inside an #ancient tree#...^", "One in the #Deku Tree#...^", 'boss'),
'King Dodongo': ("One within an #immense cavern#...^", "One in #Dodongo's Cavern#...^", 'boss'),
'Barinade': ("One in the #belly of a deity#...^", "One in #Jabu Jabu's Belly#...^", 'boss'),
'Phantom Ganon': ("One in a #deep forest#...^", "One in the #Forest Temple#...^", 'boss'),
'Volvagia': ("One on a #high mountain#...^", "One in the #Fire Temple#...^", 'boss'),
'Morpha': ("One under a #vast lake#...^", "One in the #Water Temple#...^", 'boss'),
'Bongo Bongo': ("One within the #house of the dead#...^", "One in the #Shadow Temple#...^", 'boss'),
'Twinrova': ("One inside a #goddess of the sand#...^", "One in the #Spirit Temple#...^", 'boss'),
'Links Pocket': ("One in #@'s pocket#...^", "One #@ already has#...^", 'boss'),
'Spiritual Stone Text Start': ("Ye who owns 3 Spiritual Stones...^", None, 'boss'),
'Spiritual Stone Text End': ("\x13\x08Stand with the Ocarina of Time&and play the Song of Time.", None, 'boss'),
'Medallion Text Start': ("When evil rules all, an awakening&voice from the Sacred Realm will&call those destined to be Sages,&who dwell in the \x05\x41five temples\x05\x40.^", None, 'boss'),
'Medallion Text End': ("\x13\x12Together with the Hero of Time,&the awakened ones will bind&the evil and return the light&of peace to the world.", None, 'boss'),
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx handy marker for how long one line should be in a text box
'Validation Line': ("Hmph... Since you made it this far,&I'll let you know what glorious&prize of Ganon's you likely&missed out on in my tower.^Behold...^", None, 'validation line'),
'Light Arrow Location': ("Ha ha ha... You'll never beat me by&reflecting my lightning bolts&and unleashing the arrows from&", None, 'Light Arrow Location'),
'2001': ("Oh! It's @.&I was expecting someone called&Sheik. Do you know what&happened to them?", None, 'ganonLine'),
'2002': ("I knew I shouldn't have put the key&on the other side of my door.", None, 'ganonLine'),
'2003': ("Looks like it's time for a&round of tennis.", None, 'ganonLine'),
'2004': ("You'll never deflect my bolts of&energy with your sword,&then shoot me with those Light&Arrows you happen to have.", None, 'ganonLine'),
'2005': ("Why did I leave my trident&back in the desert?", None, 'ganonLine'),
'2006': ("Zelda is probably going to do&something stupid, like send you&back to your own timeline.^So this is quite meaningless.&Do you really want&to save this moron?", None, 'ganonLine'),
'2007': ("What about Zelda makes you think&she'd be a better ruler than I?^I saved Lon Lon Ranch,&fed the hungry,&and my castle floats.", None, 'ganonLine'),
'2008': ("I've learned this spell,&it's really neat,&I'll keep it later&for your treat!", None, 'ganonLine'),
'2009': ("Many tricks are up my sleeve,&to save yourself&you'd better leave!", None, 'ganonLine'),
'2010': ("After what you did to&Koholint Island, how can&you call me the bad guy?", None, 'ganonLine'),
'2011': ("Today, let's begin down&'The Hero is Defeated' timeline.", None, 'ganonLine'),
}
# This specifies which hints will never appear, either because their contents are known (or known to be useless) or because the locations do not exist.
def hintExclusions(world, clear_cache=False):
    """Return (and cache) the hint names that must never be used for *world*.

    Excluded are disabled locations, locked locations, and location-type
    hints whose location does not exist in this world.  The result is
    cached on the function object itself; pass clear_cache=True to rebuild.
    (Also removed stray dataset text that had been fused onto the cache
    initialization line.)
    """
    if not clear_cache and hintExclusions.exclusions is not None:
        return hintExclusions.exclusions
    exclusions = []
    exclusions.extend(world.disabled_locations)
    # Fetch the location list once instead of twice.
    locations = world.get_locations()
    for location in locations:
        if location.locked:
            exclusions.append(location.name)
    # Set for O(1) membership tests below.
    world_location_names = {location.name for location in locations}
    for name in hintTable:
        hint = getHint(name, world.clearer_hints)
        if hint.type in ('location', 'alwaysLocation') \
                and hint.name not in world_location_names:
            exclusions.append(hint.name)
    # Publish the cache only after it is fully built.
    hintExclusions.exclusions = exclusions
    return hintExclusions.exclusions

# Cache storage for hintExclusions(); populated on first call.
hintExclusions.exclusions = None
import argparse
import re
import math
from .Cosmetics import get_tunic_color_options, get_navi_color_options, get_sword_color_options
from .LocationList import location_table
from . import Sounds as sfx
# holds the info for a single setting
class Setting_Info():
    """Metadata describing a single randomizer setting."""

    def __init__(self, name, type, bitwidth=0, shared=False, args_params=None,
                 gui_params=None):
        """
        Args:
            name: key used to retrieve the setting's value everywhere.
            type: type of the setting's value, used by the GUI code.
            bitwidth: bits needed to encode the setting in a settings string.
            shared: whether the setting is shared via the settings string.
            args_params: kwargs passed to argparse's add_argument().
            gui_params: parameters the GUI uses to build its widgets.
        """
        self.name = name
        self.type = type
        self.bitwidth = bitwidth
        self.shared = shared
        # Bug fix: the original used a mutable default ({}) for args_params
        # and then wrote 'choices' into it, leaking state across instances.
        self.args_params = {} if args_params is None else args_params
        self.gui_params = gui_params

        # Derive the argparse 'choices' list from the GUI options when the
        # caller did not supply choices or a custom parsing callable.
        if gui_params and 'options' in gui_params \
                and 'choices' not in self.args_params \
                and not ('type' in self.args_params
                         and callable(self.args_params['type'])):
            if isinstance(gui_params['options'], list):
                self.args_params['choices'] = list(gui_params['options'])
            elif isinstance(gui_params['options'], dict):
                self.args_params['choices'] = list(gui_params['options'].values())
class Setting_Widget(Setting_Info):
    """A Setting_Info backed by a GUI widget with a fixed set of choices."""

    def __init__(self, name, type, choices, default, args_params=None,
                 gui_params=None, shared=False):
        """
        Args:
            name: setting key.
            type: value type.
            choices: mapping of internal value -> GUI label.
            default: default internal value (must be a key of choices).
            args_params: extra argparse kwargs (must not set default/choices).
            gui_params: extra GUI kwargs (must not set default/options).
            shared: whether the setting is shared via the settings string.
        """
        # Bug fix: fresh dicts per instance.  Both dicts are mutated below,
        # so the original mutable defaults leaked state across settings.
        args_params = {} if args_params is None else args_params
        gui_params = {} if gui_params is None else gui_params

        assert 'default' not in args_params and 'default' not in gui_params, \
            'Setting {}: default shouldn\'t be defined in '\
            'args_params or in gui_params'.format(name)
        assert 'choices' not in args_params, \
            'Setting {}: choices shouldn\'t be defined in '\
            'args_params'.format(name)
        assert 'options' not in gui_params, \
            'Setting {}: options shouldn\'t be defined in '\
            'gui_params'.format(name)

        if 'type' not in args_params: args_params['type'] = type
        if 'type' not in gui_params: gui_params['type'] = type

        self.choices = choices
        self.default = default
        args_params['choices'] = list(choices.keys())
        args_params['default'] = default
        gui_params['options'] = {v: k for k, v in choices.items()}
        gui_params['default'] = choices[default]

        super().__init__(name, type, self.calc_bitwidth(choices), shared,
                         args_params, gui_params)

    def calc_bitwidth(self, choices):
        """Number of bits needed to encode one of *choices* (0 if empty)."""
        count = len(choices)
        if count > 0:
            return math.ceil(math.log(count, 2))
        return 0
class Checkbutton(Setting_Widget):
    """A boolean setting rendered as a checkbox."""

    def __init__(self, name, args_help, gui_text, gui_group=None,
                 gui_tooltip=None, gui_dependency=None, default=False,
                 shared=False):
        choices = {
            True:  'checked',
            False: 'unchecked',
        }
        gui_params = {
            'text':   gui_text,
            'widget': 'Checkbutton',
        }
        if gui_group is not None:      gui_params['group'] = gui_group
        if gui_tooltip is not None:    gui_params['tooltip'] = gui_tooltip
        if gui_dependency is not None: gui_params['dependency'] = gui_dependency
        args_params = {
            'help': args_help,
        }

        super().__init__(name, bool, choices, default, args_params, gui_params,
                         shared)
        # Override the 'type' set by the base class: argparse would otherwise
        # call bool() on the raw string, which is True for any non-empty input.
        self.args_params['type'] = Checkbutton.parse_bool

    @staticmethod
    def parse_bool(s):
        """Convert a command line string to a bool, or raise.

        Bug fix: declared @staticmethod — the original plain function would
        receive the instance as *s* if ever called through an instance.
        """
        if s.lower() in ['yes', 'true', 't', 'y', '1']:
            return True
        elif s.lower() in ['no', 'false', 'f', 'n', '0']:
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')
class Combobox(Setting_Widget):
    """A multiple-choice setting rendered as a dropdown list."""

    def __init__(self, name, choices, default, args_help, gui_text=None,
                 gui_group=None, gui_tooltip=None, gui_dependency=None,
                 shared=False):
        gui_params = {'widget': 'Combobox'}
        # Copy only the optional GUI parameters that were supplied,
        # preserving the original insertion order.
        optional = (
            ('text', gui_text),
            ('group', gui_group),
            ('tooltip', gui_tooltip),
            ('dependency', gui_dependency),
        )
        for key, value in optional:
            if value is not None:
                gui_params[key] = value
        args_params = {'help': args_help}
        super().__init__(name, str, choices, default, args_params, gui_params,
                         shared)
class Scale(Setting_Widget):
    """An integer setting rendered as a slider over [min, max] with a step."""

    def __init__(self, name, min, max, default, args_help, step=1,
                 gui_text=None, gui_group=None, gui_tooltip=None,
                 gui_dependency=None, shared=False):
        # NOTE: 'min'/'max' shadow the builtins but are kept for interface
        # compatibility with existing callers.
        # Perf fix: build the choices dict directly instead of rebuilding it
        # from scratch on every loop iteration (was O(n^2)).
        choices = {i: str(i) for i in range(min, max + 1, step)}
        gui_params = {
            'min':    min,
            'max':    max,
            'step':   step,
            'widget': 'Scale',
        }
        if gui_text is not None:       gui_params['text'] = gui_text
        if gui_group is not None:      gui_params['group'] = gui_group
        if gui_tooltip is not None:    gui_params['tooltip'] = gui_tooltip
        if gui_dependency is not None: gui_params['dependency'] = gui_dependency
        args_params = {
            'help': args_help,
        }

        super().__init__(name, int, choices, default, args_params, gui_params,
                         shared)
def parse_custom_tunic_color(s):
    """Parse a tunic color argument against the tunic palette; see parse_color()."""
    return parse_color(s, get_tunic_color_options())
def parse_custom_navi_color(s):
    """Parse a Navi color argument against the Navi palette; see parse_color()."""
    return parse_color(s, get_navi_color_options())
def parse_custom_sword_color(s):
    """Parse a sword trail color argument against the sword palette; see parse_color()."""
    return parse_color(s, get_sword_color_options())
def parse_color(s, color_choices):
    """Validate a color argument and return its canonical form.

    Accepts either a named color from *color_choices* or the literal form
    'Custom (#rrggbb)', whose six hex digits are returned.  Raises
    argparse.ArgumentTypeError for anything else.
    """
    if s == 'Custom Color':
        raise argparse.ArgumentTypeError('Specify custom color by using \'Custom (#xxxxxx)\'')
    if re.match(r'^Custom \(#[A-Fa-f0-9]{6}\)$', s):
        return re.findall(r'[A-Fa-f0-9]{6}', s)[0]
    if s in color_choices:
        return s
    raise argparse.ArgumentTypeError('Invalid color specified')
def logic_tricks_entry_tooltip(widget, pos):
    """Tooltip text for the trick named in the entry widget, or None."""
    val = widget.get()
    if val not in logic_tricks:
        return None
    text = val + '\n\n' + logic_tricks[val]['tooltip']
    # Collapse the indented triple-quoted tooltip into clean lines.
    return '\n'.join(line.strip() for line in text.splitlines()).strip()
def logic_tricks_list_tooltip(widget, pos):
    """Tooltip callback for the tricks list widget.

    Resolves the list entry at mouse position *pos* and returns its trick
    description, or None when that entry is not a known trick name.
    """
    entry_index = widget.index("@%s,%s" % (pos))
    trick_name = widget.get(entry_index)
    if trick_name not in logic_tricks:
        return None
    raw = trick_name + '\n\n' + logic_tricks[trick_name]['tooltip']
    # Drop the source-literal indentation from every line before display.
    return '\n'.join(line.strip() for line in raw.splitlines()).strip()
logic_tricks = {
'Morpha with Gold Scale': {
'name' : 'logic_morpha_with_scale',
'tooltip' : '''\
Allows entering Water Temple and beating
Morpha with Gold Scale instead of Iron Boots.
Only applicable for keysanity and keysy due
to the logic always seeing every chest in
Water Temple that could contain the Boss Key
as requiring Iron Boots.
'''},
'Fewer Tunic Requirements': {
'name' : 'logic_fewer_tunic_requirements',
'tooltip' : '''\
Allows the following possible without Tunics:
- Enter Water Temple. The key below the center
pillar still requires Zora Tunic.
- Enter Fire Temple. Only the first floor is
accessible, and not Volvagia.
- Zora's Fountain Bottom Freestanding PoH.
Might not have enough health to resurface.
- Gerudo Training Grounds Underwater
Silver Rupee Chest. May need to make multiple
trips.
'''},
'Child Deadhand without Kokiri Sword': {
'name' : 'logic_child_deadhand',
'tooltip' : '''\
Requires 9 sticks or 5 jump slashes.
'''},
'Man on Roof without Hookshot': {
'name' : 'logic_man_on_roof',
'tooltip' : '''\
Can be reached by side-hopping off
the watchtower.
'''},
'Dodongo\'s Cavern Staircase with Bow': {
'name' : 'logic_dc_staircase',
'tooltip' : '''\
The Bow can be used to knock down the stairs
with two well-timed shots.
'''},
'Dodongo\'s Cavern Spike Trap Room Jump without Hover Boots': {
'name' : 'logic_dc_jump',
'tooltip' : '''\
Jump is adult only.
'''},
'Gerudo Fortress "Kitchen" with No Additional Items': {
'name' : 'logic_gerudo_kitchen',
'tooltip' : '''\
The logic normally guarantees one of Bow, Hookshot,
or Hover Boots.
'''},
'Deku Tree Basement Vines GS with Jump Slash': {
'name' : 'logic_deku_basement_gs',
'tooltip' : '''\
Can be defeated by doing a precise jump slash.
'''},
'Hammer Rusted Switches Through Walls': {
'name' : 'logic_rusted_switches',
'tooltip' : '''\
Applies to:
- Fire Temple Highest Goron Chest.
- MQ Fire Temple Lizalfos Maze.
- MQ Spirit Trial.
'''},
'Bottom of the Well Basement Chest with Strength & Sticks': {
'name' : 'logic_botw_basement',
'tooltip' : '''\
The chest in the basement can be reached with
strength by doing a jump slash with a lit
stick to access the bomb flowers.
'''},
'Skip Forest Temple MQ Block Puzzle with Bombchu': {
'name' : 'logic_forest_mq_block_puzzle',
'tooltip' : '''\
Send the Bombchu straight up the center of the
wall directly to the left upon entering the room.
'''},
'Spirit Temple Child Side Bridge with Bombchu': {
'name' : 'logic_spirit_child_bombchu',
'tooltip' : '''\
A carefully-timed Bombchu can hit the switch.
'''},
'Windmill PoH as Adult with Nothing': {
'name' : 'logic_windmill_poh',
'tooltip' : '''\
Can jump up to the spinning platform from
below as adult.
'''},
'Crater\'s Bean PoH with Hover Boots': {
'name' : 'logic_crater_bean_poh_with_hovers',
'tooltip' : '''\
Hover from the base of the bridge
near Goron City and walk up the
very steep slope.
'''},
'Gerudo Training Grounds MQ Left Side Silver Rupees with Hookshot': {
'name' : 'logic_gtg_mq_with_hookshot',
'tooltip' : '''\
The highest silver rupee can be obtained by
hookshotting the target and then immediately jump
slashing toward the rupee.
'''},
'Forest Temple East Courtyard Vines with Hookshot': {
'name' : 'logic_forest_vines',
'tooltip' : '''\
The vines in Forest Temple leading to where the well
drain switch is in the standard form can be barely
reached with just the Hookshot.
'''},
'Swim Through Forest Temple MQ Well with Hookshot': {
'name' : 'logic_forest_well_swim',
'tooltip' : '''\
Shoot the vines in the well as low and as far to
the right as possible, and then immediately swim
under the ceiling to the right. This can only be
required if Forest Temple is in its Master Quest
form.
'''},
'Death Mountain Trail Bombable Chest with Strength': {
'name' : 'logic_dmt_bombable',
'tooltip' : '''\
Child Link can blow up the wall using a nearby bomb
flower. You must backwalk with the flower and then
quickly throw it toward the wall.
'''},
'Water Temple Boss Key Chest with No Additional Items': {
'name' : 'logic_water_bk_chest',
'tooltip' : '''\
After reaching the Boss Key chest's area with Iron Boots
and Longshot, the chest can be reached with no additional
items aside from Small Keys. Stand on the blue switch
with the Iron Boots, wait for the water to rise all the
way up, and then swim straight to the exit. You should
grab the ledge as you surface. It works best if you don't
mash B.
'''},
'Adult Kokiri Forest GS with Hover Boots': {
'name' : 'logic_adult_kokiri_gs',
'tooltip' : '''\
Can be obtained without Hookshot by using the Hover
Boots off of one of the roots.
'''},
'Spirit Temple MQ Frozen Eye Switch without Fire': {
'name' : 'logic_spirit_mq_frozen_eye',
'tooltip' : '''\
You can melt the ice by shooting an arrow through a
torch. The only way to find a line of sight for this
shot is to first spawn a Song of Time block, and then
stand on the very edge of it.
'''},
'Fire Temple MQ Boss Key Chest without Bow': {
'name' : 'logic_fire_mq_bk_chest',
'tooltip' : '''\
Din\'s alone can be used to unbar the door to
the boss key chest's room thanks to an
oversight in the way the game counts how many
torches have been lit.
'''},
'Zora\'s Domain Entry with Cucco': {
'name' : 'logic_zora_with_cucco',
'tooltip' : '''\
Can fly behind the waterfall with
a cucco as child.
'''},
'Zora\'s Domain Entry with Hover Boots': {
'name' : 'logic_zora_with_hovers',
'tooltip' : '''\
Can hover behind the waterfall as adult.
This is very difficult.
'''},
}
# Master list of every available setting; the GUI and the argument parser
# are both generated from these Setting_Info/Setting_Widget entries.
setting_infos = [
Setting_Info('check_version', bool, 0, False,
{
'help': '''\
Checks if you are on the latest version
''',
'action': 'store_true'
}),
Setting_Info('checked_version', str, 0, False, {
'default': '',
'help': 'Supress version warnings if checked_version is less than __version__.'}),
Setting_Info('rom', str, 0, False, {
'default': '',
'help': 'Path to an OoT 1.0 rom to use as a base.'}),
Setting_Info('output_dir', str, 0, False, {
'default': '',
'help': 'Path to output directory for rom generation.'}),
Setting_Info('output_file', str, 0, False, {
'default': '',
'help': 'File name base to use for all generated files.'}),
Setting_Info('seed', str, 0, False, {
'help': 'Define seed number to generate.'}),
Setting_Info('patch_file', str, 0, False, {
'default': '',
'help': 'Path to a patch file.'}),
Setting_Info('cosmetics_only', bool, 0, False,
{
'help': 'Patched file will only have cosmetics applied.',
'action': 'store_true',
}),
Setting_Info('count', int, 0, False, {
'help': '''\
Use to batch generate multiple seeds with same settings.
If --seed is provided, it will be used for the first seed, then
used to derive the next seed (i.e. generating 10 seeds with
--seed given will produce the same 10 (different) roms each
time).
''',
'type': int}),
Setting_Info('world_count', int, 5, True, {
'default': 1,
'help': '''\
Use to create a multi-world generation for co-op seeds.
World count is the number of players. Warning: Increasing
the world count will drastically increase generation time.
''',
'type': int}, {}),
Setting_Info('player_num', int, 0, False,
{
'default': 1,
'help': '''\
Use to select world to generate when there are multiple worlds.
''',
'type': int
},
{
'dependency': lambda settings: 1 if settings.compress_rom in ['None', 'Patch'] else None,
}),
Checkbutton(
name = 'create_spoiler',
args_help = '''\
Output a Spoiler File
''',
gui_text = 'Create Spoiler Log',
gui_group = 'rom_tab',
gui_tooltip = '''\
Enabling this will change the seed.
''',
default = True,
shared = True,
),
Checkbutton(
name='create_cosmetics_log',
args_help='''\
Output a Cosmetics Log
''',
gui_text='Create Cosmetics Log',
gui_group='rom_tab',
gui_dependency=lambda settings: False if settings.compress_rom in ['None', 'Patch'] else None,
default=True,
shared=False,
),
Setting_Widget(
name='compress_rom',
type=str,
default='True',
choices={
'True': 'Compressed [Stable]',
'False': 'Uncompressed [Crashes]',
'Patch': 'Patch File',
'None': 'No Output',
},
args_params={
'help': '''\
Create a compressed version of the output ROM file.
True: Compresses. Improves stability. Will take longer to generate
False: Uncompressed. Unstable. Faster generation
Patch: Patch file. No ROM, but used to send the patch data
None: No ROM Output. Creates spoiler log only
''',
},
gui_params={
'text': 'Output Type',
'group': 'rom_tab',
'widget': 'Radiobutton',
'horizontal': True,
'tooltip':'''\
The first time compressed generation will take a while,
but subsequent generations will be quick. It is highly
recommended to compress or the game will crash
frequently except on real N64 hardware.
Patch files are used to send the patched data to other
people without sending the ROM file.
'''
},
shared=False,
),
Checkbutton(
name = 'open_forest',
args_help = '''\
Mido no longer blocks the path to the Deku Tree, and
the Kokiri boy no longer blocks the path out of the forest.
''',
gui_text = 'Open Forest',
gui_group = 'open',
gui_tooltip = '''\
Mido no longer blocks the path to the Deku Tree,
and the Kokiri boy no longer blocks the path out
of the forest.
When this option is off, the Kokiri Sword and
Slingshot are always available somewhere
in the forest.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'open_kakariko',
args_help = '''\
The gate in Kakariko Village to Death Mountain Trail
is always open instead of needing Zelda's Letter.
''',
gui_text = 'Open Kakariko Gate',
gui_group = 'open',
gui_tooltip = '''\
The gate in Kakariko Village to Death Mountain Trail
is always open instead of needing Zelda's Letter.
Either way, the gate is always open as an adult.
''',
shared = True,
),
Checkbutton(
name = 'open_door_of_time',
args_help = '''\
The Door of Time is open from the beginning of the game.
''',
gui_text = 'Open Door of Time',
gui_group = 'open',
gui_tooltip = '''\
The Door of Time starts opened instead of needing to
play the Song of Time. If this is not set, only
an Ocarina and Song of Time must be found to open
the Door of Time.
''',
shared = True,
),
Checkbutton(
name = 'open_fountain',
args_help = '''\
King Zora is moved from the beginning of the game.
''',
gui_text = 'Open Zora\'s Fountain',
gui_group = 'open',
gui_tooltip = '''\
King Zora starts out as moved. This also removes
Ruto's Letter from the item pool.
''',
shared = True,
),
Combobox(
name = 'gerudo_fortress',
default = 'normal',
choices = {
'normal': 'Default Behavior',
'fast': 'Rescue One Carpenter',
'open': 'Start with Gerudo Card',
},
args_help = '''\
Select how much of Gerudo Fortress is required. (default: %(default)s)
Normal: Free all four carpenters to get the Gerudo Card.
Fast: Free only the carpenter closest to Link's prison to get the Gerudo Card.
Open: Start with the Gerudo Card and all its benefits.
''',
gui_text = 'Gerudo Fortress',
gui_group = 'open',
gui_tooltip = '''\
'Rescue One Carpenter': Only the bottom left
carpenter must be rescued.
'Start with Gerudo Card': The carpenters are rescued from
the start of the game, and the player starts with the Gerudo
Card in the inventory allowing access to Gerudo Training Grounds.
''',
shared = True,
),
Combobox(
name = 'bridge',
default = 'medallions',
choices = {
'open': 'Always Open',
'vanilla': 'Vanilla Requirements',
'stones': 'All Spiritual Stones',
'medallions': 'All Medallions',
'dungeons': 'All Dungeons',
'tokens': '100 Gold Skulltula Tokens'
},
args_help = '''\
Select requirement to spawn the Rainbow Bridge to reach Ganon's Castle. (default: %(default)s)
open: The bridge will spawn without an item requirement.
vanilla: Collect only the Shadow and Spirit Medallions and possess the Light Arrows.
stones: Collect all three Spiritual Stones to create the bridge.
medallions: Collect all six Medallions to create the bridge.
dungeons: Collect all Spiritual Stones and all Medallions to create the bridge.
tokens: Collect all 100 Gold Skulltula tokens.
''',
gui_text = 'Rainbow Bridge Requirement',
gui_group = 'open',
gui_tooltip = '''\
'Always Open': Rainbow Bridge is always present.
'Vanilla Requirements': Spirit/Shadow Medallions and Light Arrows.
'All Spiritual Stones': All 3 Spiritual Stones.
'All Medallions': All 6 Medallions.
'All Dungeons': All Medallions and Spiritual Stones.
'100 Gold Skulltula Tokens': All 100 Gold Skulltula Tokens.
''',
shared = True,
),
Combobox(
name = 'logic_rules',
default = 'glitchless',
choices = {
'glitchless': 'Glitchless',
'none': 'No Logic',
},
args_help = '''\
Sets the rules the logic uses to determine accessibility:
glitchless: No glitches are required, but may require some minor tricks
none: All locations are considered available. May not be beatable.
''',
gui_text = 'Logic Rules',
gui_group = 'world',
gui_tooltip = '''\
Sets the rules the logic uses
to determine accessibility.
'Glitchless': No glitches are
required, but may require some
minor tricks
'No Logic': All locations are
considered available. May not
be beatable.
''',
shared = True,
),
Checkbutton(
name = 'all_reachable',
args_help = '''\
When disabled, only check if the game is beatable with
placement. Do not ensure all locations are reachable.
''',
gui_text = 'All Locations Reachable',
gui_group = 'world',
gui_tooltip = '''\
When this option is enabled, the randomizer will
guarantee that every item is obtainable and every
location is reachable.
When disabled, only required items and locations
to beat the game will be guaranteed reachable.
Even when enabled, some locations may still be able
to hold the keys needed to reach them.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'bombchus_in_logic',
args_help = '''\
Bombchus will be considered in logic. This has a few effects:
-Back Alley shop will open once you've found Bombchus.
-It will sell an affordable pack (5 for 60) and never sell out.
-Bombchus refills cannot be bought until Bombchus have been obtained.
''',
gui_text = 'Bombchus Are Considered in Logic',
gui_group = 'world',
gui_tooltip = '''\
Bombchus are properly considered in logic.
The first Bombchu pack will always be 20.
Subsequent packs will be 5 or 10 based on
how many you have.
Bombchus can be purchased for 60/99/180
rupees once they have been found.
Bombchu Bowling opens with Bombchus.
Bombchus are available at Kokiri Shop
and the Bazaar. Bombchu refills cannot
be bought until Bombchus have been
obtained.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'one_item_per_dungeon',
args_help = '''\
Each dungeon will have exactly one major item.
Does not include dungeon items or GS Tokens.
''',
gui_text = 'Dungeons Have One Major Item',
gui_group = 'world',
gui_tooltip = '''\
Dungeons have exactly one major
item. This naturally makes each
dungeon similar in value instead
of valued based on chest count.
Spirit Temple Colossus hands count
as part of the dungeon. Spirit
Temple has TWO items to match
vanilla distribution.
Dungeon items and GS Tokens do
not count as major items.
''',
shared = True,
),
Checkbutton(
name = 'trials_random',
args_help = '''\
Sets the number of trials must be cleared to enter
Ganon's Tower to a random value.
''',
gui_text = 'Random Number of Ganon\'s Trials',
gui_group = 'open',
gui_tooltip = '''\
Sets a random number of trials to
enter Ganon's Tower.
''',
shared = True,
),
Scale(
name = 'trials',
default = 6,
min = 0,
max = 6,
args_help = '''\
Select how many trials must be cleared to enter Ganon's Tower.
The trials you must complete will be selected randomly.
''',
gui_group = 'open',
gui_tooltip = '''\
Trials are randomly selected. If hints are
enabled, then there will be hints for which
trials need to be completed.
''',
gui_dependency = lambda settings: 0 if settings.trials_random else None,
shared = True,
),
Checkbutton(
name = 'no_escape_sequence',
args_help = '''\
The tower escape sequence between Ganondorf and Ganon will be skipped.
''',
gui_text = 'Skip Tower Escape Sequence',
gui_group = 'convenience',
gui_tooltip = '''\
The tower escape sequence between
Ganondorf and Ganon will be skipped.
''',
shared = True,
),
Checkbutton(
name = 'no_guard_stealth',
args_help = '''\
The crawlspace into Hyrule Castle will take you straight to Zelda.
''',
gui_text = 'Skip Child Stealth',
gui_group = 'convenience',
gui_tooltip = '''\
The crawlspace into Hyrule Castle goes
straight to Zelda, skipping the guards.
''',
shared = True,
),
Checkbutton(
name = 'no_epona_race',
args_help = '''\
Having Epona's Song will allow you to summon Epona without racing Ingo.
''',
gui_text = 'Skip Epona Race',
gui_group = 'convenience',
gui_tooltip = '''\
Epona can be summoned with Epona's Song
without needing to race Ingo.
''',
shared = True,
),
Checkbutton(
name = 'fast_chests',
args_help = '''\
Makes all chests open without the large chest opening cutscene.
''',
gui_text = 'Fast Chest Cutscenes',
gui_group = 'convenience',
gui_tooltip = '''\
All chest animations are fast. If disabled,
the animation time is slow for major items.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'logic_no_night_tokens_without_suns_song',
args_help = '''\
You will not be expected to collect nighttime-only skulltulas
unless you have Sun's Song
''',
gui_text = 'Nighttime Skulltulas Expect Sun\'s Song',
gui_group = 'convenience',
gui_tooltip = '''\
GS Tokens that can only be obtained
during the night expect you to have Sun's
Song to collect them. This prevents needing
to wait until night for some locations.
''',
shared = True,
),
Checkbutton(
name = 'free_scarecrow',
args_help = '''\
Scarecrow's Song is no longer needed to summon Pierre.
''',
gui_text = 'Free Scarecrow\'s Song',
gui_group = 'convenience',
gui_tooltip = '''\
Pulling out the Ocarina near a
spot at which Pierre can spawn will
do so, without needing the song.
''',
shared = True,
),
Checkbutton(
name = 'start_with_fast_travel',
args_help = '''\
Start with two warp songs and Farore's Wind.
''',
gui_text = 'Start with Fast Travel',
gui_group = 'convenience',
gui_tooltip = '''\
Start the game with Prelude of Light,
Serenade of Water, and Farore's Wind.
Two song locations will give items,
instead of Prelude and Serenade.
''',
shared = True,
),
Checkbutton(
name = 'start_with_rupees',
args_help = '''\
Start with 99 rupees.
''',
gui_text = 'Start with Max Rupees',
gui_group = 'convenience',
gui_tooltip = '''\
Start the game with 99 rupees.
''',
shared = True,
),
Checkbutton(
name = 'start_with_wallet',
args_help = '''\
Start with Tycoon's Wallet.
''',
gui_text = 'Start with Tycoon\'s Wallet',
gui_group = 'convenience',
gui_tooltip = '''\
Start the game with the largest wallet (999 max).
''',
shared = True,
),
Checkbutton(
name = 'start_with_deku_equipment',
args_help = '''\
Start with full Deku sticks, nuts, and a shield.
''',
gui_text = 'Start with Deku Equipment',
gui_group = 'convenience',
gui_tooltip = '''\
Start the game with 10 Deku sticks and 20 Deku nuts.
Additionally, start the game with a Deku shield equipped,
unless playing with the Shopsanity setting.
''',
shared = True,
),
Checkbutton(
name = 'big_poe_count_random',
args_help = '''\
Sets a random number of Big Poes to receive an item from the buyer.
''',
gui_text = 'Random Big Poe Target Count',
gui_group = 'convenience',
gui_tooltip = '''\
The Poe buyer will give a reward for turning
in a random number of Big Poes.
''',
shared = True,
),
Scale(
name = 'big_poe_count',
default = 10,
min = 1,
max = 10,
args_help = '''\
Select the number of Big Poes to receive an item from the buyer.
''',
gui_group = 'convenience',
gui_tooltip = '''\
The Poe buyer will give a reward for turning
in the chosen number of Big Poes.
''',
gui_dependency = lambda settings: 1 if settings.big_poe_count_random else None,
shared = True,
),
Checkbutton(
name = 'shuffle_kokiri_sword',
args_help = '''\
Shuffles the Kokiri Sword into the pool.
''',
gui_text = 'Shuffle Kokiri Sword',
gui_group = 'shuffle',
gui_tooltip = '''\
Enabling this shuffles the Kokiri Sword into the pool.
This will require extensive use of sticks until the
sword is found.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'shuffle_ocarinas',
args_help = '''\
Shuffles the Fairy Ocarina and the Ocarina of Time into the pool.
''',
gui_text = 'Shuffle Ocarinas',
gui_group = 'shuffle',
gui_tooltip = '''\
Enabling this shuffles the Fairy Ocarina and the Ocarina
of Time into the pool.
This will require finding an Ocarina before being able
to play songs.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'shuffle_weird_egg',
args_help = '''\
Shuffles the Weird Egg from Malon into the pool.
''',
gui_text = 'Shuffle Weird Egg',
gui_group = 'shuffle',
gui_tooltip = '''\
Enabling this shuffles the Weird Egg from Malon into the pool.
This will require finding the Weird Egg to talk to Zelda in
Hyrule Castle, which in turn locks rewards from Impa, Saria,
Malon, and Talon, as well as the Happy Mask sidequest.
If Open Kakariko Gate is disabled, the Weird Egg will also
be required for Zelda's Letter to open the gate as child.
''',
default = True,
shared = True,
),
Checkbutton(
name = 'shuffle_gerudo_card',
args_help = '''\
Shuffles the Gerudo Card into the pool.
''',
gui_text = 'Shuffle Gerudo Card',
gui_group = 'shuffle',
gui_tooltip = '''\
Enabling this shuffles the Gerudo Card into the item pool.
The Gerudo Card is required to enter the Gerudo Training Grounds,
however it does not prevent the guards throwing you in jail.
This has no effect if the option to Start with Gerudo Card is set.
''',
shared = True,
),
Checkbutton(
name = 'shuffle_song_items',
args_help = '''\
Shuffles the songs into the rest of the item pool so that
they can appear at other locations and items can appear at
the song locations.
''',
gui_text = 'Shuffle Songs with Items',
gui_group = 'shuffle',
gui_tooltip = '''\
Enabling this shuffles the songs into the rest of the
item pool.
This means that song locations can contain other items,
and any location can contain a song. Otherwise, songs
are only shuffled among themselves.
''',
default = True,
shared = True,
),
Combobox(
name = 'shuffle_scrubs',
default = 'off',
choices = {
'off': 'Off',
'low': 'On (Affordable)',
'regular': 'On (Expensive)',
'random': 'On (Random Prices)',
},
args_help = '''\
Deku Scrub Salesmen are randomized:
off: Only the 3 Scrubs that give one-time items
in the vanilla game will have random items.
low: All Scrubs will have random items and their
prices will be reduced to 10 rupees each.
regular: All Scrubs will have random items and each
of them will demand their vanilla prices.
random: All Scrubs will have random items and their
price will also be random between 10-99 rupees.
''',
gui_text = 'Scrub Shuffle',
gui_group = 'shuffle',
gui_tooltip = '''\
'Off': Only the 3 Scrubs that give one-time
items in the vanilla game (PoH, Deku Nut
capacity, and Deku Stick capacity) will
have random items.
'Affordable': All Scrub prices will be
reduced to 10 rupees each.
'Expensive': All Scrub prices will be
their vanilla prices. This will require
spending over 1000 rupees on Scrubs.
'Random Prices': All Scrub prices will be
between 0-99 rupees. This will on average
be very, very expensive overall.
''',
shared = True,
),
Combobox(
name = 'shopsanity',
default = 'off',
choices = {
'off': 'Off',
'0': 'Shuffled Shops (0 Items)',
'1': 'Shuffled Shops (1 Items)',
'2': 'Shuffled Shops (2 Items)',
'3': 'Shuffled Shops (3 Items)',
'4': 'Shuffled Shops (4 Items)',
'random': 'Shuffled Shops (Random)',
},
args_help = '''\
Shop contents are randomized. Non-shop items
are one time purchases. This setting also
changes the item pool to introduce a new Wallet
upgrade and more money.
off: Normal Shops*
0-4: Shop contents are shuffled and N non-shop
items are added to every shop. So more
possible item locations.
random: Shop contents are shuffles and each shop
will have a random number of non-shop items
''',
gui_text = 'Shopsanity',
gui_group = 'shuffle',
gui_tooltip = '''\
Shop contents are randomized.
(X Items): Shops have X random non-shop (Special
Deal!) items. They will always be on the left
side, and some of the lower value shop items
will be replaced to make room for these.
(Random): Each shop will have a random number
of non-shop items up to a maximum of 4.
The non-shop items have no requirements except
money, while the normal shop items (such as
200/300 rupee tunics) have normal vanilla
requirements. This means that, for example,
as a child you cannot buy 200/300 rupee
tunics, but you can buy non-shop tunics.
Non-shop Bombchus will unlock the chu slot
in your inventory, which, if Bombchus are in
logic, is needed to buy Bombchu refills.
Otherwise, the Bomb Bag is required.
''',
shared = True,
),
Combobox(
name = 'tokensanity',
default = 'off',
choices = {
'off': 'Off',
'dungeons': 'Dungeons Only',
'all': 'All Tokens',
},
args_help = '''\
Gold Skulltula Tokens will be shuffled into the pool,
and Gold Skulltula locations can have any item.
off: Don't use this feature
dungeons: Only dungeon Skulltulas will be shuffled
all: All Gold Skulltulas will be shuffled
''',
gui_text = 'Tokensanity',
gui_group = 'shuffle',
gui_tooltip = '''\
Token reward from Gold Skulltulas are
shuffled into the pool.
'Dungeons Only': This only shuffles
the GS locations that are within
dungeons, increasing the value of
most dungeons and making internal
dungeon exploration more diverse.
'All Tokens': Effectively adds 100
new locations for items to appear.
''',
shared = True,
),
Combobox(
name = 'shuffle_mapcompass',
default = 'dungeon',
choices = {
'remove': 'Maps/Compasses: Remove',
'startwith': 'Maps/Compasses: Start With',
'dungeon': 'Maps/Compasses: Dungeon Only',
'keysanity': 'Maps/Compasses: Anywhere'
},
args_help = '''\
Sets the Map and Compass placement rules
remove: Maps and Compasses are removed from the world.
startwith: Start with all Maps and Compasses.
dungeon: Maps and Compasses are put in their dungeon.
keysanity: Maps and Compasses can appear anywhere.
''',
gui_text = 'Shuffle Dungeon Items',
gui_group = 'shuffle',
gui_tooltip = '''\
'Remove': Maps and Compasses are removed.
This will add a small amount of money and
refill items to the pool.
'Start With': Maps and Compasses are given to
you from the start. This will add a small
amount of money and refill items to the pool.
'Dungeon': Maps and Compasses can only appear
in their respective dungeon.
'Anywhere': Maps and Compasses can appear
anywhere in the world.
Setting 'Remove', 'Start With, or 'Anywhere' will
add 2 more possible locations to each Dungeons.
This makes dungeons more profitable, especially
Ice Cavern, Water Temple, and Jabu Jabu's Belly.
''',
shared = True,
),
Combobox(
name = 'shuffle_smallkeys',
default = 'dungeon',
choices = {
'remove': 'Small Keys: Remove (Keysy)',
'dungeon': 'Small Keys: Dungeon Only',
'keysanity': 'Small Keys: Anywhere (Keysanity)'
},
args_help = '''\
Sets the Small Keys placement rules
remove: Small Keys are removed from the world.
dungeon: Small Keys are put in their dungeon.
keysanity: Small Keys can appear anywhere.
''',
gui_group = 'shuffle',
gui_tooltip = '''\
'Remove': Small Keys are removed. All locked
doors in dungeons will be unlocked. An easier
mode.
'Dungeon': Small Keys can only appear in their
respective dungeon. If Fire Temple is not a
Master Quest dungeon, the door to the Boss Key
chest will be unlocked
'Anywhere': Small Keys can appear
anywhere in the world. A difficult mode since
it is more likely to need to enter a dungeon
multiple times.
Try different combination out, such as:
'Small Keys: Dungeon' + 'Boss Keys: Anywhere'
for a milder Keysanity experience.
''',
shared=True,
),
Combobox(
name = 'shuffle_bosskeys',
default = 'dungeon',
choices = {
'remove': 'Boss Keys: Remove (Keysy)',
'dungeon': 'Boss Keys: Dungeon Only',
'keysanity': 'Boss Keys: Anywhere (Keysanity)',
},
args_help = '''\
Sets the Boss Keys placement rules
remove: Boss Keys are removed from the world.
dungeon: Boss Keys are put in their dungeon.
keysanity: Boss Keys can appear anywhere.
''',
gui_group = 'shuffle',
gui_tooltip = '''\
'Remove': Boss Keys are removed. All locked
doors in dungeons will be unlocked. An easier
mode.
'Dungeon': Boss Keys can only appear in their
respective dungeon.
'Anywhere': Boss Keys can appear
anywhere in the world. A difficult mode since
it is more likely to need to enter a dungeon
multiple times.
Try different combination out, such as:
'Small Keys: Dungeon' + 'Boss Keys: Anywhere'
for a milder Keysanity experience.
''',
shared = True,
),
Checkbutton(
name = 'enhance_map_compass',
args_help = '''\
Gives the Map and Compass extra functionality.
Map will tell if a dungeon is vanilla or Master Quest.
Compass will tell what medallion or stone is within.
The Temple of Time Altar will no longer provide any
information. If the maps and compasses are removed then
the information will be unavailable.
''',
gui_text = 'Maps and Compasses Give Information',
gui_group = 'shuffle',
gui_tooltip = '''\
Gives the Map and Compass extra functionality.
Map will tell if a dungeon is vanilla or Master Quest.
Compass will tell what medallion or stone is within.
The Temple of Time Altar will no longer provide any
information.
'Maps/Compasses: Remove': The dungeon information is
not available anywhere in the game.
'Maps/Compasses: Start With': The dungeon information
is available immediately from the dungeon menu.
''',
default = False,
shared = True,
),
Checkbutton(
name = 'unlocked_ganondorf',
args_help = '''\
The Boss Key door in Ganon's Tower will start unlocked.
''',
gui_text = 'Remove Ganon\'s Boss Door Lock',
gui_group = 'shuffle',
gui_tooltip = '''\
The Boss Key door in Ganon's Tower
will start unlocked. This is intended
to be used with reduced trial
requirements to make it more likely
that skipped trials can be avoided.
''',
shared = True,
),
Checkbutton(
name = 'mq_dungeons_random',
args_help = '''\
If set, a uniformly random number of dungeons will have Master Quest designs.
''',
gui_text = 'Random Number of MQ Dungeons',
gui_group = 'world',
gui_tooltip = '''\
If set, a random number of dungeons
will have Master Quest designs.
''',
shared = True,
),
Scale(
name = 'mq_dungeons',
default = 0,
min = 0,
max = 12,
args_help = '''\
Select a number (0-12) of Master Quest dungeons to appear in the game.
0: (default) All dungeon will have their original designs.
...
6: 50/50 split; Half of all dungeons will be from Master Quest.
...
12: All dungeons will have Master Quest redesigns.
''',
gui_group = 'world',
gui_tooltip = '''\
Select a number of Master Quest
dungeons to appear in the game.
0: All dungeon will have their
original designs. (default)
6: Half of all dungeons will
be from Master Quest.
12: All dungeons will have
Master Quest redesigns.
''',
gui_dependency = lambda settings: 0 if settings.mq_dungeons_random else None,
shared = True,
),
Setting_Info('disabled_locations', list, math.ceil(math.log(len(location_table) + 2, 2)), True,
{
'default': [],
'help': '''\
Choose a list of locations that will never be required to beat the game.
'''
},
{
'text': 'Exclude Locations',
'widget': 'SearchBox',
'group': 'logic_tab',
'options': list(location_table.keys()),
'tooltip':'''
Prevent locations from being required. Major
items can still appear there, however they
will never be required to beat the game.
Most dungeon locations have a MQ alternative.
If the location does not exist because of MQ
then it will be ignored. So make sure to
disable both versions if that is the intent.
'''
}),
Setting_Info('allowed_tricks', list, math.ceil(math.log(len(logic_tricks) + 2, 2)), True,
{
'default': [],
'help': '''\
Choose a list of allowed logic tricks logic may expect to beat the game.
'''
},
{
'text': 'Enable Tricks',
'widget': 'SearchBox',
'group': 'logic_tab',
'options': {gui_text: val['name'] for gui_text, val in logic_tricks.items()},
'entry_tooltip': logic_tricks_entry_tooltip,
'list_tooltip': logic_tricks_list_tooltip,
}),
Combobox(
name = 'logic_earliest_adult_trade',
default = 'pocket_egg',
choices = {
'pocket_egg': 'Earliest: Pocket Egg',
'pocket_cucco': 'Earliest: Pocket Cucco',
'cojiro': 'Earliest: Cojiro',
'odd_mushroom': 'Earliest: Odd Mushroom',
'poachers_saw': "Earliest: Poacher's Saw",
'broken_sword': 'Earliest: Broken Sword',
'prescription': 'Earliest: Prescription',
'eyeball_frog': 'Earliest: Eyeball Frog',
'eyedrops': 'Earliest: Eyedrops',
'claim_check': 'Earliest: Claim Check',
},
args_help = '''\
Select the earliest item that can appear in the adult trade sequence:
'pocket_egg'
'pocket_cucco'
'cojiro'
'odd_mushroom'
'poachers_saw'
'broken_sword'
'prescription'
'eyeball_frog'
'eyedrops'
'claim_check'
''',
gui_group = 'checks',
gui_tooltip = '''\
Select the earliest item that can appear in the adult trade sequence.
''',
shared = True,
),
Combobox(
name = 'logic_latest_adult_trade',
default = 'claim_check',
choices = {
'pocket_egg': 'Latest: Pocket Egg',
'pocket_cucco': 'Latest: Pocket Cucco',
'cojiro': 'Latest: Cojiro',
'odd_mushroom': 'Latest: Odd Mushroom',
'poachers_saw': "Latest: Poacher's Saw",
'broken_sword': 'Latest: Broken Sword',
'prescription': 'Latest: Prescription',
'eyeball_frog': 'Latest: Eyeball Frog',
'eyedrops': 'Latest: Eyedrops',
'claim_check': 'Latest: Claim Check',
},
args_help = '''\
Select the latest item that can appear in the adult trade sequence:
'pocket_egg'
'pocket_cucco'
'cojiro'
'odd_mushroom'
'poachers_saw'
'broken_sword'
'prescription'
'eyeball_frog'
'eyedrops'
'claim_check'
''',
gui_group = 'checks',
gui_tooltip = '''\
Select the latest item that can appear in the adult trade sequence.
''',
shared = True,
),
Combobox(
name = 'logic_lens',
default = 'all',
choices = {
'all': 'Required Everywhere',
'chest-wasteland': 'Wasteland and Chest Minigame',
'chest': 'Only Chest Minigame',
},
args_help = '''\
Choose what expects the Lens of Truth:
all: All lens spots expect the lens (except those that did not in the original game)
chest-wasteland: Only wasteland and chest minigame expect the lens
chest: Only the chest minigame expects the lens
''',
gui_group = 'tricks',
gui_tooltip = '''\
'Required everywhere': every invisible or
fake object will expect you to have the
Lens of Truth and Magic. The exception is
passing through the first wall in Bottom of
the Well, since that is required in vanilla.
'Wasteland': The lens is needed to follow
the ghost guide across the Haunted Wasteland.
''',
shared = True,
),
Checkbutton(
name = 'ocarina_songs',
args_help = '''\
Randomizes the notes needed to play each ocarina song.
''',
gui_text = 'Randomize Ocarina Song Notes',
gui_group = 'other',
gui_tooltip = '''\
Will need to memorize a new set of songs.
Can be silly, but difficult. Songs are
generally sensible, and warp songs are
typically more difficult.
''',
shared = True,
),
Checkbutton(
name = 'correct_chest_sizes',
args_help = '''\
Updates the chest sizes to match their contents.
Small Chest = Non-required Item
Big Chest = Progression Item
''',
gui_text = 'Chest Size Matches Contents',
gui_group = 'other',
gui_tooltip = '''\
Chests will be large if they contain a major
item and small if they don't. Boss keys will
be in gold chests. This allows skipping
chests if they are small. However, skipping
small chests will mean having low health,
ammo, and rupees, so doing so is a risk.
''',
shared = True,
),
Checkbutton(
name = 'clearer_hints',
args_help = '''\
The hints provided by Gossip Stones are
very direct.
''',
gui_text = 'Clearer Hints',
gui_group = 'other',
gui_tooltip = '''\
The hints provided by Gossip Stones will
be very direct if this option is enabled.
''',
shared = True,
),
Combobox(
name = 'hints',
default = 'agony',
choices = {
'none': 'No Hints',
'mask': 'Hints; Need Mask of Truth',
'agony': 'Hints; Need Stone of Agony',
'always': 'Hints; Need Nothing',
},
args_help = '''\
Choose how Gossip Stones behave
none: Default behavior
mask: Have useful hints that are read with the Mask of Truth.
agony: Have useful hints that are read with Stone of Agony.
always: Have useful hints which can always be read.
''',
gui_text = 'Gossip Stones',
gui_group = 'other',
gui_tooltip = '''\
Gossip Stones can be made to give hints
about where items can be found.
Different settings can be chosen to
decide which item is needed to
speak to Gossip Stones. Choosing to
stick with the Mask of Truth will
make the hints very difficult to
obtain.
Hints for 'on the way of the hero' are
locations that contain items that are
required to beat the game.
''',
shared = True,
),
Combobox(
name = 'hint_dist',
default = 'balanced',
choices = {
'useless': 'Useless',
'balanced': 'Balanced',
'strong': 'Strong',
'very_strong': 'Very Strong',
'tournament': 'Tournament',
},
args_help = '''\
Choose how Gossip Stones hints are distributed
useless: Nothing but junk hints.
balanced: Use a balanced distribution of hint types
strong: Use a strong distribution of hint types
very_strong: Use a very strong distribution of hint types
tournament: Similar to strong but has no variation in hint types
''',
gui_text = 'Hint Distribution',
gui_group = 'other',
gui_tooltip = '''\
Useless has nothing but junk
hints.
Strong distribution has some
duplicate hints and no junk
hints.
Very Strong distribution has
only very useful hints.
Tournament distribution is
similar to Strong but with no
variation in hint types.
''',
shared = True,
),
Combobox(
name = 'text_shuffle',
default = 'none',
choices = {
'none': 'No Text Shuffled',
'except_hints': 'Shuffled except Hints and Keys',
'complete': 'All Text Shuffled',
},
args_help = '''\
Choose how to shuffle the game's messages.
none: Default behavior
except_hints: All non-useful text is shuffled.
complete: All text is shuffled.
''',
gui_text = 'Text Shuffle',
gui_group = 'other',
gui_tooltip = '''\
Will make things confusing for comedic value.
'Shuffled except Hints and Keys': Key texts
are not shuffled because in keysanity it is
inconvenient to figure out which keys are which
without the correct text. Similarly, non-shop
items sold in shops will also retain standard
text for the purpose of accurate price checks.
''',
shared = True,
),
Combobox(
name = 'junk_ice_traps',
default = 'normal',
choices = {
'off': 'No Ice Traps',
'normal': 'Normal Ice Traps',
'on': 'Extra Ice Traps',
'mayhem': 'Ice Trap Mayhem',
'onslaught': 'Ice Trap Onslaught',
},
args_help = '''\
Choose how Ice Traps will be placed in the junk item pool
off: Ice traps are removed.
normal: Default behavior; no ice traps in the junk item pool.
on: Ice Traps will be placed in the junk item pool.
mayhem: All added junk items will be ice traps.
onslaught: All junk items will be ice traps, even those in the base item pool.
''',
gui_text = 'Ice Traps',
gui_group = 'other',
gui_tooltip = '''\
Off: All Ice Traps are removed.
Normal: Only Ice Traps from the base item pool
are placed.
Extra Ice Traps: Chance to add extra Ice Traps
when junk items are added to the itempool.
Ice Trap Mayhem: All added junk items will
be Ice Traps.
Ice Trap Onslaught: All junk items will be
replaced by Ice Traps, even those in the
base pool.
''',
shared = True,
),
Combobox(
name = 'item_pool_value',
default = 'balanced',
choices = {
'plentiful': 'Plentiful',
'balanced': 'Balanced',
'scarce': 'Scarce',
'minimal': 'Minimal'
},
args_help = '''\
Change the item pool for an added challenge.
plentiful: Duplicates most of the major items, making it easier to find progression.
balanced: Default items
scarce: Double defense, double magic, and all 8 heart containers are removed. Ammo
for each type can only be expanded once and you can only find three Bombchu packs.
minimal: Double defense, double magic, Nayru's Love, and all health upgrades are removed.
No ammo expansions are available and you can only find one Bombchu pack.
''',
gui_text = 'Item Pool',
gui_group = 'other',
gui_tooltip = '''\
Changes the amount of bonus items that
are available in the game.
'Plentiful': Extra major items are added.
'Balanced': Original item pool.
'Scarce': Some excess items are removed,
including health upgrades.
'Minimal': Most excess items are removed.
''',
shared = True,
),
Combobox(
name = 'damage_multiplier',
default = 'normal',
choices = {
'half': 'Half',
'normal': 'Normal',
'double': 'Double',
'quadruple': 'Quadruple',
'ohko': 'OHKO',
},
args_help = '''\
Change the amount of damage taken.
half: Half damage taken.
normal: Normal damage taken.
double: Double damage taken.
quadruple: Quadruple damage taken.
ohko: Link will die in one hit.
''',
gui_text = 'Damage Multiplier',
gui_group = 'other',
gui_tooltip = '''\
Changes the amount of damage taken.
'OHKO': Link dies in one hit.
''',
shared = True,
),
Combobox(
name = 'starting_tod',
default = 'default',
choices = {
'default': 'Default',
'random': 'Random Choice',
'early-morning': 'Early Morning',
'morning': 'Morning',
'noon': 'Noon',
'afternoon': 'Afternoon',
'evening': 'Evening',
'dusk': 'Dusk',
'midnight': 'Midnight',
'witching-hour': 'Witching Hour',
},
args_help = '''\
Change up Link's sleep routine.
Daytime officially starts at 6:30,
nighttime at 18:00 (6:00 PM).
Default is 10:00 in the morning.
The alternatives are multiples of 3 hours.
''',
gui_text = 'Starting Time of Day',
gui_group = 'other',
gui_tooltip = '''\
Change up Link's sleep routine.
Daytime officially starts at 6:30,
nighttime at 18:00 (6:00 PM).
Default is 10:00 in the morning.
The alternatives are multiples of 3 hours.
''',
shared = True,
),
Combobox(
name = 'default_targeting',
default = 'hold',
choices = {
'hold': 'Hold',
'switch': 'Switch',
},
args_help = '''\
Choose what the default Z-targeting is.
''',
gui_text = 'Default Targeting Option',
gui_group = 'cosmetic',
),
Combobox(
name = 'background_music',
default = 'normal',
choices = {
'normal': 'Normal',
'off': 'No Music',
'random': 'Random',
},
args_help = '''\
Sets the background music behavior
normal: Areas play their normal background music
off: No background music
random: Areas play random background music
''',
gui_text = 'Background Music',
gui_group = 'sfx',
gui_tooltip = '''\
'No Music': No background music.
is played.
'Random': Area background music is
randomized.
''',
),
Checkbutton(
name = 'display_dpad',
args_help = '''\
Shows an additional HUD element displaying current available options on the DPAD
''',
gui_text = 'Display D-Pad HUD',
gui_group = 'cosmetic',
gui_tooltip = '''\
Shows an additional HUD element displaying
current available options on the D-Pad.
''',
default = True,
),
Setting_Info('kokiri_color', str, 0, False,
{
'default': 'Kokiri Green',
'type': parse_custom_tunic_color,
'help': '''\
Choose the color for Link's Kokiri Tunic. (default: %(default)s)
Color: Make the Kokiri Tunic this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Kokiri Tunic',
'group': 'tunic_colors',
'widget': 'Combobox',
'default': 'Kokiri Green',
'options': get_tunic_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('goron_color', str, 0, False,
{
'default': 'Goron Red',
'type': parse_custom_tunic_color,
'help': '''\
Choose the color for Link's Goron Tunic. (default: %(default)s)
Color: Make the Goron Tunic this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Goron Tunic',
'group': 'tunic_colors',
'widget': 'Combobox',
'default': 'Goron Red',
'options': get_tunic_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('zora_color', str, 0, False,
{
'default': 'Zora Blue',
'type': parse_custom_tunic_color,
'help': '''\
Choose the color for Link's Zora Tunic. (default: %(default)s)
Color: Make the Zora Tunic this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Zora Tunic',
'group': 'tunic_colors',
'widget': 'Combobox',
'default': 'Zora Blue',
'options': get_tunic_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('navi_color_default', str, 0, False,
{
'default': 'White',
'type': parse_custom_navi_color,
'help': '''\
Choose the color for Navi when she is idle. (default: %(default)s)
Color: Make the Navi this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Navi Idle',
'group': 'navi_colors',
'widget': 'Combobox',
'default': 'White',
'options': get_navi_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('navi_color_enemy', str, 0, False,
{
'default': 'Yellow',
'type': parse_custom_navi_color,
'help': '''\
Choose the color for Navi when she is targeting an enemy. (default: %(default)s)
Color: Make the Navi this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Navi Targeting Enemy',
'group': 'navi_colors',
'widget': 'Combobox',
'default': 'Yellow',
'options': get_navi_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('navi_color_npc', str, 0, False,
{
'default': 'Light Blue',
'type': parse_custom_navi_color,
'help': '''\
Choose the color for Navi when she is targeting an NPC. (default: %(default)s)
Color: Make the Navi this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Navi Targeting NPC',
'group': 'navi_colors',
'widget': 'Combobox',
'default': 'Light Blue',
'options': get_navi_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Setting_Info('navi_color_prop', str, 0, False,
{
'default': 'Green',
'type': parse_custom_navi_color,
'help': '''\
Choose the color for Navi when she is targeting a prop. (default: %(default)s)
Color: Make the Navi this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
'''
},
{
'text': 'Navi Targeting Prop',
'group': 'navi_colors',
'widget': 'Combobox',
'default': 'Green',
'options': get_navi_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'''
}),
Combobox(
name = 'sword_trail_duration',
default = 4,
choices = {
4: 'Default',
10: 'Long',
15: 'Very Long',
20: 'Lightsaber',
},
args_help = '''\
Select the duration of the sword trail
''',
gui_text = 'Sword Trail Duration',
gui_group = 'sword_trails',
gui_tooltip = '''\
Select the duration for sword trails.
''',
),
Setting_Info('sword_trail_color_inner', str, 0, False,
{
'default': 'White',
'type': parse_custom_sword_color,
'help': '''\
Choose the color for your sword trail when you swing. This controls the inner color. (default: %(default)s)
Color: Make your sword trail this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
Rainbow: Rainbow sword trails.
'''
},
{
'text': 'Inner Color',
'group': 'sword_trails',
'widget': 'Combobox',
'default': 'White',
'options': get_sword_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'Rainbow': Rainbow sword trails.
'''
}),
Setting_Info('sword_trail_color_outer', str, 0, False,
{
'default': 'White',
'type': parse_custom_sword_color,
'help': '''\
Choose the color for your sword trail when you swing. This controls the outer color. (default: %(default)s)
Color: Make your sword trail this color.
Random Choice: Choose a random color from this list of colors.
Completely Random: Choose a random color from any color the N64 can draw.
Rainbow: Rainbow sword trails.
'''
},
{
'text': 'Outer Color',
'group': 'sword_trails',
'widget': 'Combobox',
'default': 'White',
'options': get_sword_color_options(),
'tooltip':'''\
'Random Choice': Choose a random
color from this list of colors.
'Completely Random': Choose a random
color from any color the N64 can draw.
'Rainbow': Rainbow sword trails.
'''
}),
Combobox(
name = 'sfx_low_hp',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.HP_LOW),
args_help = '''\
Select the sound effect that loops at low health. (default: %(default)s)
Sound: Replace the sound effect with the chosen sound.
Random Choice: Replace the sound effect with a random sound from this list.
None: Eliminate heart beeps.
''',
gui_text = 'Low HP',
gui_group = 'sfx',
gui_tooltip = '''\
'Random Choice': Choose a random
sound from this list.
'Default': Beep. Beep. Beep.
''',
),
Combobox(
name = 'sfx_navi_overworld',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.NAVI_OVERWORLD),
args_help = '''\
''',
gui_text = 'Navi Overworld',
gui_group = 'npc_sfx',
),
Combobox(
name = 'sfx_navi_enemy',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.NAVI_ENEMY),
args_help = '''\
''',
gui_text = 'Navi Enemy',
gui_group = 'npc_sfx',
),
Combobox(
name = 'sfx_menu_cursor',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.MENU_CURSOR),
args_help = '''\
''',
gui_text = 'Menu Cursor',
gui_group = 'menu_sfx',
),
Combobox(
name = 'sfx_menu_select',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.MENU_SELECT),
args_help = '''\
''',
gui_text = 'Menu Select',
gui_group = 'menu_sfx',
),
Combobox(
name = 'sfx_horse_neigh',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.HORSE_NEIGH),
args_help = '''\
''',
gui_text = 'Horse',
gui_group = 'sfx',
),
Combobox(
name = 'sfx_nightfall',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.NIGHTFALL),
args_help = '''\
''',
gui_text = 'Nightfall',
gui_group = 'sfx',
),
Combobox(
name = 'sfx_hover_boots',
default = 'default',
choices = sfx.get_setting_choices(sfx.SoundHooks.BOOTS_HOVER),
args_help = '''\
''',
gui_text = 'Hover Boots',
gui_group = 'sfx',
),
Combobox(
name = 'sfx_ocarina',
default = 'ocarina',
choices = {
'ocarina': 'Default',
'random-choice': 'Random Choice',
'flute': 'Flute',
'harp': 'Harp',
'whistle': 'Whistle',
'malon': 'Malon',
'grind-organ': 'Grind Organ',
},
args_help = '''\
Change the sound of the ocarina.
default: ocarina
''',
gui_text = 'Ocarina',
gui_group = 'sfx',
gui_tooltip = '''\
Change the sound of the ocarina.
''',
),
]
# Fast lookup table: setting name -> its Setting_Info entry from setting_infos.
si_dict = {si.name: si for si in setting_infos}
def get_setting_info(name):
    """Return the Setting_Info registered under ``name`` (raises KeyError if absent)."""
    return si_dict[name]
import ast
from .ItemList import item_table
from .State import State
import re
# Map "escaped" identifiers back to canonical item names from item_table.
# Rule strings reference items as bare Python identifiers, so spaces become
# underscores and quotes/parentheses/brackets are stripped entirely.
escaped_items = {
    re.sub(r'[\'()[\]]', '', item.replace(' ', '_')): item
    for item in item_table
}
class Rule_AST_Transformer(ast.NodeTransformer):
    """Rewrite a parsed logic-rule expression into calls against a ``state``.

    Bare names are resolved, in order, as: escaped item names (becoming
    ``state.has(item)``), world attributes (becoming ``state.world.attr``),
    State methods (becoming ``state.method()``), and otherwise plain strings
    with underscores turned back into spaces.
    """

    def __init__(self, world):
        # The world whose attributes bare names may resolve to.
        self.world = world

    def visit_Name(self, node):
        """Resolve a bare identifier per the precedence described on the class."""
        if node.id in escaped_items:
            # Item check: state.has('Item Name')
            return ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='state', ctx=ast.Load()),
                    attr='has',
                    ctx=ast.Load()),
                args=[ast.Str(escaped_items[node.id])],
                keywords=[])
        elif node.id in self.world.__dict__:
            # World setting: state.world.<name>
            return ast.Attribute(
                value=ast.Attribute(
                    value=ast.Name(id='state', ctx=ast.Load()),
                    attr='world',
                    ctx=ast.Load()),
                attr=node.id,
                ctx=ast.Load())
        elif node.id in State.__dict__:
            # State predicate: state.<name>()
            return ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='state', ctx=ast.Load()),
                    attr=node.id,
                    ctx=ast.Load()),
                args=[],
                keywords=[])
        else:
            # Fall back to a literal string with spaces restored.
            return ast.Str(node.id.replace('_', ' '))

    def visit_Tuple(self, node):
        """Translate ``(Item, count)`` into ``state.has('Item', count)``.

        ``count`` may be a literal number or the name of a world attribute.
        """
        if len(node.elts) != 2:
            raise Exception('Parse Error: Tuple must have 2 values')

        item, count = node.elts

        if isinstance(item, ast.Str):
            item = ast.Name(id=item.s, ctx=ast.Load())
        if not isinstance(item, ast.Name):
            raise Exception('Parse Error: first value must be an item. Got %s' % item.__class__.__name__)
        if not (isinstance(count, ast.Name) or isinstance(count, ast.Num)):
            raise Exception('Parse Error: second value must be a number. Got %s' % count.__class__.__name__)

        if isinstance(count, ast.Name):
            # A named count resolves to a world attribute: state.world.<count>
            count = ast.Attribute(
                value=ast.Attribute(
                    value=ast.Name(id='state', ctx=ast.Load()),
                    attr='world',
                    ctx=ast.Load()),
                attr=count.id,
                ctx=ast.Load())

        if item.id in escaped_items:
            item.id = escaped_items[item.id]

        if item.id not in item_table:
            raise Exception('Parse Error: invalid item name')

        return ast.Call(
            func=ast.Attribute(
                value=ast.Name(id='state', ctx=ast.Load()),
                attr='has',
                ctx=ast.Load()),
            args=[ast.Str(item.id), count],
            keywords=[])

    def visit_Call(self, node):
        """Turn ``helper(args...)`` into ``state.helper(args...)``.

        Bare-name arguments are resolved to world attributes, item names,
        or literal strings (same precedence as visit_Name, minus State).
        """
        new_args = []
        for child in node.args:
            if isinstance(child, ast.Name):
                if child.id in self.world.__dict__:
                    child = ast.Attribute(
                        value=ast.Attribute(
                            value=ast.Name(id='state', ctx=ast.Load()),
                            attr='world',
                            ctx=ast.Load()),
                        attr=child.id,
                        ctx=ast.Load())
                elif child.id in escaped_items:
                    child = ast.Str(escaped_items[child.id])
                else:
                    child = ast.Str(child.id.replace('_', ' '))
            new_args.append(child)

        if isinstance(node.func, ast.Name):
            return ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id='state', ctx=ast.Load()),
                    attr=node.func.id,
                    ctx=ast.Load()),
                args=new_args,
                keywords=node.keywords)
        else:
            return node

    def visit_Subscript(self, node):
        """Turn ``table[key]`` into ``state.world.table['key name']``."""
        if isinstance(node.value, ast.Name):
            return ast.Subscript(
                value=ast.Attribute(
                    value=ast.Attribute(
                        value=ast.Name(id='state', ctx=ast.Load()),
                        attr='world',
                        ctx=ast.Load()),
                    attr=node.value.id,
                    ctx=ast.Load()),
                slice=ast.Index(value=ast.Str(node.slice.value.id.replace('_', ' '))),
                ctx=node.ctx)
        else:
            return node

    def visit_Compare(self, node):
        """Replace escaped item names on either side of a comparison with strings."""
        if isinstance(node.left, ast.Name):
            if node.left.id in escaped_items:
                node.left = ast.Str(escaped_items[node.left.id])

        if isinstance(node.comparators[0], ast.Name):
            if node.comparators[0].id in escaped_items:
                node.comparators[0] = ast.Str(escaped_items[node.comparators[0].id])

        self.generic_visit(node)
        return node
def parse_rule_string(rule, world):
    """Compile a logic-rule string into a ``lambda state: ...`` predicate.

    A ``None`` rule compiles to an always-true predicate.  Text after a
    ``#`` in the rule is treated as a comment and discarded.
    """
    if rule is None:
        return lambda state: True
    expression = rule.split('#')[0]
    tree = ast.parse('lambda state: ' + expression, mode='eval')
    transformed = ast.fix_missing_locations(Rule_AST_Transformer(world).visit(tree))
    # NOTE: eval of rule text -- rules must come only from trusted data files.
    return eval(compile(transformed, '<string>', 'eval'))
from enum import Enum, unique
@unique
class RegionType(Enum):
    """Category of a world region; distinguishes outdoor from enclosed areas."""

    Overworld = 1
    Interior = 2
    Dungeon = 3
    Grotto = 4

    @property
    def is_indoors(self):
        """True for any enclosed region type (Interior, Dungeon, or Grotto)."""
        return self in {RegionType.Interior, RegionType.Dungeon, RegionType.Grotto}
class Region(object):
    """A named area of the game world containing locations and connections.

    Regions are the nodes of the world graph: ``entrances`` and ``exits``
    are the connecting edges, and ``locations`` are the item checks inside.
    """

    def __init__(self, name, type=RegionType.Overworld):
        self.name = name
        self.type = type
        self.entrances = []
        self.exits = []
        self.locations = []
        self.dungeon = None          # owning Dungeon, if this region is part of one
        self.world = None            # owning World; assigned when region is registered
        self.spot_type = 'Region'
        self.recursion_count = 0
        self.price = None

    def copy(self, new_world):
        """Return a copy of this region bound to ``new_world``.

        NOTE: ``dungeon`` is copied as the dungeon's *name* (a string); the
        caller is expected to re-link it to the new world's Dungeon object.
        """
        new_region = Region(self.name, self.type)
        new_region.world = new_world
        new_region.spot_type = self.spot_type
        new_region.price = self.price
        new_region.can_reach = self.can_reach

        if self.dungeon:
            new_region.dungeon = self.dungeon.name
        new_region.locations = [location.copy(new_region) for location in self.locations]
        new_region.exits = [exit.copy(new_region) for exit in self.exits]

        return new_region

    def can_reach(self, state):
        """Return True if ``state`` can reach this region through any entrance."""
        return any(state.can_reach(entrance) for entrance in self.entrances)

    def can_fill(self, item):
        """Return True if ``item`` may be placed at a location in this region.

        Dungeon items (maps, compasses, keys) may be restricted to their own
        dungeon depending on the world's shuffle settings.
        """
        is_dungeon_restricted = False
        if item.map or item.compass:
            is_dungeon_restricted = self.world.shuffle_mapcompass == 'dungeon'
        elif item.smallkey and item.type != 'FortressSmallKey':
            is_dungeon_restricted = self.world.shuffle_smallkeys == 'dungeon'
        elif item.bosskey:
            is_dungeon_restricted = self.world.shuffle_bosskeys == 'dungeon'

        if is_dungeon_restricted:
            return self.dungeon and self.dungeon.is_dungeon_item(item) and item.world.id == self.world.id
        return True

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        return '%s' % self.name
def shop_address(shop_id, shelf_id):
    """Return the data address of a shop shelf slot.

    The table starts at 0xC71ED0, with 0x40 bytes per shop and
    0x08 bytes per shelf entry.
    """
    SHOP_TABLE_BASE = 0xC71ED0
    SHOP_STRIDE = 0x40
    SHELF_STRIDE = 0x08
    return SHOP_TABLE_BASE + shop_id * SHOP_STRIDE + shelf_id * SHELF_STRIDE
# Location: Type Scene Default Hint Addresses
location_table = {
"Kokiri Sword Chest": ("Chest", 0x55, 0x00, "Kokiri Forest", None),
"Mido Chest Top Left": ("Chest", 0x28, 0x00, "Kokiri Forest", None),
"Mido Chest Top Right": ("Chest", 0x28, 0x01, "Kokiri Forest", None),
"Mido Chest Bottom Left": ("Chest", 0x28, 0x02, "Kokiri Forest", None),
"Mido Chest Bottom Right": ("Chest", 0x28, 0x03, "Kokiri Forest", None),
"Shield Grave Chest": ("Chest", 0x40, 0x00, "the Graveyard", None),
"Heart Piece Grave Chest": ("Chest", 0x3F, 0x00, "the Graveyard", None),
"Composer Grave Chest": ("Chest", 0x41, 0x00, "the Graveyard", None),
"Death Mountain Bombable Chest": ("Chest", 0x60, 0x01, "Death Mountain Trail", None),
"Goron City Leftmost Maze Chest": ("Chest", 0x62, 0x00, "Goron City", None),
"Goron City Right Maze Chest": ("Chest", 0x62, 0x01, "Goron City", None),
"Goron City Left Maze Chest": ("Chest", 0x62, 0x02, "Goron City", None),
"Zoras Domain Torch Run": ("Chest", 0x58, 0x00, "Zora's Domain", None),
"Hookshot Chest": ("Chest", 0x48, 0x00, "the Graveyard", None),
"Gerudo Valley Hammer Rocks Chest": ("Chest", 0x5A, 0x00, "Gerudo Valley", None),
"Gerudo Fortress Rooftop Chest": ("Chest", 0x5D, 0x00, "Gerudo's Fortress", None),
"Haunted Wasteland Structure Chest": ("Chest", 0x5E, 0x00, "Haunted Wasteland", None),
"Redead Grotto Chest": ("Chest", 0x3E, 0x0A, "Kakariko Village", None),
"Wolfos Grotto Chest": ("Chest", 0x3E, 0x11, "Sacred Forest Meadow", None),
"Silver Gauntlets Chest": ("Chest", 0x5C, 0x0B, "Desert Colossus", None),
"Mirror Shield Chest": ("Chest", 0x5C, 0x09, "Desert Colossus", None),
"Field West Castle Town Grotto Chest": ("Chest", 0x3E, 0x00, "Hyrule Field", None),
"Remote Southern Grotto Chest": ("Chest", 0x3E, 0x02, "Hyrule Field", None),
"Field Near Lake Outside Fence Grotto Chest": ("Chest", 0x3E, 0x03, "Hyrule Field", None),
"Kakariko Back Grotto Chest": ("Chest", 0x3E, 0x08, "Kakariko Village", None),
"Zora River Plateau Open Grotto Chest": ("Chest", 0x3E, 0x09, "Zora's River", None),
"Kokiri Forest Storms Grotto Chest": ("Chest", 0x3E, 0x0C, "Kokiri Forest", None),
"Lost Woods Generic Grotto Chest": ("Chest", 0x3E, 0x14, "the Lost Woods", None),
"Mountain Storms Grotto Chest": ("Chest", 0x3E, 0x17, "Death Mountain Trail", None),
"Top of Crater Grotto Chest": ("Chest", 0x3E, 0x1A, "Death Mountain Crater", None),
"Treasure Chest Game": ("Chest", 0x10, 0x0A, "the Market", None),
"Zelda": ("Cutscene", 0xFF, 0x01, "Temple of Time", None),
"Gift from Saria": ("Cutscene", 0xFF, 0x02, "the Lost Woods", None),
"Zoras Fountain Fairy Reward": ("Cutscene", 0xFF, 0x10, "Zora's Fountain", None),
"Hyrule Castle Fairy Reward": ("Cutscene", 0xFF, 0x11, "Hyrule Castle", None),
"Desert Colossus Fairy Reward": ("Cutscene", 0xFF, 0x12, "Desert Colossus", None),
"Mountain Summit Fairy Reward": ("Cutscene", 0xFF, 0x13, "Death Mountain Trail", None),
"Crater Fairy Reward": ("Cutscene", 0xFF, 0x14, "Death Mountain Crater", None),
"Ganons Castle Fairy Reward": ("Cutscene", 0xFF, 0x15, "outside Ganon's Castle", None),
"Sheik Forest Song": ("Song", 0xFF, 0x20, "Sacred Forest Meadow", (0x20B0809, 0x20B0809)),
"Sheik in Crater": ("Song", 0xFF, 0x21, "Death Mountain Crater", (0x224D7F1, 0x224D7F1)),
"Sheik in Ice Cavern": ("Song", 0xFF, 0x22, "Ice Cavern", (0x2BEC889, 0x2BEC889)),
"Sheik at Colossus": ("Song", 0xFF, 0x23, "Desert Colossus", (0x218C57D, 0x218C57D)),
"Sheik in Kakariko": ("Song", 0xFF, 0x24, "Kakariko Village", (0x2000FE1, 0x2000FE1)),
"Sheik at Temple": ("Song", 0xFF, 0x25, "Temple of Time", (0x2531329, 0x2531329)),
"Impa at Castle": ("Song", 0xFF, 0x26, "Hyrule Castle", (0x2E8E925, 0x2E8E925)),
"Song from Malon": ("Song", 0xFF, 0x27, "Lon Lon Ranch", (0x0D7EB53, 0x0D7EBCF)),
"Song from Saria": ("Song", 0xFF, 0x28, "Sacred Forest Meadow", (0x20B1DB1, 0x20B1DB1)),
"Song from Composer Grave": ("Song", 0xFF, 0x29, "the Graveyard", (0x332A871, 0x332A871)),
"Song from Ocarina of Time": ("Song", 0xFF, 0x2A, "Hyrule Field", (0x252FC89, 0x252FC89)),
"Song at Windmill": ("Song", 0xFF, 0x2B, "Kakariko Village", (0x0E42C07, 0x0E42B8B)),
"Malon Egg": ("NPC", 0x5F, 0x47, "Hyrule Castle", None),
"Zeldas Letter": ("NPC", None, None, "Hyrule Castle", None),
"Darunias Joy": ("NPC", 0x62, 0x54, "Goron City", None),
"Diving Minigame": ("NPC", 0x58, 0x37, "Zora's Domain", None),
"Child Fishing": ("NPC", 0x49, 0x3E, "Lake Hylia", None),
"Adult Fishing": ("NPC", 0x49, 0x38, "Lake Hylia", None),
"Diving in the Lab": ("NPC", 0x38, 0x3E, "Lake Hylia", None),
"Link the Goron": ("NPC", 0x62, 0x2C, "Goron City", None),
"King Zora Thawed": ("NPC", 0x58, 0x2D, "Zora's Domain", None),
"Bombchu Bowling Bomb Bag": ("NPC", 0x4B, 0x34, "the Market", None),
"Bombchu Bowling Piece of Heart": ("NPC", 0x4B, 0x3E, "the Market", None),
"Dog Lady": ("NPC", 0x35, 0x3E, "the Market", None),
"Skull Kid": ("NPC", 0x5B, 0x3E, "the Lost Woods", None),
"Ocarina Memory Game": ("NPC", 0x5B, 0x76, "the Lost Woods", None),
"10 Gold Skulltula Reward": ("NPC", 0x50, 0x45, "Kakariko Village", None),
"20 Gold Skulltula Reward": ("NPC", 0x50, 0x39, "Kakariko Village", None),
"30 Gold Skulltula Reward": ("NPC", 0x50, 0x46, "Kakariko Village", None),
"40 Gold Skulltula Reward": ("NPC", 0x50, 0x03, "Kakariko Village", None),
"50 Gold Skulltula Reward": ("NPC", 0x50, 0x3E, "Kakariko Village", None),
"Man on Roof": ("NPC", 0x52, 0x3E, "Kakariko Village", None),
"Frog Ocarina Game": ("NPC", 0x54, 0x76, "Zora's River", None),
"Frogs in the Rain": ("NPC", 0x54, 0x3E, "Zora's River", None),
"Horseback Archery 1000 Points": ("NPC", 0x5D, 0x3E, "Gerudo's Fortress", None),
"Horseback Archery 1500 Points": ("NPC", 0x5D, 0x30, "Gerudo's Fortress", None),
"Child Shooting Gallery": ("NPC", 0x42, 0x60, "the Market", None),
"Adult Shooting Gallery": ("NPC", 0x42, 0x30, "Kakariko Village", None),
"Target in Woods": ("NPC", 0x5B, 0x60, "the Lost Woods", None),
"Deku Theater Skull Mask": ("NPC", 0x3E, 0x77, "the Lost Woods", None),
"Deku Theater Mask of Truth": ("NPC", 0x3E, 0x7A, "the Lost Woods", None),
"Anju as Adult": ("NPC", 0x52, 0x1D, "Kakariko Village", None),
"Biggoron": ("NPC", 0x60, 0x57, "Death Mountain Trail", None),
"Anjus Chickens": ("NPC", 0x52, 0x0F, "Kakariko Village", None),
"Talons Chickens": ("NPC", 0x4C, 0x14, "Lon Lon Ranch", None),
"10 Big Poes": ("NPC", 0x4D, 0x0F, "the Market", None),
"Rolling Goron as Child": ("NPC", 0x62, 0x34, "Goron City", None),
"Underwater Bottle": ("NPC", 0x57, 0x15, "Lake Hylia", None),
"Lake Hylia Sun": ("NPC", 0x57, 0x58, "Lake Hylia", None),
"Gerudo Fortress Membership Card": ("NPC", 0x0C, 0x3A, "Gerudo's Fortress", None),
"Ocarina of Time": ("NPC", 0x51, 0x0C, "Hyrule Field", None),
"Impa House Freestanding PoH": ("Collectable", 0x37, 0x01, "Kakariko Village", None),
"Tektite Grotto Freestanding PoH": ("Collectable", 0x3E, 0x01, "Hyrule Field", None),
"Windmill Freestanding PoH": ("Collectable", 0x48, 0x01, "Kakariko Village", None),
"Dampe Race Freestanding PoH": ("Collectable", 0x48, 0x07, "the Graveyard", None),
"Lon Lon Tower Freestanding PoH": ("Collectable", 0x4C, 0x01, "Lon Lon Ranch", None),
"Graveyard Freestanding PoH": ("Collectable", 0x53, 0x04, "the Graveyard", None),
"Gravedigging Tour": ("Collectable", 0x53, 0x08, "the Graveyard", None),
"Zora River Lower Freestanding PoH": ("Collectable", 0x54, 0x04, "Zora's River", None),
"Zora River Upper Freestanding PoH": ("Collectable", 0x54, 0x0B, "Zora's River", None),
"Lake Hylia Freestanding PoH": ("Collectable", 0x57, 0x1E, "Lake Hylia", None),
"Zoras Fountain Iceberg Freestanding PoH": ("Collectable", 0x59, 0x01, "Zora's Fountain", None),
"Zoras Fountain Bottom Freestanding PoH": ("Collectable", 0x59, 0x14, "Zora's Fountain", None),
"Gerudo Valley Waterfall Freestanding PoH": ("Collectable", 0x5A, 0x01, "Gerudo Valley", None),
"Gerudo Valley Crate Freestanding PoH": ("Collectable", 0x5A, 0x02, "Gerudo Valley", None),
"Colossus Freestanding PoH": ("Collectable", 0x5C, 0x0D, "Desert Colossus", None),
"DM Trail Freestanding PoH": ("Collectable", 0x60, 0x1E, "Death Mountain Trail", None),
"DM Crater Wall Freestanding PoH": ("Collectable", 0x61, 0x02, "Death Mountain Crater", None),
"DM Crater Volcano Freestanding PoH": ("Collectable", 0x61, 0x08, "Death Mountain Crater", None),
"Goron City Pot Freestanding PoH": ("Collectable", 0x62, 0x1F, "Goron City", None),
"Gerudo Fortress North F1 Carpenter": ("Collectable", 0x0C, 0x0C, "Gerudo's Fortress", None),
"Gerudo Fortress North F2 Carpenter": ("Collectable", 0x0C, 0x0A, "Gerudo's Fortress", None),
"Gerudo Fortress South F1 Carpenter": ("Collectable", 0x0C, 0x0E, "Gerudo's Fortress", None),
"Gerudo Fortress South F2 Carpenter": ("Collectable", 0x0C, 0x0F, "Gerudo's Fortress", None),
"Magic Bean Salesman": ("Event", None, None, "Zora's River", None),
"King Zora Moves": ("Event", None, None, "Zora's Domain", None),
"Master Sword Pedestal": ("Event", None, None, "Temple of Time", None),
"Epona": ("Event", None, None, "Lon Lon Ranch", None),
"Deku Baba Sticks": ("Event", None, None, "Kokiri Forest", None),
"Deku Baba Nuts": ("Event", None, None, "Kokiri Forest", None),
"Goron City Stick Pot": ("Event", None, None, "Goron City", None),
"Bottom of the Well Stick Pot": ("Event", None, None, "Bottom of the Well", None),
"Zoras Domain Stick Pot": ("Event", None, None, "Zora's Domain", None),
"Zoras Domain Nut Pot": ("Event", None, None, "Zora's Domain", None),
"Spirit Temple Nut Crate": ("Event", None, None, "Spirit Temple", None),
"Gerudo Fortress Carpenter Rescue": ("Event", None, None, "Gerudo's Fortress", None),
"Haunted Wasteland Bombchu Salesman": ("Event", None, None, "Haunted Wasteland", None),
"Ganons Castle Forest Trial Clear": ("Event", None, None, "Ganon's Castle", None),
"Ganons Castle Fire Trial Clear": ("Event", None, None, "Ganon's Castle", None),
"Ganons Castle Water Trial Clear": ("Event", None, None, "Ganon's Castle", None),
"Ganons Castle Shadow Trial Clear": ("Event", None, None, "Ganon's Castle", None),
"Ganons Castle Spirit Trial Clear": ("Event", None, None, "Ganon's Castle", None),
"Ganons Castle Light Trial Clear": ("Event", None, None, "Ganon's Castle", None),
# Deku Tree vanilla
"Deku Tree Lobby Chest": ("Chest", 0x00, 0x03, "Deku Tree", None),
"Deku Tree Slingshot Chest": ("Chest", 0x00, 0x01, "Deku Tree", None),
"Deku Tree Slingshot Room Side Chest": ("Chest", 0x00, 0x05, "Deku Tree", None),
"Deku Tree Compass Chest": ("Chest", 0x00, 0x02, "Deku Tree", None),
"Deku Tree Compass Room Side Chest": ("Chest", 0x00, 0x06, "Deku Tree", None),
"Deku Tree Basement Chest": ("Chest", 0x00, 0x04, "Deku Tree", None),
# Deku Tree MQ
"Deku Tree MQ Lobby Chest": ("Chest", 0x00, 0x03, "Deku Tree", None),
"Deku Tree MQ Compass Chest": ("Chest", 0x00, 0x01, "Deku Tree", None),
"Deku Tree MQ Slingshot Chest": ("Chest", 0x00, 0x06, "Deku Tree", None),
"Deku Tree MQ Slingshot Room Back Chest": ("Chest", 0x00, 0x02, "Deku Tree", None),
"Deku Tree MQ Basement Chest": ("Chest", 0x00, 0x04, "Deku Tree", None),
"Deku Tree MQ Before Spinning Log Chest": ("Chest", 0x00, 0x05, "Deku Tree", None),
"Deku Tree MQ After Spinning Log Chest": ("Chest", 0x00, 0x00, "Deku Tree", None),
# Dodongo's Cavern shared
"Chest Above King Dodongo": ("Chest", 0x12, 0x00, "Dodongo's Cavern", None),
# Dodongo's Cavern vanilla
"Dodongos Cavern Map Chest": ("Chest", 0x01, 0x08, "Dodongo's Cavern", None),
"Dodongos Cavern Compass Chest": ("Chest", 0x01, 0x05, "Dodongo's Cavern", None),
"Dodongos Cavern Bomb Flower Platform": ("Chest", 0x01, 0x06, "Dodongo's Cavern", None),
"Dodongos Cavern Bomb Bag Chest": ("Chest", 0x01, 0x04, "Dodongo's Cavern", None),
"Dodongos Cavern End of Bridge Chest": ("Chest", 0x01, 0x0A, "Dodongo's Cavern", None),
# Dodongo's Cavern MQ
"Dodongos Cavern MQ Map Chest": ("Chest", 0x01, 0x00, "Dodongo's Cavern", None),
"Dodongos Cavern MQ Bomb Bag Chest": ("Chest", 0x01, 0x04, "Dodongo's Cavern", None),
"Dodongos Cavern MQ Compass Chest": ("Chest", 0x01, 0x05, "Dodongo's Cavern", None),
"Dodongos Cavern MQ Larva Room Chest": ("Chest", 0x01, 0x02, "Dodongo's Cavern", None),
"Dodongos Cavern MQ Torch Puzzle Room Chest": ("Chest", 0x01, 0x03, "Dodongo's Cavern", None),
"Dodongos Cavern MQ Under Grave Chest": ("Chest", 0x01, 0x01, "Dodongo's Cavern", None),
# Jabu Jabu's Belly vanilla
"Boomerang Chest": ("Chest", 0x02, 0x01, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly Map Chest": ("Chest", 0x02, 0x02, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly Compass Chest": ("Chest", 0x02, 0x04, "Jabu Jabu's Belly", None),
# Jabu Jabu's Belly MQ
"Jabu Jabus Belly MQ Entry Side Chest": ("Chest", 0x02, 0x05, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Map Chest": ("Chest", 0x02, 0x03, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Second Room Lower Chest": ("Chest", 0x02, 0x02, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Compass Chest": ("Chest", 0x02, 0x00, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Second Room Upper Chest": ("Chest", 0x02, 0x07, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Basement North Chest": ("Chest", 0x02, 0x08, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Basement South Chest": ("Chest", 0x02, 0x04, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Near Boss Chest": ("Chest", 0x02, 0x0A, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Falling Like Like Room Chest":("Chest", 0x02, 0x09, "Jabu Jabu's Belly", None),
"Jabu Jabus Belly MQ Boomerang Room Small Chest": ("Chest", 0x02, 0x01, "Jabu Jabu's Belly", None),
"MQ Boomerang Chest": ("Chest", 0x02, 0x06, "Jabu Jabu's Belly", None),
# Forest Temple vanilla
"Forest Temple First Chest": ("Chest", 0x03, 0x03, "Forest Temple", None),
"Forest Temple Chest Behind Lobby": ("Chest", 0x03, 0x00, "Forest Temple", None),
"Forest Temple Well Chest": ("Chest", 0x03, 0x09, "Forest Temple", None),
"Forest Temple Map Chest": ("Chest", 0x03, 0x01, "Forest Temple", None),
"Forest Temple Outside Hookshot Chest": ("Chest", 0x03, 0x05, "Forest Temple", None),
"Forest Temple Falling Room Chest": ("Chest", 0x03, 0x07, "Forest Temple", None),
"Forest Temple Block Push Chest": ("Chest", 0x03, 0x04, "Forest Temple", None),
"Forest Temple Boss Key Chest": ("Chest", 0x03, 0x0E, "Forest Temple", None),
"Forest Temple Floormaster Chest": ("Chest", 0x03, 0x02, "Forest Temple", None),
"Forest Temple Bow Chest": ("Chest", 0x03, 0x0C, "Forest Temple", None),
"Forest Temple Red Poe Chest": ("Chest", 0x03, 0x0D, "Forest Temple", None),
"Forest Temple Blue Poe Chest": ("Chest", 0x03, 0x0F, "Forest Temple", None),
"Forest Temple Near Boss Chest": ("Chest", 0x03, 0x0B, "Forest Temple", None),
# Forest Temple MQ
"Forest Temple MQ First Chest": ("Chest", 0x03, 0x03, "Forest Temple", None),
"Forest Temple MQ Chest Behind Lobby": ("Chest", 0x03, 0x00, "Forest Temple", None),
"Forest Temple MQ Bow Chest": ("Chest", 0x03, 0x0C, "Forest Temple", None),
"Forest Temple MQ NE Outdoors Lower Chest": ("Chest", 0x03, 0x01, "Forest Temple", None),
"Forest Temple MQ NE Outdoors Upper Chest": ("Chest", 0x03, 0x05, "Forest Temple", None),
"Forest Temple MQ Well Chest": ("Chest", 0x03, 0x09, "Forest Temple", None),
"Forest Temple MQ Map Chest": ("Chest", 0x03, 0x0D, "Forest Temple", None),
"Forest Temple MQ Compass Chest": ("Chest", 0x03, 0x0F, "Forest Temple", None),
"Forest Temple MQ Falling Room Chest": ("Chest", 0x03, 0x06, "Forest Temple", None),
"Forest Temple MQ Near Boss Chest": ("Chest", 0x03, 0x0B, "Forest Temple", None),
"Forest Temple MQ Redead Chest": ("Chest", 0x03, 0x02, "Forest Temple", None),
"Forest Temple MQ Boss Key Chest": ("Chest", 0x03, 0x0E, "Forest Temple", None),
# Fire Temple vanilla
"Fire Temple Chest Near Boss": ("Chest", 0x04, 0x01, "Fire Temple", None),
"Fire Temple Fire Dancer Chest": ("Chest", 0x04, 0x00, "Fire Temple", None),
"Fire Temple Boss Key Chest": ("Chest", 0x04, 0x0C, "Fire Temple", None),
"Fire Temple Big Lava Room Bombable Chest": ("Chest", 0x04, 0x02, "Fire Temple", None),
"Fire Temple Big Lava Room Open Chest": ("Chest", 0x04, 0x04, "Fire Temple", None),
"Fire Temple Boulder Maze Lower Chest": ("Chest", 0x04, 0x03, "Fire Temple", None),
"Fire Temple Boulder Maze Upper Chest": ("Chest", 0x04, 0x06, "Fire Temple", None),
"Fire Temple Boulder Maze Side Room": ("Chest", 0x04, 0x08, "Fire Temple", None),
"Fire Temple Boulder Maze Bombable Pit": ("Chest", 0x04, 0x0B, "Fire Temple", None),
"Fire Temple Scarecrow Chest": ("Chest", 0x04, 0x0D, "Fire Temple", None),
"Fire Temple Map Chest": ("Chest", 0x04, 0x0A, "Fire Temple", None),
"Fire Temple Compass Chest": ("Chest", 0x04, 0x07, "Fire Temple", None),
"Fire Temple Highest Goron Chest": ("Chest", 0x04, 0x09, "Fire Temple", None),
"Fire Temple Megaton Hammer Chest": ("Chest", 0x04, 0x05, "Fire Temple", None),
# Fire Temple MQ
"Fire Temple MQ Chest Near Boss": ("Chest", 0x04, 0x07, "Fire Temple", None),
"Fire Temple MQ Megaton Hammer Chest": ("Chest", 0x04, 0x00, "Fire Temple", None),
"Fire Temple MQ Compass Chest": ("Chest", 0x04, 0x0B, "Fire Temple", None),
"Fire Temple MQ Maze Lower Chest": ("Chest", 0x04, 0x03, "Fire Temple", None),
"Fire Temple MQ Maze Upper Chest": ("Chest", 0x04, 0x06, "Fire Temple", None),
"Fire Temple MQ West Tower Top Chest": ("Chest", 0x04, 0x05, "Fire Temple", None),
"Fire Temple MQ Entrance Hallway Small Chest": ("Chest", 0x04, 0x02, "Fire Temple", None),
"Fire Temple MQ Map Chest": ("Chest", 0x04, 0x0C, "Fire Temple", None),
"Fire Temple MQ Boss Key Chest": ("Chest", 0x04, 0x04, "Fire Temple", None),
"Fire Temple MQ Big Lava Room Bombable Chest": ("Chest", 0x04, 0x01, "Fire Temple", None),
"Fire Temple MQ Maze Side Room": ("Chest", 0x04, 0x08, "Fire Temple", None),
"Fire Temple MQ Freestanding Key": ("Collectable", 0x04, 0x1C, "Fire Temple", None),
# Water Temple vanilla
"Water Temple Map Chest": ("Chest", 0x05, 0x02, "Water Temple", None),
"Water Temple Compass Chest": ("Chest", 0x05, 0x09, "Water Temple", None),
"Water Temple Torches Chest": ("Chest", 0x05, 0x01, "Water Temple", None),
"Water Temple Dragon Chest": ("Chest", 0x05, 0x0A, "Water Temple", None),
"Water Temple Central Bow Target Chest": ("Chest", 0x05, 0x08, "Water Temple", None),
"Water Temple Central Pillar Chest": ("Chest", 0x05, 0x06, "Water Temple", None),
"Water Temple Cracked Wall Chest": ("Chest", 0x05, 0x00, "Water Temple", None),
"Water Temple Boss Key Chest": ("Chest", 0x05, 0x05, "Water Temple", None),
"Water Temple Dark Link Chest": ("Chest", 0x05, 0x07, "Water Temple", None),
"Water Temple River Chest": ("Chest", 0x05, 0x03, "Water Temple", None),
# Water Temple MQ
"Water Temple MQ Central Pillar Chest": ("Chest", 0x05, 0x06, "Water Temple", None),
"Water Temple MQ Boss Key Chest": ("Chest", 0x05, 0x05, "Water Temple", None),
"Water Temple MQ Longshot Chest": ("Chest", 0x05, 0x00, "Water Temple", None),
"Water Temple MQ Compass Chest": ("Chest", 0x05, 0x01, "Water Temple", None),
"Water Temple MQ Map Chest": ("Chest", 0x05, 0x02, "Water Temple", None),
"Water Temple MQ Freestanding Key": ("Collectable", 0x05, 0x01, "Water Temple", None),
# Spirit Temple vanilla
"Spirit Temple Child Left Chest": ("Chest", 0x06, 0x08, "Spirit Temple", None),
"Spirit Temple Child Right Chest": ("Chest", 0x06, 0x00, "Spirit Temple", None),
"Spirit Temple Compass Chest": ("Chest", 0x06, 0x04, "Spirit Temple", None),
"Spirit Temple Early Adult Right Chest": ("Chest", 0x06, 0x07, "Spirit Temple", None),
"Spirit Temple First Mirror Right Chest": ("Chest", 0x06, 0x0D, "Spirit Temple", None),
"Spirit Temple First Mirror Left Chest": ("Chest", 0x06, 0x0E, "Spirit Temple", None),
"Spirit Temple Map Chest": ("Chest", 0x06, 0x03, "Spirit Temple", None),
"Spirit Temple Child Climb East Chest": ("Chest", 0x06, 0x06, "Spirit Temple", None),
"Spirit Temple Child Climb North Chest": ("Chest", 0x06, 0x0C, "Spirit Temple", None),
"Spirit Temple Sun Block Room Chest": ("Chest", 0x06, 0x01, "Spirit Temple", None),
"Spirit Temple Statue Hand Chest": ("Chest", 0x06, 0x02, "Spirit Temple", None),
"Spirit Temple NE Main Room Chest": ("Chest", 0x06, 0x0F, "Spirit Temple", None),
"Spirit Temple Near Four Armos Chest": ("Chest", 0x06, 0x05, "Spirit Temple", None),
"Spirit Temple Hallway Left Invisible Chest": ("Chest", 0x06, 0x14, "Spirit Temple", None),
"Spirit Temple Hallway Right Invisible Chest": ("Chest", 0x06, 0x15, "Spirit Temple", None),
"Spirit Temple Boss Key Chest": ("Chest", 0x06, 0x0A, "Spirit Temple", None),
"Spirit Temple Topmost Chest": ("Chest", 0x06, 0x12, "Spirit Temple", None),
# Spirit Temple MQ
"Spirit Temple MQ Entrance Front Left Chest": ("Chest", 0x06, 0x1A, "Spirit Temple", None),
"Spirit Temple MQ Entrance Back Right Chest": ("Chest", 0x06, 0x1F, "Spirit Temple", None),
"Spirit Temple MQ Entrance Front Right Chest": ("Chest", 0x06, 0x1B, "Spirit Temple", None),
"Spirit Temple MQ Entrance Back Left Chest": ("Chest", 0x06, 0x1E, "Spirit Temple", None),
"Spirit Temple MQ Child Center Chest": ("Chest", 0x06, 0x1D, "Spirit Temple", None),
"Spirit Temple MQ Map Chest": ("Chest", 0x06, 0x00, "Spirit Temple", None),
"Spirit Temple MQ Child Left Chest": ("Chest", 0x06, 0x08, "Spirit Temple", None),
"Spirit Temple MQ Child Climb North Chest": ("Chest", 0x06, 0x06, "Spirit Temple", None),
"Spirit Temple MQ Child Climb South Chest": ("Chest", 0x06, 0x0C, "Spirit Temple", None),
"Spirit Temple MQ Compass Chest": ("Chest", 0x06, 0x03, "Spirit Temple", None),
"Spirit Temple MQ Lower NE Main Room Chest": ("Chest", 0x06, 0x0F, "Spirit Temple", None),
"Spirit Temple MQ Upper NE Main Room Chest": ("Chest", 0x06, 0x02, "Spirit Temple", None),
"Spirit Temple MQ Silver Block Hallway Chest": ("Chest", 0x06, 0x1C, "Spirit Temple", None),
"Spirit Temple MQ Sun Block Room Chest": ("Chest", 0x06, 0x01, "Spirit Temple", None),
"Spirit Temple MQ Lower Adult Right Chest": ("Chest", 0x06, 0x07, "Spirit Temple", None),
"Spirit Temple MQ Lower Adult Left Chest": ("Chest", 0x06, 0x04, "Spirit Temple", None),
"Spirit Temple MQ Beamos Room Chest": ("Chest", 0x06, 0x19, "Spirit Temple", None),
"Spirit Temple MQ Ice Trap Chest": ("Chest", 0x06, 0x18, "Spirit Temple", None),
"Spirit Temple MQ Boss Key Chest": ("Chest", 0x06, 0x05, "Spirit Temple", None),
"Spirit Temple MQ Mirror Puzzle Invisible Chest": ("Chest", 0x06, 0x12, "Spirit Temple", None),
# Shadow Temple vanilla
"Shadow Temple Map Chest": ("Chest", 0x07, 0x01, "Shadow Temple", None),
"Shadow Temple Hover Boots Chest": ("Chest", 0x07, 0x07, "Shadow Temple", None),
"Shadow Temple Compass Chest": ("Chest", 0x07, 0x03, "Shadow Temple", None),
"Shadow Temple Early Silver Rupee Chest": ("Chest", 0x07, 0x02, "Shadow Temple", None),
"Shadow Temple Invisible Blades Visible Chest": ("Chest", 0x07, 0x0C, "Shadow Temple", None),
"Shadow Temple Invisible Blades Invisible Chest": ("Chest", 0x07, 0x16, "Shadow Temple", None),
"Shadow Temple Falling Spikes Lower Chest": ("Chest", 0x07, 0x05, "Shadow Temple", None),
"Shadow Temple Falling Spikes Upper Chest": ("Chest", 0x07, 0x06, "Shadow Temple", None),
"Shadow Temple Falling Spikes Switch Chest": ("Chest", 0x07, 0x04, "Shadow Temple", None),
"Shadow Temple Invisible Spikes Chest": ("Chest", 0x07, 0x09, "Shadow Temple", None),
"Shadow Temple Wind Hint Chest": ("Chest", 0x07, 0x15, "Shadow Temple", None),
"Shadow Temple After Wind Enemy Chest": ("Chest", 0x07, 0x08, "Shadow Temple", None),
"Shadow Temple After Wind Hidden Chest": ("Chest", 0x07, 0x14, "Shadow Temple", None),
"Shadow Temple Spike Walls Left Chest": ("Chest", 0x07, 0x0A, "Shadow Temple", None),
"Shadow Temple Boss Key Chest": ("Chest", 0x07, 0x0B, "Shadow Temple", None),
"Shadow Temple Hidden Floormaster Chest": ("Chest", 0x07, 0x0D, "Shadow Temple", None),
"Shadow Temple Freestanding Key": ("Collectable", 0x07, 0x01, "Shadow Temple", None),
# Shadow Temple MQ
"Shadow Temple MQ Compass Chest": ("Chest", 0x07, 0x01, "Shadow Temple", None),
"Shadow Temple MQ Hover Boots Chest": ("Chest", 0x07, 0x07, "Shadow Temple", None),
"Shadow Temple MQ Early Gibdos Chest": ("Chest", 0x07, 0x02, "Shadow Temple", None),
"Shadow Temple MQ Map Chest": ("Chest", 0x07, 0x03, "Shadow Temple", None),
"Shadow Temple MQ Beamos Silver Rupees Chest": ("Chest", 0x07, 0x0F, "Shadow Temple", None),
"Shadow Temple MQ Falling Spikes Switch Chest": ("Chest", 0x07, 0x04, "Shadow Temple", None),
"Shadow Temple MQ Falling Spikes Lower Chest": ("Chest", 0x07, 0x05, "Shadow Temple", None),
"Shadow Temple MQ Falling Spikes Upper Chest": ("Chest", 0x07, 0x06, "Shadow Temple", None),
"Shadow Temple MQ Invisible Spikes Chest": ("Chest", 0x07, 0x09, "Shadow Temple", None),
"Shadow Temple MQ Boss Key Chest": ("Chest", 0x07, 0x0B, "Shadow Temple", None),
"Shadow Temple MQ Spike Walls Left Chest": ("Chest", 0x07, 0x0A, "Shadow Temple", None),
"Shadow Temple MQ Stalfos Room Chest": ("Chest", 0x07, 0x10, "Shadow Temple", None),
"Shadow Temple MQ Invisible Blades Invisible Chest": ("Chest", 0x07, 0x16, "Shadow Temple", None),
"Shadow Temple MQ Invisible Blades Visible Chest": ("Chest", 0x07, 0x0C, "Shadow Temple", None),
"Shadow Temple MQ Bomb Flower Chest": ("Chest", 0x07, 0x0D, "Shadow Temple", None),
"Shadow Temple MQ Wind Hint Chest": ("Chest", 0x07, 0x15, "Shadow Temple", None),
"Shadow Temple MQ After Wind Hidden Chest": ("Chest", 0x07, 0x14, "Shadow Temple", None),
"Shadow Temple MQ After Wind Enemy Chest": ("Chest", 0x07, 0x08, "Shadow Temple", None),
"Shadow Temple MQ Near Ship Invisible Chest": ("Chest", 0x07, 0x0E, "Shadow Temple", None),
"Shadow Temple MQ Freestanding Key": ("Collectable", 0x07, 0x06, "Shadow Temple", None),
# Bottom of the Well vanilla
"Bottom of the Well Front Left Hidden Wall": ("Chest", 0x08, 0x08, "Bottom of the Well", None),
"Bottom of the Well Front Center Bombable": ("Chest", 0x08, 0x02, "Bottom of the Well", None),
"Bottom of the Well Right Bottom Hidden Wall": ("Chest", 0x08, 0x05, "Bottom of the Well", None),
"Bottom of the Well Center Large Chest": ("Chest", 0x08, 0x01, "Bottom of the Well", None),
"Bottom of the Well Center Small Chest": ("Chest", 0x08, 0x0E, "Bottom of the Well", None),
"Bottom of the Well Back Left Bombable": ("Chest", 0x08, 0x04, "Bottom of the Well", None),
"Bottom of the Well Defeat Boss": ("Chest", 0x08, 0x03, "Bottom of the Well", None),
"Bottom of the Well Invisible Chest": ("Chest", 0x08, 0x14, "Bottom of the Well", None),
"Bottom of the Well Underwater Front Chest": ("Chest", 0x08, 0x10, "Bottom of the Well", None),
"Bottom of the Well Underwater Left Chest": ("Chest", 0x08, 0x09, "Bottom of the Well", None),
"Bottom of the Well Basement Chest": ("Chest", 0x08, 0x07, "Bottom of the Well", None),
"Bottom of the Well Locked Pits": ("Chest", 0x08, 0x0A, "Bottom of the Well", None),
"Bottom of the Well Behind Right Grate": ("Chest", 0x08, 0x0C, "Bottom of the Well", None),
"Bottom of the Well Freestanding Key": ("Collectable", 0x08, 0x01, "Bottom of the Well", None),
# Bottom of the Well MQ
"Bottom of the Well MQ Map Chest": ("Chest", 0x08, 0x03, "Bottom of the Well", None),
"Bottom of the Well MQ Lens Chest": ("Chest", 0x08, 0x01, "Bottom of the Well", None),
"Bottom of the Well MQ Compass Chest": ("Chest", 0x08, 0x02, "Bottom of the Well", None),
"Bottom of the Well MQ Dead Hand Freestanding Key":("Collectable", 0x08, 0x02, "Bottom of the Well", None),
"Bottom of the Well MQ East Inner Room Freestanding Key":("Collectable",0x08,0x01,"Bottom of the Well", None),
# Ice Cavern vanilla
"Ice Cavern Map Chest": ("Chest", 0x09, 0x00, "Ice Cavern", None),
"Ice Cavern Compass Chest": ("Chest", 0x09, 0x01, "Ice Cavern", None),
"Ice Cavern Iron Boots Chest": ("Chest", 0x09, 0x02, "Ice Cavern", None),
"Ice Cavern Freestanding PoH": ("Collectable", 0x09, 0x01, "Ice Cavern", None),
# Ice Cavern MQ
"Ice Cavern MQ Iron Boots Chest": ("Chest", 0x09, 0x02, "Ice Cavern", None),
"Ice Cavern MQ Compass Chest": ("Chest", 0x09, 0x00, "Ice Cavern", None),
"Ice Cavern MQ Map Chest": ("Chest", 0x09, 0x01, "Ice Cavern", None),
"Ice Cavern MQ Freestanding PoH": ("Collectable", 0x09, 0x01, "Ice Cavern", None),
# Gerudo Training Grounds vanilla
"Gerudo Training Grounds Lobby Left Chest": ("Chest", 0x0B, 0x13, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Lobby Right Chest": ("Chest", 0x0B, 0x07, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Stalfos Chest": ("Chest", 0x0B, 0x00, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Beamos Chest": ("Chest", 0x0B, 0x01, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Hidden Ceiling Chest": ("Chest", 0x0B, 0x0B, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Path First Chest": ("Chest", 0x0B, 0x06, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Path Second Chest": ("Chest", 0x0B, 0x0A, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Path Third Chest": ("Chest", 0x0B, 0x09, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Path Final Chest": ("Chest", 0x0B, 0x0C, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Right Central Chest":("Chest", 0x0B, 0x05, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Maze Right Side Chest": ("Chest", 0x0B, 0x08, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Underwater Silver Rupee Chest": ("Chest", 0x0B, 0x0D, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Hammer Room Clear Chest": ("Chest", 0x0B, 0x12, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Hammer Room Switch Chest":("Chest", 0x0B, 0x10, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Eye Statue Chest": ("Chest", 0x0B, 0x03, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Near Scarecrow Chest": ("Chest", 0x0B, 0x04, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Before Heavy Block Chest":("Chest", 0x0B, 0x11, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Heavy Block First Chest": ("Chest", 0x0B, 0x0F, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Heavy Block Second Chest":("Chest", 0x0B, 0x0E, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Heavy Block Third Chest": ("Chest", 0x0B, 0x14, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Heavy Block Fourth Chest":("Chest", 0x0B, 0x02, "Gerudo Training Grounds", None),
"Gerudo Training Grounds Freestanding Key": ("Collectable", 0x0B, 0x01, "Gerudo Training Grounds", None),
# Gerudo Training Grounds MQ
"Gerudo Training Grounds MQ Lobby Right Chest": ("Chest", 0x0B, 0x07, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Lobby Left Chest": ("Chest", 0x0B, 0x13, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ First Iron Knuckle Chest": ("Chest", 0x0B, 0x00, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Before Heavy Block Chest": ("Chest", 0x0B, 0x11, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Eye Statue Chest": ("Chest", 0x0B, 0x03, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Flame Circle Chest": ("Chest", 0x0B, 0x0E, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Second Iron Knuckle Chest": ("Chest", 0x0B, 0x12, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Dinolfos Chest": ("Chest", 0x0B, 0x01, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Ice Arrows Chest": ("Chest", 0x0B, 0x04, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Maze Right Central Chest": ("Chest", 0x0B, 0x05, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Maze Path First Chest":("Chest", 0x0B, 0x06, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Maze Right Side Chest":("Chest", 0x0B, 0x08, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Maze Path Third Chest":("Chest", 0x0B, 0x09, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Maze Path Second Chest": ("Chest", 0x0B, 0x0A, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Hidden Ceiling Chest": ("Chest", 0x0B, 0x0B, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Underwater Silver Rupee Chest":("Chest",0x0B, 0x0D, "Gerudo Training Grounds", None),
"Gerudo Training Grounds MQ Heavy Block Chest": ("Chest", 0x0B, 0x02, "Gerudo Training Grounds", None),
# Ganon's Castle shared
"Ganons Tower Boss Key Chest": ("Chest", 0x0A, 0x0B, "Ganon's Castle", None),
# Ganon's Castle vanilla
"Ganons Castle Forest Trial Chest": ("Chest", 0x0D, 0x09, "Ganon's Castle", None),
"Ganons Castle Water Trial Left Chest": ("Chest", 0x0D, 0x07, "Ganon's Castle", None),
"Ganons Castle Water Trial Right Chest": ("Chest", 0x0D, 0x06, "Ganon's Castle", None),
"Ganons Castle Shadow Trial First Chest": ("Chest", 0x0D, 0x08, "Ganon's Castle", None),
"Ganons Castle Shadow Trial Second Chest": ("Chest", 0x0D, 0x05, "Ganon's Castle", None),
"Ganons Castle Spirit Trial First Chest": ("Chest", 0x0D, 0x12, "Ganon's Castle", None),
"Ganons Castle Spirit Trial Second Chest": ("Chest", 0x0D, 0x14, "Ganon's Castle", None),
"Ganons Castle Light Trial First Left Chest": ("Chest", 0x0D, 0x0C, "Ganon's Castle", None),
"Ganons Castle Light Trial Second Left Chest": ("Chest", 0x0D, 0x0B, "Ganon's Castle", None),
"Ganons Castle Light Trial Third Left Chest": ("Chest", 0x0D, 0x0D, "Ganon's Castle", None),
"Ganons Castle Light Trial First Right Chest": ("Chest", 0x0D, 0x0E, "Ganon's Castle", None),
"Ganons Castle Light Trial Second Right Chest": ("Chest", 0x0D, 0x0A, "Ganon's Castle", None),
"Ganons Castle Light Trial Third Right Chest": ("Chest", 0x0D, 0x0F, "Ganon's Castle", None),
"Ganons Castle Light Trial Invisible Enemies Chest": ("Chest", 0x0D, 0x10, "Ganon's Castle", None),
"Ganons Castle Light Trial Lullaby Chest": ("Chest", 0x0D, 0x11, "Ganon's Castle", None),
# Ganon's Castle MQ
"Ganons Castle MQ Water Trial Chest": ("Chest", 0x0D, 0x01, "Ganon's Castle", None),
"Ganons Castle MQ Forest Trial First Chest": ("Chest", 0x0D, 0x02, "Ganon's Castle", None),
"Ganons Castle MQ Forest Trial Second Chest": ("Chest", 0x0D, 0x03, "Ganon's Castle", None),
"Ganons Castle MQ Light Trial Lullaby Chest": ("Chest", 0x0D, 0x04, "Ganon's Castle", None),
"Ganons Castle MQ Shadow Trial First Chest": ("Chest", 0x0D, 0x00, "Ganon's Castle", None),
"Ganons Castle MQ Shadow Trial Second Chest": ("Chest", 0x0D, 0x05, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial Golden Gauntlets Chest": ("Chest", 0x0D, 0x06, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial Sun Back Right Chest": ("Chest", 0x0D, 0x07, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial Sun Back Left Chest": ("Chest", 0x0D, 0x08, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial Sun Front Left Chest": ("Chest", 0x0D, 0x09, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial First Chest": ("Chest", 0x0D, 0x0A, "Ganon's Castle", None),
"Ganons Castle MQ Spirit Trial Second Chest": ("Chest", 0x0D, 0x14, "Ganon's Castle", None),
"Ganons Castle MQ Forest Trial Freestanding Key": ("Collectable", 0x0D, 0x01, "Ganon's Castle", None),
"Links Pocket": ("Boss", None, None, "Link's Pocket", None),
"Queen Gohma": ("Boss", None, 0x6C, "Deku Tree", (0x0CA315F, 0x2079571)),
"King Dodongo": ("Boss", None, 0x6D, "Dodongo's Cavern", (0x0CA30DF, 0x2223309)),
"Barinade": ("Boss", None, 0x6E, "Jabu Jabu's Belly", (0x0CA36EB, 0x2113C19)),
"Phantom Ganon": ("Boss", None, 0x66, "Forest Temple", (0x0CA3D07, 0x0D4ED79)),
"Volvagia": ("Boss", None, 0x67, "Fire Temple", (0x0CA3D93, 0x0D10135)),
"Morpha": ("Boss", None, 0x68, "Water Temple", (0x0CA3E1F, 0x0D5A3A9)),
"Twinrova": ("Boss", None, 0x69, "Spirit Temple", (0x0CA3EB3, 0x0D39FF1)),
"Bongo Bongo": ("Boss", None, 0x6A, "Shadow Temple", (0x0CA3F43, 0x0D13E19)),
"Ganon": ("Event", None, None, "Ganon's Castle", None),
"Queen Gohma Heart": ("BossHeart", 0x11, 0x4F, "Deku Tree", None),
"King Dodongo Heart": ("BossHeart", 0x12, 0x4F, "Dodongo's Cavern", None),
"Barinade Heart": ("BossHeart", 0x13, 0x4F, "Jabu Jabu's Belly", None),
"Phantom Ganon Heart": ("BossHeart", 0x14, 0x4F, "Forest Temple", None),
"Volvagia Heart": ("BossHeart", 0x15, 0x4F, "Fire Temple", None),
"Morpha Heart": ("BossHeart", 0x16, 0x4F, "Water Temple", None),
"Twinrova Heart": ("BossHeart", 0x17, 0x4F, "Spirit Temple", None),
"Bongo Bongo Heart": ("BossHeart", 0x18, 0x4F, "Shadow Temple", None),
# note that the scene for skulltulas is not the actual scene the token appears in
# rather, it is the index of the grouping used when storing skulltula collection
# for example, zora river, zora's domain, and zora fountain are all a single 'scene' for skulltulas
"GS Deku Tree Basement Back Room": ("GS Token", 0x00, 0x01, "Deku Tree", None),
"GS Deku Tree Basement Gate": ("GS Token", 0x00, 0x02, "Deku Tree", None),
"GS Deku Tree Basement Vines": ("GS Token", 0x00, 0x04, "Deku Tree", None),
"GS Deku Tree Compass Room": ("GS Token", 0x00, 0x08, "Deku Tree", None),
"GS Deku Tree MQ Lobby": ("GS Token", 0x00, 0x02, "Deku Tree", None),
"GS Deku Tree MQ Compass Room": ("GS Token", 0x00, 0x08, "Deku Tree", None),
"GS Deku Tree MQ Basement Ceiling": ("GS Token", 0x00, 0x04, "Deku Tree", None),
"GS Deku Tree MQ Basement Back Room": ("GS Token", 0x00, 0x01, "Deku Tree", None),
"GS Dodongo's Cavern Vines Above Stairs": ("GS Token", 0x01, 0x01, "Dodongo's Cavern", None),
"GS Dodongo's Cavern Scarecrow": ("GS Token", 0x01, 0x02, "Dodongo's Cavern", None),
"GS Dodongo's Cavern Alcove Above Stairs": ("GS Token", 0x01, 0x04, "Dodongo's Cavern", None),
"GS Dodongo's Cavern Back Room": ("GS Token", 0x01, 0x08, "Dodongo's Cavern", None),
"GS Dodongo's Cavern East Side Room": ("GS Token", 0x01, 0x10, "Dodongo's Cavern", None),
"GS Dodongo's Cavern MQ Scrub Room": ("GS Token", 0x01, 0x02, "Dodongo's Cavern", None),
"GS Dodongo's Cavern MQ Song of Time Block Room": ("GS Token", 0x01, 0x08, "Dodongo's Cavern", None),
"GS Dodongo's Cavern MQ Lizalfos Room": ("GS Token", 0x01, 0x04, "Dodongo's Cavern", None),
"GS Dodongo's Cavern MQ Larva Room": ("GS Token", 0x01, 0x10, "Dodongo's Cavern", None),
"GS Dodongo's Cavern MQ Back Area": ("GS Token", 0x01, 0x01, "Dodongo's Cavern", None),
"GS Jabu Jabu Lobby Basement Lower": ("GS Token", 0x02, 0x01, "Jabu Jabu's Belly", None),
"GS Jabu Jabu Lobby Basement Upper": ("GS Token", 0x02, 0x02, "Jabu Jabu's Belly", None),
"GS Jabu Jabu Near Boss": ("GS Token", 0x02, 0x04, "Jabu Jabu's Belly", None),
"GS Jabu Jabu Water Switch Room": ("GS Token", 0x02, 0x08, "Jabu Jabu's Belly", None),
"GS Jabu Jabu MQ Tailpasaran Room": ("GS Token", 0x02, 0x04, "Jabu Jabu's Belly", None),
"GS Jabu Jabu MQ Invisible Enemies Room": ("GS Token", 0x02, 0x08, "Jabu Jabu's Belly", None),
"GS Jabu Jabu MQ Boomerang Room": ("GS Token", 0x02, 0x01, "Jabu Jabu's Belly", None),
"GS Jabu Jabu MQ Near Boss": ("GS Token", 0x02, 0x02, "Jabu Jabu's Belly", None),
"GS Forest Temple Outdoor East": ("GS Token", 0x03, 0x01, "Forest Temple", None),
"GS Forest Temple First Room": ("GS Token", 0x03, 0x02, "Forest Temple", None),
"GS Forest Temple Outdoor West": ("GS Token", 0x03, 0x04, "Forest Temple", None),
"GS Forest Temple Lobby": ("GS Token", 0x03, 0x08, "Forest Temple", None),
"GS Forest Temple Basement": ("GS Token", 0x03, 0x10, "Forest Temple", None),
"GS Forest Temple MQ First Hallway": ("GS Token", 0x03, 0x02, "Forest Temple", None),
"GS Forest Temple MQ Block Push Room": ("GS Token", 0x03, 0x10, "Forest Temple", None),
"GS Forest Temple MQ Outdoor East": ("GS Token", 0x03, 0x01, "Forest Temple", None),
"GS Forest Temple MQ Outdoor West": ("GS Token", 0x03, 0x04, "Forest Temple", None),
"GS Forest Temple MQ Well": ("GS Token", 0x03, 0x08, "Forest Temple", None),
"GS Fire Temple Song of Time Room": ("GS Token", 0x04, 0x01, "Fire Temple", None),
"GS Fire Temple Basement": ("GS Token", 0x04, 0x02, "Fire Temple", None),
"GS Fire Temple Unmarked Bomb Wall": ("GS Token", 0x04, 0x04, "Fire Temple", None),
"GS Fire Temple East Tower Top": ("GS Token", 0x04, 0x08, "Fire Temple", None),
"GS Fire Temple East Tower Climb": ("GS Token", 0x04, 0x10, "Fire Temple", None),
"GS Fire Temple MQ Above Fire Wall Maze": ("GS Token", 0x04, 0x02, "Fire Temple", None),
"GS Fire Temple MQ Fire Wall Maze Center": ("GS Token", 0x04, 0x08, "Fire Temple", None),
"GS Fire Temple MQ Big Lava Room": ("GS Token", 0x04, 0x01, "Fire Temple", None),
"GS Fire Temple MQ Fire Wall Maze Side Room": ("GS Token", 0x04, 0x10, "Fire Temple", None),
"GS Fire Temple MQ East Tower Top": ("GS Token", 0x04, 0x04, "Fire Temple", None),
"GS Water Temple South Basement": ("GS Token", 0x05, 0x01, "Water Temple", None),
"GS Water Temple Falling Platform Room": ("GS Token", 0x05, 0x02, "Water Temple", None),
"GS Water Temple Central Room": ("GS Token", 0x05, 0x04, "Water Temple", None),
"GS Water Temple Near Boss Key Chest": ("GS Token", 0x05, 0x08, "Water Temple", None),
"GS Water Temple Serpent River": ("GS Token", 0x05, 0x10, "Water Temple", None),
"GS Water Temple MQ Before Upper Water Switch": ("GS Token", 0x05, 0x04, "Water Temple", None),
"GS Water Temple MQ North Basement": ("GS Token", 0x05, 0x08, "Water Temple", None),
"GS Water Temple MQ Lizalfos Hallway": ("GS Token", 0x05, 0x01, "Water Temple", None),
"GS Water Temple MQ Serpent River": ("GS Token", 0x05, 0x02, "Water Temple", None),
"GS Water Temple MQ South Basement": ("GS Token", 0x05, 0x10, "Water Temple", None),
"GS Spirit Temple Hall to West Iron Knuckle": ("GS Token", 0x06, 0x01, "Spirit Temple", None),
"GS Spirit Temple Boulder Room": ("GS Token", 0x06, 0x02, "Spirit Temple", None),
"GS Spirit Temple Lobby": ("GS Token", 0x06, 0x04, "Spirit Temple", None),
"GS Spirit Temple Bomb for Light Room": ("GS Token", 0x06, 0x08, "Spirit Temple", None),
"GS Spirit Temple Metal Fence": ("GS Token", 0x06, 0x10, "Spirit Temple", None),
"GS Spirit Temple MQ Lower Adult Right": ("GS Token", 0x06, 0x08, "Spirit Temple", None),
"GS Spirit Temple MQ Lower Adult Left": ("GS Token", 0x06, 0x02, "Spirit Temple", None),
"GS Spirit Temple MQ Iron Knuckle West": ("GS Token", 0x06, 0x04, "Spirit Temple", None),
"GS Spirit Temple MQ Iron Knuckle North": ("GS Token", 0x06, 0x10, "Spirit Temple", None),
"GS Spirit Temple MQ Sun Block Room": ("GS Token", 0x06, 0x01, "Spirit Temple", None),
"GS Shadow Temple Single Giant Pot": ("GS Token", 0x07, 0x01, "Shadow Temple", None),
"GS Shadow Temple Crusher Room": ("GS Token", 0x07, 0x02, "Shadow Temple", None),
"GS Shadow Temple Triple Giant Pot": ("GS Token", 0x07, 0x04, "Shadow Temple", None),
"GS Shadow Temple Like Like Room": ("GS Token", 0x07, 0x08, "Shadow Temple", None),
"GS Shadow Temple Near Ship": ("GS Token", 0x07, 0x10, "Shadow Temple", None),
"GS Shadow Temple MQ Crusher Room": ("GS Token", 0x07, 0x02, "Shadow Temple", None),
"GS Shadow Temple MQ Wind Hint Room": ("GS Token", 0x07, 0x01, "Shadow Temple", None),
"GS Shadow Temple MQ After Wind": ("GS Token", 0x07, 0x08, "Shadow Temple", None),
"GS Shadow Temple MQ After Ship": ("GS Token", 0x07, 0x10, "Shadow Temple", None),
"GS Shadow Temple MQ Near Boss": ("GS Token", 0x07, 0x04, "Shadow Temple", None),
"GS Well Like Like Cage": ("GS Token", 0x08, 0x01, "Bottom of the Well", None),
"GS Well East Inner Room": ("GS Token", 0x08, 0x02, "Bottom of the Well", None),
"GS Well West Inner Room": ("GS Token", 0x08, 0x04, "Bottom of the Well", None),
"GS Well MQ Basement": ("GS Token", 0x08, 0x01, "Bottom of the Well", None),
"GS Well MQ Coffin Room": ("GS Token", 0x08, 0x04, "Bottom of the Well", None),
"GS Well MQ West Inner Room": ("GS Token", 0x08, 0x02, "Bottom of the Well", None),
"GS Ice Cavern Push Block Room": ("GS Token", 0x09, 0x01, "Ice Cavern", None),
"GS Ice Cavern Spinning Scythe Room": ("GS Token", 0x09, 0x02, "Ice Cavern", None),
"GS Ice Cavern Heart Piece Room": ("GS Token", 0x09, 0x04, "Ice Cavern", None),
"GS Ice Cavern MQ Scarecrow": ("GS Token", 0x09, 0x01, "Ice Cavern", None),
"GS Ice Cavern MQ Ice Block": ("GS Token", 0x09, 0x04, "Ice Cavern", None),
"GS Ice Cavern MQ Red Ice": ("GS Token", 0x09, 0x02, "Ice Cavern", None),
"GS Hyrule Field Near Gerudo Valley": ("GS Token", 0x0A, 0x01, "Hyrule Field", None),
"GS Hyrule Field near Kakariko": ("GS Token", 0x0A, 0x02, "Hyrule Field", None),
"GS Lon Lon Ranch Back Wall": ("GS Token", 0x0B, 0x01, "Lon Lon Ranch", None),
"GS Lon Lon Ranch Rain Shed": ("GS Token", 0x0B, 0x02, "Lon Lon Ranch", None),
"GS Lon Lon Ranch House Window": ("GS Token", 0x0B, 0x04, "Lon Lon Ranch", None),
"GS Lon Lon Ranch Tree": ("GS Token", 0x0B, 0x08, "Lon Lon Ranch", None),
"GS Kokiri Bean Patch": ("GS Token", 0x0C, 0x01, "Kokiri Forest", None),
"GS Kokiri Know It All House": ("GS Token", 0x0C, 0x02, "Kokiri Forest", None),
"GS Kokiri House of Twins": ("GS Token", 0x0C, 0x04, "Kokiri Forest", None),
"GS Lost Woods Bean Patch Near Bridge": ("GS Token", 0x0D, 0x01, "the Lost Woods", None),
"GS Lost Woods Bean Patch Near Stage": ("GS Token", 0x0D, 0x02, "the Lost Woods", None),
"GS Lost Woods Above Stage": ("GS Token", 0x0D, 0x04, "the Lost Woods", None),
"GS Sacred Forest Meadow": ("GS Token", 0x0D, 0x08, "Sacred Forest Meadow", None),
"GS Outside Ganon's Castle": ("GS Token", 0x0E, 0x01, "outside Ganon's Castle", None),
"GS Hyrule Castle Grotto": ("GS Token", 0x0E, 0x02, "Hyrule Castle", None),
"GS Hyrule Castle Tree": ("GS Token", 0x0E, 0x04, "Hyrule Castle", None),
"GS Castle Market Guard House": ("GS Token", 0x0E, 0x08, "the Market", None),
"GS Mountain Crater Bean Patch": ("GS Token", 0x0F, 0x01, "Death Mountain Crater", None),
"GS Mountain Trail Bean Patch": ("GS Token", 0x0F, 0x02, "Death Mountain Trail", None),
"GS Mountain Trail Bomb Alcove": ("GS Token", 0x0F, 0x04, "Death Mountain Trail", None),
"GS Mountain Trail Above Dodongo's Cavern": ("GS Token", 0x0F, 0x08, "Death Mountain Trail", None),
"GS Mountain Trail Path to Crater": ("GS Token", 0x0F, 0x10, "Death Mountain Trail", None),
"GS Goron City Center Platform": ("GS Token", 0x0F, 0x20, "Goron City", None),
"GS Goron City Boulder Maze": ("GS Token", 0x0F, 0x40, "Goron City", None),
"GS Death Mountain Crater Crate": ("GS Token", 0x0F, 0x80, "Death Mountain Crater", None),
"GS Kakariko House Under Construction": ("GS Token", 0x10, 0x08, "Kakariko Village", None),
"GS Kakariko Skulltula House": ("GS Token", 0x10, 0x10, "Kakariko Village", None),
"GS Kakariko Guard's House": ("GS Token", 0x10, 0x02, "Kakariko Village", None),
"GS Kakariko Tree": ("GS Token", 0x10, 0x20, "Kakariko Village", None),
"GS Kakariko Watchtower": ("GS Token", 0x10, 0x04, "Kakariko Village", None),
"GS Kakariko Above Impa's House": ("GS Token", 0x10, 0x40, "Kakariko Village", None),
"GS Graveyard Wall": ("GS Token", 0x10, 0x80, "the Graveyard", None),
"GS Graveyard Bean Patch": ("GS Token", 0x10, 0x01, "the Graveyard", None),
"GS Zora River Ladder": ("GS Token", 0x11, 0x01, "Zora's River", None),
"GS Zora River Tree": ("GS Token", 0x11, 0x02, "Zora's River", None),
"GS Zora's Fountain Above the Log": ("GS Token", 0x11, 0x04, "Zora's Fountain", None),
"GS Zora River Above Bridge": ("GS Token", 0x11, 0x08, "Zora's River", None),
"GS Zora River Near Raised Grottos": ("GS Token", 0x11, 0x10, "Zora's River", None),
"GS Zora's Fountain Hidden Cave": ("GS Token", 0x11, 0x20, "Zora's Fountain", None),
"GS Zora's Domain Frozen Waterfall": ("GS Token", 0x11, 0x40, "Zora's Domain", None),
"GS Zora's Fountain Tree": ("GS Token", 0x11, 0x80, "Zora's Fountain", None),
"GS Lake Hylia Bean Patch": ("GS Token", 0x12, 0x01, "Lake Hylia", None),
"GS Lake Hylia Small Island": ("GS Token", 0x12, 0x02, "Lake Hylia", None),
"GS Lake Hylia Lab Wall": ("GS Token", 0x12, 0x04, "Lake Hylia", None),
"GS Lab Underwater Crate": ("GS Token", 0x12, 0x08, "Lake Hylia", None),
"GS Lake Hylia Giant Tree": ("GS Token", 0x12, 0x10, "Lake Hylia", None),
"GS Gerudo Valley Bean Patch": ("GS Token", 0x13, 0x01, "Gerudo Valley", None),
"GS Gerudo Valley Small Bridge": ("GS Token", 0x13, 0x02, "Gerudo Valley", None),
"GS Gerudo Valley Pillar": ("GS Token", 0x13, 0x04, "Gerudo Valley", None),
"GS Gerudo Valley Behind Tent": ("GS Token", 0x13, 0x08, "Gerudo Valley", None),
"GS Gerudo Fortress Archery Range": ("GS Token", 0x14, 0x01, "Gerudo's Fortress", None),
"GS Gerudo Fortress Top Floor": ("GS Token", 0x14, 0x02, "Gerudo's Fortress", None),
"GS Desert Colossus Bean Patch": ("GS Token", 0x15, 0x01, "Desert Colossus", None),
"GS Wasteland Ruins": ("GS Token", 0x15, 0x02, "Haunted Wasteland", None),
"GS Desert Colossus Hill": ("GS Token", 0x15, 0x04, "Desert Colossus", None),
"GS Desert Colossus Tree": ("GS Token", 0x15, 0x08, "Desert Colossus", None),
"Kokiri Shop Item 1": ("Shop", 0x2D, 0x30, "Kokiri Forest", (shop_address(0, 0), None)),
"Kokiri Shop Item 2": ("Shop", 0x2D, 0x31, "Kokiri Forest", (shop_address(0, 1), None)),
"Kokiri Shop Item 3": ("Shop", 0x2D, 0x32, "Kokiri Forest", (shop_address(0, 2), None)),
"Kokiri Shop Item 4": ("Shop", 0x2D, 0x33, "Kokiri Forest", (shop_address(0, 3), None)),
"Kokiri Shop Item 5": ("Shop", 0x2D, 0x34, "Kokiri Forest", (shop_address(0, 4), None)),
"Kokiri Shop Item 6": ("Shop", 0x2D, 0x35, "Kokiri Forest", (shop_address(0, 5), None)),
"Kokiri Shop Item 7": ("Shop", 0x2D, 0x36, "Kokiri Forest", (shop_address(0, 6), None)),
"Kokiri Shop Item 8": ("Shop", 0x2D, 0x37, "Kokiri Forest", (shop_address(0, 7), None)),
"Kakariko Potion Shop Item 1": ("Shop", 0x30, 0x30, "Kakariko Village", (shop_address(1, 0), None)),
"Kakariko Potion Shop Item 2": ("Shop", 0x30, 0x31, "Kakariko Village", (shop_address(1, 1), None)),
"Kakariko Potion Shop Item 3": ("Shop", 0x30, 0x32, "Kakariko Village", (shop_address(1, 2), None)),
"Kakariko Potion Shop Item 4": ("Shop", 0x30, 0x33, "Kakariko Village", (shop_address(1, 3), None)),
"Kakariko Potion Shop Item 5": ("Shop", 0x30, 0x34, "Kakariko Village", (shop_address(1, 4), None)),
"Kakariko Potion Shop Item 6": ("Shop", 0x30, 0x35, "Kakariko Village", (shop_address(1, 5), None)),
"Kakariko Potion Shop Item 7": ("Shop", 0x30, 0x36, "Kakariko Village", (shop_address(1, 6), None)),
"Kakariko Potion Shop Item 8": ("Shop", 0x30, 0x37, "Kakariko Village", (shop_address(1, 7), None)),
"Bombchu Shop Item 1": ("Shop", 0x32, 0x30, "the Market", (shop_address(2, 0), None)),
"Bombchu Shop Item 2": ("Shop", 0x32, 0x31, "the Market", (shop_address(2, 1), None)),
"Bombchu Shop Item 3": ("Shop", 0x32, 0x32, "the Market", (shop_address(2, 2), None)),
"Bombchu Shop Item 4": ("Shop", 0x32, 0x33, "the Market", (shop_address(2, 3), None)),
"Bombchu Shop Item 5": ("Shop", 0x32, 0x34, "the Market", (shop_address(2, 4), None)),
"Bombchu Shop Item 6": ("Shop", 0x32, 0x35, "the Market", (shop_address(2, 5), None)),
"Bombchu Shop Item 7": ("Shop", 0x32, 0x36, "the Market", (shop_address(2, 6), None)),
"Bombchu Shop Item 8": ("Shop", 0x32, 0x37, "the Market", (shop_address(2, 7), None)),
"Castle Town Potion Shop Item 1": ("Shop", 0x31, 0x30, "the Market", (shop_address(3, 0), None)),
"Castle Town Potion Shop Item 2": ("Shop", 0x31, 0x31, "the Market", (shop_address(3, 1), None)),
"Castle Town Potion Shop Item 3": ("Shop", 0x31, 0x32, "the Market", (shop_address(3, 2), None)),
"Castle Town Potion Shop Item 4": ("Shop", 0x31, 0x33, "the Market", (shop_address(3, 3), None)),
"Castle Town Potion Shop Item 5": ("Shop", 0x31, 0x34, "the Market", (shop_address(3, 4), None)),
"Castle Town Potion Shop Item 6": ("Shop", 0x31, 0x35, "the Market", (shop_address(3, 5), None)),
"Castle Town Potion Shop Item 7": ("Shop", 0x31, 0x36, "the Market", (shop_address(3, 6), None)),
"Castle Town Potion Shop Item 8": ("Shop", 0x31, 0x37, "the Market", (shop_address(3, 7), None)),
"Castle Town Bazaar Item 1": ("Shop", 0x2C, 0x30, "the Market", (shop_address(4, 0), None)),
"Castle Town Bazaar Item 2": ("Shop", 0x2C, 0x31, "the Market", (shop_address(4, 1), None)),
"Castle Town Bazaar Item 3": ("Shop", 0x2C, 0x32, "the Market", (shop_address(4, 2), None)),
"Castle Town Bazaar Item 4": ("Shop", 0x2C, 0x33, "the Market", (shop_address(4, 3), None)),
"Castle Town Bazaar Item 5": ("Shop", 0x2C, 0x34, "the Market", (shop_address(4, 4), None)),
"Castle Town Bazaar Item 6": ("Shop", 0x2C, 0x35, "the Market", (shop_address(4, 5), None)),
"Castle Town Bazaar Item 7": ("Shop", 0x2C, 0x36, "the Market", (shop_address(4, 6), None)),
"Castle Town Bazaar Item 8": ("Shop", 0x2C, 0x37, "the Market", (shop_address(4, 7), None)),
"Kakariko Bazaar Item 1": ("Shop", 0x2C, 0x38, "Kakariko Village", (shop_address(5, 0), None)),
"Kakariko Bazaar Item 2": ("Shop", 0x2C, 0x39, "Kakariko Village", (shop_address(5, 1), None)),
"Kakariko Bazaar Item 3": ("Shop", 0x2C, 0x3A, "Kakariko Village", (shop_address(5, 2), None)),
"Kakariko Bazaar Item 4": ("Shop", 0x2C, 0x3B, "Kakariko Village", (shop_address(5, 3), None)),
"Kakariko Bazaar Item 5": ("Shop", 0x2C, 0x3D, "Kakariko Village", (shop_address(5, 4), None)),
"Kakariko Bazaar Item 6": ("Shop", 0x2C, 0x3E, "Kakariko Village", (shop_address(5, 5), None)),
"Kakariko Bazaar Item 7": ("Shop", 0x2C, 0x3F, "Kakariko Village", (shop_address(5, 6), None)),
"Kakariko Bazaar Item 8": ("Shop", 0x2C, 0x40, "Kakariko Village", (shop_address(5, 7), None)),
"Zora Shop Item 1": ("Shop", 0x2F, 0x30, "Zora's Domain", (shop_address(7, 0), None)),
"Zora Shop Item 2": ("Shop", 0x2F, 0x31, "Zora's Domain", (shop_address(7, 1), None)),
"Zora Shop Item 3": ("Shop", 0x2F, 0x32, "Zora's Domain", (shop_address(7, 2), None)),
"Zora Shop Item 4": ("Shop", 0x2F, 0x33, "Zora's Domain", (shop_address(7, 3), None)),
"Zora Shop Item 5": ("Shop", 0x2F, 0x34, "Zora's Domain", (shop_address(7, 4), None)),
"Zora Shop Item 6": ("Shop", 0x2F, 0x35, "Zora's Domain", (shop_address(7, 5), None)),
"Zora Shop Item 7": ("Shop", 0x2F, 0x36, "Zora's Domain", (shop_address(7, 6), None)),
"Zora Shop Item 8": ("Shop", 0x2F, 0x37, "Zora's Domain", (shop_address(7, 7), None)),
"Goron Shop Item 1": ("Shop", 0x2E, 0x30, "Goron City", (shop_address(8, 0), None)),
"Goron Shop Item 2": ("Shop", 0x2E, 0x31, "Goron City", (shop_address(8, 1), None)),
"Goron Shop Item 3": ("Shop", 0x2E, 0x32, "Goron City", (shop_address(8, 2), None)),
"Goron Shop Item 4": ("Shop", 0x2E, 0x33, "Goron City", (shop_address(8, 3), None)),
"Goron Shop Item 5": ("Shop", 0x2E, 0x34, "Goron City", (shop_address(8, 4), None)),
"Goron Shop Item 6": ("Shop", 0x2E, 0x35, "Goron City", (shop_address(8, 5), None)),
"Goron Shop Item 7": ("Shop", 0x2E, 0x36, "Goron City", (shop_address(8, 6), None)),
"Goron Shop Item 8": ("Shop", 0x2E, 0x37, "Goron City", (shop_address(8, 7), None)),
# NPC Scrubs are on the overworld, while GrottoNPC is a special handler for Grottos
# Grottos scrubs are the same scene and actor, so we use a unique grotto ID for the scene
"DC Deku Scrub Deku Nuts": ("NPC", 0x01, 0x30, "Dodongo's Cavern", None),
"DC Deku Scrub Deku Sticks": ("NPC", 0x01, 0x31, "Dodongo's Cavern", None),
"DC Deku Scrub Deku Seeds": ("NPC", 0x01, 0x33, "Dodongo's Cavern", None),
"DC Deku Scrub Deku Shield": ("NPC", 0x01, 0x34, "Dodongo's Cavern", None),
"Jabu Deku Scrub Deku Nuts": ("NPC", 0x02, 0x30, "Jabu Jabu's Belly", None),
"GC Deku Scrub Bombs": ("NPC", 0x0D, 0x37, "Ganon's Castle", None),
"GC Deku Scrub Arrows": ("NPC", 0x0D, 0x33, "Ganon's Castle", None),
"GC Deku Scrub Red Potion": ("NPC", 0x0D, 0x39, "Ganon's Castle", None),
"GC Deku Scrub Green Potion": ("NPC", 0x0D, 0x3A, "Ganon's Castle", None),
"DT MQ Deku Scrub Deku Shield": ("NPC", 0x00, 0x34, "Deku Tree", None),
"DC MQ Deku Scrub Deku Sticks": ("NPC", 0x01, 0x31, "Dodongo's Cavern", None),
"DC MQ Deku Scrub Deku Seeds": ("NPC", 0x01, 0x33, "Dodongo's Cavern", None),
"DC MQ Deku Scrub Deku Shield": ("NPC", 0x01, 0x34, "Dodongo's Cavern", None),
"DC MQ Deku Scrub Red Potion": ("NPC", 0x01, 0x39, "Dodongo's Cavern", None),
"GC MQ Deku Scrub Deku Nuts": ("NPC", 0x0D, 0x30, "Ganon's Castle", None),
"GC MQ Deku Scrub Bombs": ("NPC", 0x0D, 0x37, "Ganon's Castle", None),
"GC MQ Deku Scrub Arrows": ("NPC", 0x0D, 0x33, "Ganon's Castle", None),
"GC MQ Deku Scrub Red Potion": ("NPC", 0x0D, 0x39, "Ganon's Castle", None),
"GC MQ Deku Scrub Green Potion": ("NPC", 0x0D, 0x3A, "Ganon's Castle", None),
"HF Grotto Deku Scrub Piece of Heart": ("GrottoNPC", 0x01, 0x3E, "Hyrule Field", None),
"ZR Grotto Deku Scrub Red Potion": ("GrottoNPC", 0x02, 0x39, "Zora's River", None),
"ZR Grotto Deku Scrub Green Potion": ("GrottoNPC", 0x02, 0x3A, "Zora's River", None),
"SFM Grotto Deku Scrub Red Potion": ("GrottoNPC", 0x03, 0x39, "Sacred Forest Meadow", None),
"SFM Grotto Deku Scrub Green Potion": ("GrottoNPC", 0x03, 0x3A, "Sacred Forest Meadow", None),
"LH Grotto Deku Scrub Deku Nuts": ("GrottoNPC", 0x04, 0x30, "Lake Hylia", None),
"LH Grotto Deku Scrub Bombs": ("GrottoNPC", 0x04, 0x37, "Lake Hylia", None),
"LH Grotto Deku Scrub Arrows": ("GrottoNPC", 0x04, 0x33, "Lake Hylia", None),
"Valley Grotto Deku Scrub Red Potion": ("GrottoNPC", 0x05, 0x39, "Gerudo Valley", None),
"Valley Grotto Deku Scrub Green Potion": ("GrottoNPC", 0x05, 0x3A, "Gerudo Valley", None),
"LW Deku Scrub Deku Nuts": ("NPC", 0x5B, 0x30, "the Lost Woods", None),
"LW Deku Scrub Deku Sticks": ("NPC", 0x5B, 0x31, "the Lost Woods", None),
"LW Deku Scrub Deku Stick Upgrade": ("NPC", 0x5B, 0x77, "the Lost Woods", None),
"LW Grotto Deku Scrub Arrows": ("GrottoNPC", 0x06, 0x33, "the Lost Woods", None),
"LW Grotto Deku Scrub Deku Nut Upgrade": ("GrottoNPC", 0x06, 0x79, "the Lost Woods", None),
"Desert Grotto Deku Scrub Red Potion": ("GrottoNPC", 0x07, 0x39, "Desert Colossus", None),
"Desert Grotto Deku Scrub Green Potion": ("GrottoNPC", 0x07, 0x3A, "Desert Colossus", None),
"DMC Deku Scrub Bombs": ("NPC", 0x61, 0x37, "Death Mountain Crater", None),
"DMC Grotto Deku Scrub Deku Nuts": ("GrottoNPC", 0x08, 0x30, "Death Mountain Crater", None),
"DMC Grotto Deku Scrub Bombs": ("GrottoNPC", 0x08, 0x37, "Death Mountain Crater", None),
"DMC Grotto Deku Scrub Arrows": ("GrottoNPC", 0x08, 0x33, "Death Mountain Crater", None),
"Goron Grotto Deku Scrub Deku Nuts": ("GrottoNPC", 0x09, 0x30, "Goron City", None),
"Goron Grotto Deku Scrub Bombs": ("GrottoNPC", 0x09, 0x37, "Goron City", None),
"Goron Grotto Deku Scrub Arrows": ("GrottoNPC", 0x09, 0x33, "Goron City", None),
"LLR Grotto Deku Scrub Deku Nuts": ("GrottoNPC", 0x0A, 0x30, "Lon Lon Ranch", None),
"LLR Grotto Deku Scrub Bombs": ("GrottoNPC", 0x0A, 0x37, "Lon Lon Ranch", None),
"LLR Grotto Deku Scrub Arrows": ("GrottoNPC", 0x0A, 0x33, "Lon Lon Ranch", None),
# These are not actual locations, but are filler spots used for hint reachability
"Death Mountain Crater Gossip Stone": ("GossipStone", None, None, None, None),
"Death Mountain Trail Gossip Stone": ("GossipStone", None, None, None, None),
"Desert Colossus Gossip Stone": ("GossipStone", None, None, None, None),
"Dodongos Cavern Gossip Stone": ("GossipStone", None, None, None, None),
"Generic Grotto Gossip Stone": ("GossipStone", None, None, None, None),
"Gerudo Valley Gossip Stone": ("GossipStone", None, None, None, None),
"Goron City Maze Gossip Stone": ("GossipStone", None, None, None, None),
"Goron City Medigoron Gossip Stone": ("GossipStone", None, None, None, None),
"Graveyard Gossip Stone": ("GossipStone", None, None, None, None),
"Hyrule Castle Malon Gossip Stone": ("GossipStone", None, None, None, None),
"Hyrule Castle Rock Wall Gossip Stone": ("GossipStone", None, None, None, None),
"Castle Storms Grotto Gossip Stone": ("GossipStone", None, None, None, None),
"Field Valley Grotto Gossip Stone": ("GossipStone", None, None, None, None),
"Deku Tree Gossip Stone (Left)": ("GossipStone", None, None, None, None),
"Deku Tree Gossip Stone (Right)": ("GossipStone", None, None, None, None),
"Kokiri Forest Gossip Stone": ("GossipStone", None, None, None, None),
"Lake Hylia Lab Gossip Stone": ("GossipStone", None, None, None, None),
"Lake Hylia Gossip Stone (Southeast)": ("GossipStone", None, None, None, None),
"Lake Hylia Gossip Stone (Southwest)": ("GossipStone", None, None, None, None),
"Lost Woods Gossip Stone": ("GossipStone", None, None, None, None),
"Sacred Forest Meadow Maze Gossip Stone (Lower)": ("GossipStone", None, None, None, None),
"Sacred Forest Meadow Maze Gossip Stone (Upper)": ("GossipStone", None, None, None, None),
"Sacred Forest Meadow Saria Gossip Stone": ("GossipStone", None, None, None, None),
"Temple of Time Gossip Stone (Left)": ("GossipStone", None, None, None, None),
"Temple of Time Gossip Stone (Left-Center)": ("GossipStone", None, None, None, None),
"Temple of Time Gossip Stone (Right)": ("GossipStone", None, None, None, None),
"Temple of Time Gossip Stone (Right-Center)": ("GossipStone", None, None, None, None),
"Zoras Domain Gossip Stone": ("GossipStone", None, None, None, None),
"Zoras Fountain Fairy Gossip Stone": ("GossipStone", None, None, None, None),
"Zoras Fountain Jabu Gossip Stone": ("GossipStone", None, None, None, None),
"Zoras River Plateau Gossip Stone": ("GossipStone", None, None, None, None),
"Zoras River Waterfall Gossip Stone": ("GossipStone", None, None, None, None),
}
# Business Scrub Details
# Each entry: (item id, price in rupees, text id, [text, replacement text]).
# The replacement strings are presumably substituted into the scrub's dialog
# when the sold item is randomized ("mysterious item") -- confirm against the
# text-patching code that consumes this table.
# \x05\x4X byte sequences are in-game text color control codes; \x01 is a
# line break.
business_scrubs = [
    # id     price  text id  text replacement
    (0x30, 20, 0x10A0, ["Deku Nuts", "a \x05\x42mysterious item\x05\x40"]),
    (0x31, 15, 0x10A1, ["Deku Sticks", "a \x05\x42mysterious item\x05\x40"]),
    (0x3E, 10, 0x10A2, ["Piece of Heart", "\x05\x42mysterious item\x05\x40"]),
    (0x33, 40, 0x10CA, ["\x05\x41Deku Seeds", "a \x05\x42mysterious item"]),
    (0x34, 50, 0x10CB, ["\x41Deku Shield", "\x42mysterious item"]),
    (0x37, 40, 0x10CC, ["\x05\x41Bombs", "a \x05\x42mysterious item"]),
    (0x38, 00, 0x10CD, ["\x05\x41Arrows", "a \x05\x42mysterious item"]),  # unused
    (0x39, 40, 0x10CE, ["\x05\x41Red Potion", "\x05\x42mysterious item"]),
    (0x3A, 40, 0x10CF, ["Green Potion", "mysterious item"]),
    (0x77, 40, 0x10DC, ["enable you to pick up more\x01\x05\x41Deku Sticks", "sell you a \x05\x42mysterious item"]),
    (0x79, 40, 0x10DD, ["enable you to pick up more \x05\x41Deku\x01Nuts", "sell you a \x05\x42mysterious item"]),
]
from .ItemList import item_table
class Item(object):
    """A randomizer item (or event) together with its placement metadata.

    Attributes:
        name: display/lookup name of the item.
        advancement: True if the item is required for progression.
        priority: True if the item should be placed before junk fill.
        type: item category string (e.g. 'SmallKey', 'BossKey', 'Map',
            'Compass', 'Token', 'Event', 'Shop').
        special: dict of extra per-item data (may contain a 'price').
        index: numeric item id; None for pure events.
        location: the Location this item has been placed at, if any.
        price: shop price taken from special['price'] (None if absent).
        world: the World this item belongs to (may be None until assigned).
        looks_like_item: the Item this one is disguised as, if any.
    """

    # Items copied without a resolvable world are recorded here
    # (item -> original world id) so fix_worlds_after_copy() can
    # re-attach them once the new world objects exist.
    item_worlds_to_fix = {}

    def __init__(self, name='', advancement=False, priority=False, type=None, index=None, special=None, world=None):
        self.name = name
        self.advancement = advancement
        self.priority = priority
        self.type = type
        self.special = special or {}
        self.index = index
        self.location = None
        self.price = self.special.get('price')
        self.world = world
        self.looks_like_item = None

    def copy(self, new_world=None):
        """Return a copy of this item, optionally re-homed to new_world.

        If new_world belongs to a different world id than this item's
        current world, the copy is left world-less and registered in
        item_worlds_to_fix so fix_worlds_after_copy() can attach it to
        the matching world later.
        """
        if new_world is not None and self.world is not None and new_world.id != self.world.id:
            new_world = None

        new_item = Item(self.name, self.advancement, self.priority, self.type, self.index, self.special)
        new_item.world = new_world
        new_item.price = self.price

        if new_world is None and self.world is not None:
            Item.item_worlds_to_fix[new_item] = self.world.id

        return new_item

    @classmethod
    def fix_worlds_after_copy(cls, worlds):
        """Attach every deferred copied item (see copy()) to its new world."""
        for item, world_id in cls.item_worlds_to_fix.items():
            item.world = worlds[world_id]
        # Every pending item has been fixed, so the ledger can be emptied.
        cls.item_worlds_to_fix.clear()

    @property
    def key(self):
        """True for any dungeon or fortress key (small or boss)."""
        return self.smallkey or self.bosskey

    @property
    def smallkey(self):
        """True for dungeon and Gerudo Fortress small keys."""
        return self.type == 'SmallKey' or self.type == 'FortressSmallKey'

    @property
    def bosskey(self):
        """True for dungeon boss keys."""
        return self.type == 'BossKey'

    @property
    def map(self):
        """True for dungeon maps."""
        return self.type == 'Map'

    @property
    def compass(self):
        """True for dungeon compasses."""
        return self.type == 'Compass'

    @property
    def dungeonitem(self):
        """True for any item normally restricted to its dungeon."""
        return self.smallkey or self.bosskey or self.map or self.compass

    @property
    def majoritem(self):
        """Whether this item counts as 'major' for hint purposes.

        Depends on per-world settings, so self.world must be set.
        """
        if self.type == 'Token':
            # Skulltula tokens only matter when the rainbow bridge needs them.
            return self.world.bridge == 'tokens'

        if self.type == 'Event' or self.type == 'Shop' or not self.advancement:
            return False

        if self.name.startswith('Bombchus') and not self.world.bombchus_in_logic:
            return False

        if self.map or self.compass:
            return False
        if self.smallkey and self.world.shuffle_smallkeys == 'dungeon':
            return False
        if self.bosskey and self.world.shuffle_bosskeys == 'dungeon':
            return False

        return True

    def __str__(self):
        return self.name

    # Python 2 holdover kept for backward compatibility with any callers.
    def __unicode__(self):
        return '%s' % self.name
def ItemFactory(items, world=None):
    """Create Item instances from one item name or an iterable of names.

    Args:
        items: a single item name (str) or a sequence of item names.
        world: optional world to attach to every created item.

    Returns:
        A single Item when *items* was a string, else a list of Items.

    Raises:
        KeyError: if a name is not present in item_table.
    """
    ret = []
    singleton = False
    if isinstance(items, str):
        items = [items]
        singleton = True
    for item in items:
        if item not in item_table:
            # Bug fix: the message was raised as an un-interpolated tuple
            # ('Unknown Item: %s', item); interpolate the name instead.
            raise KeyError('Unknown Item: %s' % item)
        # Renamed locals: avoid shadowing builtin `type` and fix the
        # 'progessive' typo.  Values may be True/False/None.
        (item_type, progressive, item_id, special) = item_table[item]
        advancement = (progressive == True)
        priority = (progressive == False)
        new_item = Item(item, advancement, priority, item_type, item_id, special)
        if world:
            new_item.world = world
        ret.append(new_item)
    if singleton:
        return ret[0]
    return ret
from collections import Counter, defaultdict
import copy
class State(object):
    """Per-world logic state: the progression items collected so far plus
    memoized reachability results for regions, locations and entrances.

    The caches only keep qualified results (see can_reach) and are partially
    invalidated whenever the collected item set changes.
    """

    def __init__(self, parent):
        self.prog_items = Counter()     # item name -> number collected
        self.world = parent
        # Memoized can_reach() results, keyed by spot object.
        self.region_cache = {}
        self.location_cache = {}
        self.entrance_cache = {}
        self.recursion_count = 0        # depth of nested can_reach() queries
        self.collected_locations = {}   # location name -> True once collected

    def clear_cached_unreachable(self):
        # we only need to invalidate results which were False, places we could
        # reach before we can still reach after adding more items
        self.region_cache = {k: v for k, v in self.region_cache.items() if v}
        self.location_cache = {k: v for k, v in self.location_cache.items() if v}
        self.entrance_cache = {k: v for k, v in self.entrance_cache.items() if v}

    def copy(self, new_world=None):
        """Return a shallow copy of this state, optionally bound to new_world."""
        if not new_world:
            new_world = self.world
        new_state = State(new_world)
        new_state.prog_items = copy.copy(self.prog_items)
        new_state.region_cache = copy.copy(self.region_cache)
        new_state.location_cache = copy.copy(self.location_cache)
        new_state.entrance_cache = copy.copy(self.entrance_cache)
        new_state.collected_locations = copy.copy(self.collected_locations)
        return new_state

    def can_reach(self, spot, resolution_hint=None):
        """Return whether *spot* (an object or a name) is reachable.

        Results are cached per spot; recursive queries conservatively answer
        False while the recursion is being evaluated.
        """
        try:
            spot_type = spot.spot_type
            if spot_type == 'Location':
                correct_cache = self.location_cache
            elif spot_type == 'Region':
                correct_cache = self.region_cache
            elif spot_type == 'Entrance':
                correct_cache = self.entrance_cache
            else:
                raise AttributeError
        except AttributeError:
            # try to resolve a name
            if resolution_hint == 'Location':
                spot = self.world.get_location(spot)
                correct_cache = self.location_cache
            elif resolution_hint == 'Entrance':
                spot = self.world.get_entrance(spot)
                correct_cache = self.entrance_cache
            else:
                # default to Region
                spot = self.world.get_region(spot)
                correct_cache = self.region_cache
        if spot.recursion_count > 0:
            return False
        if spot not in correct_cache:
            # for the purpose of evaluating results, recursion is resolved by
            # always denying recursive access (as that is what we are trying
            # to figure out right now in the first place)
            spot.recursion_count += 1
            self.recursion_count += 1
            can_reach = spot.can_reach(self)
            spot.recursion_count -= 1
            self.recursion_count -= 1
            # we only store qualified false results (i.e. ones not inside a
            # hypothetical); positive results are always safe to cache
            if not can_reach:
                if self.recursion_count == 0:
                    correct_cache[spot] = can_reach
            else:
                correct_cache[spot] = can_reach
            return can_reach
        return correct_cache[spot]

    def item_name(self, location):
        """Return the name of the item placed at *location*, or None."""
        location = self.world.get_location(location)
        if location.item is None:
            return None
        return location.item.name

    def has(self, item, count=1):
        """True if at least *count* copies of *item* have been collected."""
        return self.prog_items[item] >= count

    def item_count(self, item):
        return self.prog_items[item]

    def is_adult(self):
        return self.has('Master Sword')

    def can_child_attack(self):
        return self.has_slingshot() or \
            self.has('Boomerang') or \
            self.has_sticks() or \
            self.has_explosives() or \
            self.has('Kokiri Sword') or \
            (self.has('Dins Fire') and self.has('Magic Meter'))

    def can_stun_deku(self):
        return self.is_adult() or \
            self.can_child_attack() or \
            self.has_nuts() or \
            self.has('Buy Deku Shield')

    def has_nuts(self):
        return self.has('Buy Deku Nut (5)') or self.has('Buy Deku Nut (10)') or self.has('Deku Nut Drop')

    def has_sticks(self):
        return self.has('Buy Deku Stick (1)') or self.has('Deku Stick Drop')

    def has_bow(self):
        return self.has('Bow')

    def has_slingshot(self):
        return self.has('Slingshot')

    def has_bombs(self):
        return self.has('Bomb Bag')

    def has_blue_fire(self):
        # Needs a bottle plus any of the blue-fire sources.
        return self.has_bottle() and \
            (self.can_reach('Ice Cavern')
             or self.can_reach('Ganons Castle Water Trial')
             or self.has('Buy Blue Fire')
             or (self.world.dungeon_mq['Gerudo Training Grounds'] and self.can_reach('Gerudo Training Grounds Stalfos Room')))

    def has_ocarina(self):
        return (self.has('Ocarina') or self.has("Fairy Ocarina") or self.has("Ocarina of Time"))

    def can_play(self, song):
        return self.has_ocarina() and self.has(song)

    def can_use(self, item):
        """Whether *item* is both owned and usable by the current age/magic."""
        magic_items = ['Dins Fire', 'Farores Wind', 'Nayrus Love', 'Lens of Truth']
        adult_items = ['Bow', 'Hammer', 'Iron Boots', 'Hover Boots', 'Magic Bean']
        magic_arrows = ['Fire Arrows', 'Light Arrows']
        if item in magic_items:
            return self.has(item) and self.has('Magic Meter')
        elif item in adult_items:
            return self.has(item) and self.is_adult()
        elif item in magic_arrows:
            return self.has(item) and self.is_adult() and self.has_bow() and self.has('Magic Meter')
        elif item == 'Hookshot':
            return self.has('Progressive Hookshot') and self.is_adult()
        elif item == 'Longshot':
            return self.has('Progressive Hookshot', 2) and self.is_adult()
        elif item == 'Silver Gauntlets':
            return self.has('Progressive Strength Upgrade', 2) and self.is_adult()
        elif item == 'Golden Gauntlets':
            return self.has('Progressive Strength Upgrade', 3) and self.is_adult()
        elif item == 'Scarecrow':
            return self.has('Progressive Hookshot') and self.is_adult() and self.has_ocarina()
        elif item == 'Distant Scarecrow':
            return self.has('Progressive Hookshot', 2) and self.is_adult() and self.has_ocarina()
        else:
            return self.has(item)

    def can_buy_bombchus(self):
        return self.has('Buy Bombchu (5)') or \
            self.has('Buy Bombchu (10)') or \
            self.has('Buy Bombchu (20)') or \
            self.can_reach('Castle Town Bombchu Bowling') or \
            self.can_reach('Haunted Wasteland Bombchu Salesman', 'Location')

    def has_bombchus(self):
        # With bombchus in logic a found pack plus a refill source is needed;
        # otherwise the bomb bag stands in for carrying capacity.
        return (self.world.bombchus_in_logic and
                (any(pritem.startswith('Bombchus') for pritem in self.prog_items) and
                 self.can_buy_bombchus())) \
            or (not self.world.bombchus_in_logic and self.has('Bomb Bag') and
                self.can_buy_bombchus())

    def has_bombchus_item(self):
        return (self.world.bombchus_in_logic and
                (any(pritem.startswith('Bombchus') for pritem in self.prog_items)
                 or (self.has('Progressive Wallet') and self.can_reach('Haunted Wasteland')))) \
            or (not self.world.bombchus_in_logic and self.has('Bomb Bag'))

    def has_explosives(self):
        return self.has_bombs() or self.has_bombchus()

    def can_blast_or_smash(self):
        return self.has_explosives() or (self.is_adult() and self.has('Hammer'))

    def can_dive(self):
        return self.has('Progressive Scale')

    def can_see_with_lens(self):
        # Lens is only logically required when the 'all' lens setting is on.
        return ((self.has('Magic Meter') and self.has('Lens of Truth')) or self.world.logic_lens != 'all')

    def has_projectile(self, age='either'):
        """Whether a projectile weapon is available for the given age."""
        if age == 'child':
            return self.has_explosives() or self.has_slingshot() or self.has('Boomerang')
        elif age == 'adult':
            return self.has_explosives() or self.has_bow() or self.has('Progressive Hookshot')
        elif age == 'both':
            return self.has_explosives() or ((self.has_bow() or self.has('Progressive Hookshot')) and (self.has_slingshot() or self.has('Boomerang')))
        else:
            return self.has_explosives() or ((self.has_bow() or self.has('Progressive Hookshot')) or (self.has_slingshot() or self.has('Boomerang')))

    def has_GoronTunic(self):
        return (self.has('Goron Tunic') or self.has('Buy Goron Tunic'))

    def has_ZoraTunic(self):
        return (self.has('Zora Tunic') or self.has('Buy Zora Tunic'))

    def can_leave_forest(self):
        return self.world.open_forest or self.can_reach(self.world.get_location('Queen Gohma'))

    def can_finish_adult_trades(self):
        zora_thawed = (self.can_play('Zeldas Lullaby') or (self.has('Hover Boots') and self.world.logic_zora_with_hovers)) and self.has_blue_fire()
        carpenter_access = self.has('Epona') or self.has('Progressive Hookshot', 2)
        return (self.has('Claim Check')
                or ((self.has('Progressive Strength Upgrade') or self.can_blast_or_smash() or self.has_bow())
                    and (((self.has('Eyedrops') or self.has('Eyeball Frog') or self.has('Prescription') or self.has('Broken Sword')) and zora_thawed)
                         or ((self.has('Poachers Saw') or self.has('Odd Mushroom') or self.has('Cojiro') or self.has('Pocket Cucco') or self.has('Pocket Egg')) and zora_thawed and carpenter_access))))

    def has_bottle(self):
        # The Letter bottle is reserved; the Big Poe bottle only counts as adult.
        is_normal_bottle = lambda item: (item.startswith('Bottle') and item != 'Bottle with Letter' and (item != 'Bottle with Big Poe' or self.is_adult()))
        return any(is_normal_bottle(pritem) for pritem in self.prog_items)

    def bottle_count(self):
        """Number of usable bottles collected (each copy counts).

        Bug fix: the previous version summed the item-name strings themselves
        (raising TypeError); sum the collected counts instead.
        """
        return sum(
            self.prog_items[pritem]
            for pritem in self.prog_items
            if pritem.startswith('Bottle')
            and pritem != 'Bottle with Letter'
            and (pritem != 'Bottle with Big Poe' or self.is_adult()))

    def has_hearts(self, count):
        # Warning: This only considers items that are marked as advancement items
        return self.heart_count() >= count

    def heart_count(self):
        # Warning: This only considers items that are marked as advancement items
        return (
            self.item_count('Heart Container')
            + self.item_count('Piece of Heart') // 4
            + 3  # starting hearts
        )

    def has_fire_source(self):
        return self.can_use('Dins Fire') or self.can_use('Fire Arrows')

    def guarantee_hint(self):
        """Whether the hint-gating requirement of the world is satisfied."""
        if(self.world.hints == 'mask'):
            # has the mask of truth
            return self.has('Zeldas Letter') and self.can_play('Sarias Song') and self.has('Kokiri Emerald') and self.has('Goron Ruby') and self.has('Zora Sapphire')
        elif(self.world.hints == 'agony'):
            # has the Stone of Agony
            return self.has('Stone of Agony')
        return True

    def nighttime(self):
        if self.world.logic_no_night_tokens_without_suns_song:
            return self.can_play('Suns Song')
        return True

    def had_night_start(self):
        stod = self.world.starting_tod
        # These are all between 6:30 and 18:00
        if (stod == 'evening' or        # 18
                stod == 'dusk' or       # 21
                stod == 'midnight' or   # 00
                stod == 'witching-hour' or  # 03
                stod == 'early-morning'):   # 06
            return True
        else:
            return False

    def can_finish_GerudoFortress(self):
        if self.world.gerudo_fortress == 'normal':
            return self.has('Small Key (Gerudo Fortress)', 4) and (self.can_use('Bow') or self.can_use('Hookshot') or self.can_use('Hover Boots') or self.world.logic_gerudo_kitchen)
        elif self.world.gerudo_fortress == 'fast':
            return self.has('Small Key (Gerudo Fortress)', 1) and self.is_adult()
        else:
            return self.is_adult()

    # Be careful using this function. It will not collect any
    # items that may be locked behind the item, only the item itself.
    def collect(self, item):
        if item.advancement:
            self.prog_items[item.name] += 1
            self.clear_cached_unreachable()

    # Be careful using this function. It will not uncollect any
    # items that may be locked behind the item, only the item itself.
    def remove(self, item):
        if self.prog_items[item.name] > 0:
            self.prog_items[item.name] -= 1
            if self.prog_items[item.name] <= 0:
                del self.prog_items[item.name]
            # invalidate collected cache. unreachable locations are still unreachable
            self.region_cache = {k: v for k, v in self.region_cache.items() if not v}
            self.location_cache = {k: v for k, v in self.location_cache.items() if not v}
            self.entrance_cache = {k: v for k, v in self.entrance_cache.items() if not v}
            self.recursion_count = 0

    def __getstate__(self):
        return self.__dict__.copy()

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __getattr__(self, item):
        # Allow attribute-style queries, e.g. state.has_Bow or
        # state.can_reach_SomeRegion.
        # Bug fix: the prefixes were stripped with item[10] / item[4] (a single
        # character) instead of the slices item[10:] / item[4:].
        if item.startswith('can_reach_'):
            return self.can_reach(item[10:])
        elif item.startswith('has_'):
            return self.has(item[4:])
        raise RuntimeError('Cannot parse %s.' % item)

    # This function returns a list of states that is each of the base_states
    # with every item still in the itempool. It only adds items that belong
    # to its respective world. See fill_restrictive
    @staticmethod
    def get_states_with_items(base_state_list, itempool):
        new_state_list = []
        for base_state in base_state_list:
            new_state = base_state.copy()
            for item in itempool:
                if item.world.id == base_state.world.id:  # Check world
                    new_state.collect(item)
            new_state_list.append(new_state)
        State.collect_locations(new_state_list)
        return new_state_list

    # This collected all item locations available in the state list given that
    # the states have collected items. The purpose is that it will search for
    # all new items that become accessible with a new item set
    @staticmethod
    def collect_locations(state_list):
        # Get all item locations in the worlds
        item_locations = [location for state in state_list for location in state.world.get_filled_locations() if location.item.advancement]
        # will loop if there are more items opened up in the previous
        # iteration. Always run once
        reachable_items_locations = True
        while reachable_items_locations:
            # get reachable new items locations
            reachable_items_locations = [location for location in item_locations if location.name not in state_list[location.world.id].collected_locations and state_list[location.world.id].can_reach(location)]
            for location in reachable_items_locations:
                # Mark the location collected in the state world it exists in
                state_list[location.world.id].collected_locations[location.name] = True
                # Collect the item for the state world it is for
                state_list[location.item.world.id].collect(location.item)

    # This returns True if every state is beatable. It's important to ensure
    # all states beatable since items required in one world can be in another.
    @staticmethod
    def can_beat_game(state_list, scan_for_items=True):
        if scan_for_items:
            # Check if already beaten
            game_beaten = True
            for state in state_list:
                if not state.has('Triforce'):
                    game_beaten = False
                    break
            if game_beaten:
                return True
            # collect all available items on copies, leaving the input intact
            new_state_list = [state.copy() for state in state_list]
            State.collect_locations(new_state_list)
        else:
            new_state_list = state_list
        # if every state got the Triforce, then return True
        for state in new_state_list:
            if not state.has('Triforce'):
                return False
        return True

    @staticmethod
    def update_required_items(spoiler):
        """Compute which major-item locations are required to beat the game
        and store them per world on the spoiler."""
        worlds = spoiler.worlds
        state_list = [world.state for world in worlds]
        # get list of all of the progressive items that can appear in hints
        all_locations = [location for world in worlds for location in world.get_filled_locations()]
        item_locations = [location for location in all_locations if location.item.majoritem and not location.locked]
        # if the playthrough was generated, filter the list of locations to the
        # locations in the playthrough. The required locations is a subset of
        # these locations. Can't use the locations directly since they belong
        # to the copied spoiler world, so must find matching locations by name
        if spoiler.playthrough:
            spoiler_locations = defaultdict(lambda: [])
            for location in [location for _, sphere in spoiler.playthrough.items() for location in sphere]:
                spoiler_locations[location.name].append(location.world.id)
            item_locations = list(filter(lambda location: location.world.id in spoiler_locations[location.name], item_locations))
        required_locations = []
        reachable_items_locations = True
        while (item_locations and reachable_items_locations):
            reachable_items_locations = [location for location in all_locations if location.name not in state_list[location.world.id].collected_locations and state_list[location.world.id].can_reach(location)]
            for location in reachable_items_locations:
                # Try to remove items one at a time and see if the game is still beatable
                if location in item_locations:
                    old_item = location.item
                    location.item = None
                    if not State.can_beat_game(state_list):
                        required_locations.append(location)
                    location.item = old_item
                    item_locations.remove(location)
                state_list[location.world.id].collected_locations[location.name] = True
                state_list[location.item.world.id].collect(location.item)
        # Filter the required locations to only include locations in each world
        required_locations_dict = {}
        for world in worlds:
            required_locations_dict[world.id] = list(filter(lambda location: location.world.id == world.id, required_locations))
        spoiler.required_locations = required_locations_dict
import argparse
import textwrap
import string
import re
import hashlib
import math
import sys
import json
from .version import __version__
#from Utils import random_choices, local_path
from ..compatibility.Utils import random_choices
from .SettingsList import setting_infos, get_setting_info
class ArgumentDefaultsHelpFormatter(argparse.RawTextHelpFormatter):
    # Raw-text help formatter that also dedents each option's help string, so
    # triple-quoted help text reads cleanly in --help output.

    def _get_help_string(self, action):
        # Strip the common leading whitespace from the help text.
        return textwrap.dedent(action.help)
# 32 characters
letters = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
index_to_letter = { i: letters[i] for i in range(32) }
letter_to_index = { v: k for k, v in index_to_letter.items() }
def bit_string_to_text(bits):
# pad the bits array to be multiple of 5
if len(bits) % 5 > 0:
bits += [0] * (5 - len(bits) % 5)
# convert to characters
result = ""
for i in range(0, len(bits), 5):
chunk = bits[i:i + 5]
value = 0
for b in range(5):
value |= chunk[b] << b
result += index_to_letter[value]
return result
def text_to_bit_string(text):
bits = []
for c in text:
index = letter_to_index[c]
for b in range(5):
bits += [ (index >> b) & 1 ]
return bits
# holds the particular choices for a run's settings
class Settings:
    """Holds the option values for one randomizer run and converts them to
    and from the sharable settings string and the numeric seed."""

    def get_settings_display(self):
        """Return a human-readable, aligned table of all shared settings."""
        padding = 0
        for setting in filter(lambda s: s.shared, setting_infos):
            padding = max(len(setting.name), padding)
        padding += 2
        output = ''
        for setting in filter(lambda s: s.shared, setting_infos):
            name = setting.name + ': ' + ' ' * (padding - len(setting.name))
            if setting.type == list:
                val = ('\n' + (' ' * (padding + 2))).join(self.__dict__[setting.name])
            else:
                val = str(self.__dict__[setting.name])
            output += name + val + '\n'
        return output

    def get_settings_string(self):
        """Pack every shared setting into the base-32 settings string."""
        bits = []
        for setting in filter(lambda s: s.shared and s.bitwidth > 0, setting_infos):
            value = self.__dict__[setting.name]
            i_bits = []
            if setting.type == bool:
                i_bits = [1 if value else 0]
            if setting.type == str:
                if 'choices' in setting.args_params:
                    try:
                        index = setting.args_params['choices'].index(value)
                    except ValueError:
                        # Unknown choice: fall back to the declared default.
                        index = setting.args_params['choices'].index(setting.args_params['default'])
                    # https://stackoverflow.com/questions/10321978/integer-to-bitfield-as-a-list
                    i_bits = [1 if digit == '1' else 0 for digit in bin(index)[2:]]
                    i_bits.reverse()
                elif 'char_options' in setting.gui_params:
                    # Fixed-width per-character encoding for free-text settings.
                    char_bitwidth = math.ceil(math.log(len(setting.gui_params['char_options']), 2))
                    for c in value.upper():
                        index = setting.gui_params['char_options'].index(c)
                        c_bits = [1 if digit == '1' else 0 for digit in bin(index)[2:]]
                        c_bits.reverse()
                        c_bits += [0] * (char_bitwidth - len(c_bits))
                        i_bits.extend(c_bits)
                else:
                    raise ValueError('Setting is string type, but missing parse parameters.')
            if setting.type == int:
                # Store (value - min) / step so the field fits its bitwidth.
                value = int(value)
                value = value - (setting.gui_params.get('min', 0))
                value = int(value / (setting.gui_params.get('step', 1)))
                value = min(value, (setting.gui_params.get('max', value)))
                i_bits = [1 if digit == '1' else 0 for digit in bin(value)[2:]]
                i_bits.reverse()
            if setting.type == list:
                if 'choices' in setting.args_params:
                    if len(value) > len(setting.args_params['choices']) / 2:
                        # Store the complement when it is shorter; an all-ones
                        # terminal marker flags the inversion for the decoder.
                        value = [item for item in setting.args_params['choices'] if item not in value]
                        terminal = [1] * setting.bitwidth
                    else:
                        terminal = [0] * setting.bitwidth
                    item_indexes = []
                    for item in value:
                        try:
                            item_indexes.append(setting.args_params['choices'].index(item))
                        except ValueError:
                            continue
                    item_indexes.sort()
                    for index in item_indexes:
                        # Indexes are stored 1-based; 0 is the list terminator.
                        item_bits = [1 if digit == '1' else 0 for digit in bin(index + 1)[2:]]
                        item_bits.reverse()
                        item_bits += [0] * (setting.bitwidth - len(item_bits))
                        i_bits.extend(item_bits)
                    i_bits.extend(terminal)
                else:
                    raise ValueError('Setting is list type, but missing parse parameters.')
            # pad the field to its full bitwidth
            i_bits += [0] * (setting.bitwidth - len(i_bits))
            bits += i_bits
        return bit_string_to_text(bits)

    def update_with_settings_string(self, text):
        """Decode a settings string and overwrite the shared settings from it."""
        bits = text_to_bit_string(text)
        for setting in filter(lambda s: s.shared and s.bitwidth > 0, setting_infos):
            cur_bits = bits[:setting.bitwidth]
            bits = bits[setting.bitwidth:]
            value = None
            if setting.type == bool:
                value = True if cur_bits[0] == 1 else False
            if setting.type == str:
                if 'choices' in setting.args_params:
                    index = 0
                    for b in range(setting.bitwidth):
                        index |= cur_bits[b] << b
                    value = setting.args_params['choices'][index]
                elif 'char_options' in setting.gui_params:
                    char_bitwidth = math.ceil(math.log(len(setting.gui_params['char_options']), 2))
                    value = ''
                    for i in range(0, setting.bitwidth, char_bitwidth):
                        char_bits = cur_bits[i:i + char_bitwidth]
                        index = 0
                        for b in range(char_bitwidth):
                            index |= char_bits[b] << b
                        value += setting.gui_params['char_options'][index]
                else:
                    raise ValueError('Setting is string type, but missing parse parameters.')
            if setting.type == int:
                value = 0
                for b in range(setting.bitwidth):
                    value |= cur_bits[b] << b
                # Undo the (value - min) / step transform of the encoder.
                value = value * ('step' in setting.gui_params and setting.gui_params['step'] or 1)
                value = value + ('min' in setting.gui_params and setting.gui_params['min'] or 0)
            if setting.type == list:
                if 'choices' in setting.args_params:
                    value = []
                    max_index = (1 << setting.bitwidth) - 1
                    while True:
                        index = 0
                        for b in range(setting.bitwidth):
                            index |= cur_bits[b] << b
                        if index == 0:
                            # Terminator: the stored list is complete.
                            break
                        if index == max_index:
                            # All-ones terminal: the stored list was the complement.
                            value = [item for item in setting.args_params['choices'] if item not in value]
                            break
                        value.append(setting.args_params['choices'][index - 1])
                        cur_bits = bits[:setting.bitwidth]
                        bits = bits[setting.bitwidth:]
                else:
                    raise ValueError('Setting is list type, but missing parse parameters.')
            self.__dict__[setting.name] = value
        self.settings_string = self.get_settings_string()
        self.numeric_seed = self.get_numeric_seed()

    def get_numeric_seed(self):
        # salt the seed with the settings and version, and hash to get a
        # numeric seed
        full_string = self.settings_string + __version__ + self.seed
        return int(hashlib.sha256(full_string.encode('utf-8')).hexdigest(), 16)

    def sanitize_seed(self):
        # leave only alphanumeric characters and some punctuation
        # Bug fix: re.UNICODE (== 32) was previously passed as re.sub's fourth
        # positional argument, which is *count*, so at most 32 characters were
        # ever removed.  The pattern is pure ASCII, so no flag is needed.
        self.seed = re.sub(r'[^a-zA-Z0-9_-]', '', self.seed)

    def update_seed(self, seed):
        """Set the seed (random if empty) and refresh the numeric seed."""
        if seed is None or seed == '':
            # https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
            self.seed = ''.join(random_choices(string.ascii_uppercase + string.digits, k=10))
        else:
            self.seed = seed
        self.sanitize_seed()
        self.numeric_seed = self.get_numeric_seed()

    def update(self):
        """Recompute the derived settings string and numeric seed."""
        self.settings_string = self.get_settings_string()
        self.numeric_seed = self.get_numeric_seed()

    def check_dependency(self, setting_name):
        """True if *setting_name* is currently enabled (not forced by a dependency)."""
        info = get_setting_info(setting_name)
        if info.gui_params is not None and 'dependency' in info.gui_params:
            return info.gui_params['dependency'](self) == None
        else:
            return True

    def remove_disabled(self):
        """Force every dependency-disabled setting to its dependency value."""
        for info in setting_infos:
            if info.gui_params is not None and 'dependency' in info.gui_params:
                new_value = info.gui_params['dependency'](self)
                if new_value != None:
                    self.__dict__[info.name] = new_value
        self.settings_string = self.get_settings_string()

    # add the settings as fields, and calculate information based on them
    def __init__(self, settings_dict):
        self.__dict__.update(settings_dict)
        # Fill in defaults for any setting not supplied, per its type.
        for info in setting_infos:
            if info.name not in self.__dict__:
                if info.type == bool:
                    if info.gui_params is not None and 'default' in info.gui_params:
                        self.__dict__[info.name] = True if info.gui_params['default'] == 'checked' else False
                    else:
                        self.__dict__[info.name] = False
                if info.type == str:
                    if 'default' in info.args_params:
                        self.__dict__[info.name] = info.args_params['default']
                    elif info.gui_params is not None and 'default' in info.gui_params:
                        if 'options' in info.gui_params and isinstance(info.gui_params['options'], dict):
                            self.__dict__[info.name] = info.gui_params['options'][info.gui_params['default']]
                        else:
                            self.__dict__[info.name] = info.gui_params['default']
                    else:
                        self.__dict__[info.name] = ""
                if info.type == int:
                    if 'default' in info.args_params:
                        self.__dict__[info.name] = info.args_params['default']
                    elif info.gui_params is not None and 'default' in info.gui_params:
                        self.__dict__[info.name] = info.gui_params['default']
                    else:
                        self.__dict__[info.name] = 1
                if info.type == list:
                    if 'default' in info.args_params:
                        self.__dict__[info.name] = list(info.args_params['default'])
                    elif info.gui_params is not None and 'default' in info.gui_params:
                        self.__dict__[info.name] = list(info.gui_params['default'])
                    else:
                        self.__dict__[info.name] = []
        self.settings_string = self.get_settings_string()
        self.update_seed(self.seed)
# gets the randomizer settings, whether to open the gui, and the logger level from command line arguments
def get_settings_from_command_line_args():
    """Parse the command line and return (settings, open_gui, loglevel).

    Loads defaults from a settings file, applies --settings_string and --seed
    overrides, and with --convert_settings prints the conversion and exits.

    NOTE(review): local_path() is referenced below, but its import is
    commented out at the top of this module (only random_choices is
    imported), so this code path raises NameError in this vendored copy --
    presumably the tracker never calls this function; confirm before use.
    """
    parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gui', help='Launch the GUI', action='store_true')
    parser.add_argument('--loglevel', default='info', const='info', nargs='?', choices=['error', 'info', 'warning', 'debug'], help='Select level of logging for output.')
    parser.add_argument('--settings_string', help='Provide sharable settings using a settings string. This will override all flags that it specifies.')
    parser.add_argument('--convert_settings', help='Only convert the specified settings to a settings string. If a settings string is specified output the used settings instead.', action='store_true')
    parser.add_argument('--settings', help='Use the specified settings file to use for generation')
    parser.add_argument('--seed', help='Generate the specified seed.')
    args = parser.parse_args()
    # resolve the settings file (default: settings.sav next to the program)
    if args.settings is None:
        settingsFile = local_path('settings.sav')
    else:
        settingsFile = local_path(args.settings)
    try:
        with open(settingsFile) as f:
            settings = Settings(json.load(f))
    except Exception as ex:
        # A missing default settings file falls back to empty settings; an
        # explicitly requested file must exist, so re-raise in that case.
        if args.settings is None:
            settings = Settings({})
        else:
            raise ex
    if args.settings_string is not None:
        settings.update_with_settings_string(args.settings_string)
    if args.seed is not None:
        settings.update_seed(args.seed)
    if args.convert_settings:
        # Conversion-only mode: print and exit without generating anything.
        if args.settings_string is not None:
            print(settings.get_settings_display())
        else:
            print(settings.get_settings_string())
        sys.exit(0)
    return settings, args.gui, args.loglevel
# Master item table for the randomizer ruleset.
#
# Format: name: (type, progression, id, special)
#   type        -- item class string ('Item', 'Song', 'Shop', 'Event',
#                  'BossKey', 'SmallKey', 'FortressSmallKey', 'Compass',
#                  'Map', 'Token')
#   progression -- True if the item can unlock progress, False if it is
#                  explicitly non-progression, None otherwise; consumers
#                  compare against True and False separately
#   id          -- in-game item id byte, or None for pure events
#   special     -- optional dict of extra data (shop prices/objects, song
#                  ids, medallion save bits, ...)
# NOTE(review): field meanings inferred from how the tuple is unpacked in
# rulesets.py (items.Item construction) -- confirm against Item.py.
item_table = {
    'Bombs (5)': ('Item', None, 0x01, None),
    'Deku Nuts (5)': ('Item', None, 0x02, None),
    'Bombchus (10)': ('Item', True, 0x03, None),
    'Boomerang': ('Item', True, 0x06, None),
    'Deku Stick (1)': ('Item', None, 0x07, None),
    'Lens of Truth': ('Item', True, 0x0A, None),
    'Hammer': ('Item', True, 0x0D, None),
    'Cojiro': ('Item', True, 0x0E, None),
    'Bottle': ('Item', True, 0x0F, None),
    'Bottle with Milk': ('Item', True, 0x14, None),
    'Bottle with Letter': ('Item', True, 0x15, None),
    'Magic Bean': ('Item', True, 0x16, None),
    'Skull Mask': ('Item', None, 0x17, None),
    'Spooky Mask': ('Item', None, 0x18, None),
    'Keaton Mask': ('Item', None, 0x1A, None),
    'Bunny Hood': ('Item', None, 0x1B, None),
    'Mask of Truth': ('Item', None, 0x1C, None),
    'Pocket Egg': ('Item', True, 0x1D, None),
    'Pocket Cucco': ('Item', True, 0x1E, None),
    'Odd Mushroom': ('Item', True, 0x1F, None),
    'Odd Potion': ('Item', True, 0x20, None),
    'Poachers Saw': ('Item', True, 0x21, None),
    'Broken Sword': ('Item', True, 0x22, None),
    'Prescription': ('Item', True, 0x23, None),
    'Eyeball Frog': ('Item', True, 0x24, None),
    'Eyedrops': ('Item', True, 0x25, None),
    'Claim Check': ('Item', True, 0x26, None),
    'Kokiri Sword': ('Item', True, 0x27, None),
    'Deku Shield': ('Item', None, 0x29, None),
    'Hylian Shield': ('Item', None, 0x2A, None),
    'Mirror Shield': ('Item', True, 0x2B, None),
    'Goron Tunic': ('Item', True, 0x2C, None),
    'Zora Tunic': ('Item', True, 0x2D, None),
    'Iron Boots': ('Item', True, 0x2E, None),
    'Hover Boots': ('Item', True, 0x2F, None),
    'Stone of Agony': ('Item', True, 0x39, None),
    'Gerudo Membership Card': ('Item', True, 0x3A, None),
    'Heart Container': ('Item', None, 0x3D, None),
    'Piece of Heart': ('Item', None, 0x3E, None),
    # Generic dungeon items (specific per-dungeon variants further below).
    'Boss Key': ('BossKey', True, 0x3F, None),
    'Compass': ('Compass', None, 0x40, None),
    'Map': ('Map', None, 0x41, None),
    'Small Key': ('SmallKey', True, 0x42, {'progressive': True}),
    'Weird Egg': ('Item', True, 0x47, None),
    'Recovery Heart': ('Item', None, 0x48, None),
    'Arrows (5)': ('Item', None, 0x49, None),
    'Arrows (10)': ('Item', None, 0x4A, None),
    'Arrows (30)': ('Item', None, 0x4B, None),
    'Rupee (1)': ('Item', None, 0x4C, None),
    'Rupees (5)': ('Item', None, 0x4D, None),
    'Rupees (20)': ('Item', None, 0x4E, None),
    'Heart Container (Boss)': ('Item', None, 0x4F, None),
    'Goron Mask': ('Item', None, 0x51, None),
    'Zora Mask': ('Item', None, 0x52, None),
    'Gerudo Mask': ('Item', None, 0x53, None),
    'Rupees (50)': ('Item', None, 0x55, None),
    'Rupees (200)': ('Item', None, 0x56, None),
    'Biggoron Sword': ('Item', True, 0x57, None),
    'Fire Arrows': ('Item', True, 0x58, None),
    'Ice Arrows': ('Item', True, 0x59, None),
    'Light Arrows': ('Item', True, 0x5A, None),
    'Gold Skulltula Token': ('Token', True, 0x5B, {'progressive': True}),
    'Dins Fire': ('Item', True, 0x5C, None),
    'Nayrus Love': ('Item', True, 0x5E, None),
    'Farores Wind': ('Item', True, 0x5D, None),
    'Deku Nuts (10)': ('Item', None, 0x64, None),
    'Bombs (10)': ('Item', None, 0x66, None),
    'Bombs (20)': ('Item', None, 0x67, None),
    'Deku Seeds (30)': ('Item', None, 0x69, None),
    'Bombchus (5)': ('Item', True, 0x6A, None),
    'Bombchus (20)': ('Item', True, 0x6B, None),
    'Rupee (Treasure Chest Game)': ('Item', None, 0x72, None),
    'Piece of Heart (Treasure Chest Game)': ('Item', None, 0x76, None),
    'Ice Trap': ('Item', None, 0x7C, None),
    'Progressive Hookshot': ('Item', True, 0x80, {'progressive': True}),
    'Progressive Strength Upgrade': ('Item', True, 0x81, {'progressive': True}),
    'Bomb Bag': ('Item', True, 0x82, None),
    'Bow': ('Item', True, 0x83, None),
    'Slingshot': ('Item', True, 0x84, None),
    'Progressive Wallet': ('Item', True, 0x85, {'progressive': True}),
    'Progressive Scale': ('Item', True, 0x86, {'progressive': True}),
    'Deku Nut Capacity': ('Item', None, 0x87, None),
    'Deku Stick Capacity': ('Item', None, 0x88, None),
    'Bombchus': ('Item', True, 0x89, None),
    'Magic Meter': ('Item', True, 0x8A, None),
    'Ocarina': ('Item', True, 0x8B, None),
    # Bottled contents share the plain bottle's shop object model (0x0F).
    'Bottle with Red Potion': ('Item', True, 0x8C, {'shop_object': 0x0F}),
    'Bottle with Green Potion': ('Item', True, 0x8D, {'shop_object': 0x0F}),
    'Bottle with Blue Potion': ('Item', True, 0x8E, {'shop_object': 0x0F}),
    'Bottle with Fairy': ('Item', True, 0x8F, {'shop_object': 0x0F}),
    'Bottle with Fish': ('Item', True, 0x90, {'shop_object': 0x0F}),
    'Bottle with Blue Fire': ('Item', True, 0x91, {'shop_object': 0x0F}),
    'Bottle with Bugs': ('Item', True, 0x92, {'shop_object': 0x0F}),
    'Bottle with Big Poe': ('Item', True, 0x93, {'shop_object': 0x0F}),
    'Bottle with Poe': ('Item', True, 0x94, {'shop_object': 0x0F}),
    # Per-dungeon keys, maps and compasses.
    'Boss Key (Forest Temple)': ('BossKey', True, 0x95, None),
    'Boss Key (Fire Temple)': ('BossKey', True, 0x96, None),
    'Boss Key (Water Temple)': ('BossKey', True, 0x97, None),
    'Boss Key (Spirit Temple)': ('BossKey', True, 0x98, None),
    'Boss Key (Shadow Temple)': ('BossKey', True, 0x99, None),
    'Boss Key (Ganons Castle)': ('BossKey', True, 0x9A, None),
    'Compass (Deku Tree)': ('Compass', None, 0x9B, None),
    'Compass (Dodongos Cavern)': ('Compass', None, 0x9C, None),
    'Compass (Jabu Jabus Belly)': ('Compass', None, 0x9D, None),
    'Compass (Forest Temple)': ('Compass', None, 0x9E, None),
    'Compass (Fire Temple)': ('Compass', None, 0x9F, None),
    'Compass (Water Temple)': ('Compass', None, 0xA0, None),
    'Compass (Spirit Temple)': ('Compass', None, 0xA1, None),
    'Compass (Shadow Temple)': ('Compass', None, 0xA2, None),
    'Compass (Bottom of the Well)': ('Compass', None, 0xA3, None),
    'Compass (Ice Cavern)': ('Compass', None, 0xA4, None),
    'Map (Deku Tree)': ('Map', None, 0xA5, None),
    'Map (Dodongos Cavern)': ('Map', None, 0xA6, None),
    'Map (Jabu Jabus Belly)': ('Map', None, 0xA7, None),
    'Map (Forest Temple)': ('Map', None, 0xA8, None),
    'Map (Fire Temple)': ('Map', None, 0xA9, None),
    'Map (Water Temple)': ('Map', None, 0xAA, None),
    'Map (Spirit Temple)': ('Map', None, 0xAB, None),
    'Map (Shadow Temple)': ('Map', None, 0xAC, None),
    'Map (Bottom of the Well)': ('Map', None, 0xAD, None),
    'Map (Ice Cavern)': ('Map', None, 0xAE, None),
    'Small Key (Forest Temple)': ('SmallKey', True, 0xAF, {'progressive': True}),
    'Small Key (Fire Temple)': ('SmallKey', True, 0xB0, {'progressive': True}),
    'Small Key (Water Temple)': ('SmallKey', True, 0xB1, {'progressive': True}),
    'Small Key (Spirit Temple)': ('SmallKey', True, 0xB2, {'progressive': True}),
    'Small Key (Shadow Temple)': ('SmallKey', True, 0xB3, {'progressive': True}),
    'Small Key (Bottom of the Well)': ('SmallKey', True, 0xB4, {'progressive': True}),
    'Small Key (Gerudo Training Grounds)':('SmallKey',True, 0xB5, {'progressive': True}),
    'Small Key (Gerudo Fortress)':('FortressSmallKey',True, 0xB6, {'progressive': True}),
    'Small Key (Ganons Castle)': ('SmallKey', True, 0xB7, {'progressive': True}),
    'Double Defense': ('Item', True, 0xB8, None),
    # Items with no in-game id (handled specially) and logic-only events.
    'Zeldas Letter': ('Item', True, None, None),
    'Master Sword': ('Item', True, None, None),
    'Epona': ('Event', True, None, None),
    'Deku Stick Drop': ('Event', True, None, None),
    'Deku Nut Drop': ('Event', True, None, None),
    'Carpenter Rescue': ('Event', True, None, None),
    'Forest Trial Clear': ('Event', True, None, None),
    'Fire Trial Clear': ('Event', True, None, None),
    'Water Trial Clear': ('Event', True, None, None),
    'Shadow Trial Clear': ('Event', True, None, None),
    'Spirit Trial Clear': ('Event', True, None, None),
    'Light Trial Clear': ('Event', True, None, None),
    'Triforce': ('Event', True, None, None),
    # Songs carry their textbox id, in-game song id and item id.
    'Minuet of Forest': ('Song', True, 0xBB,
        {
            'text_id': 0x73,
            'song_id': 0x02,
            'item_id': 0x5A,
        }),
    'Bolero of Fire': ('Song', True, 0xBC,
        {
            'text_id': 0x74,
            'song_id': 0x03,
            'item_id': 0x5B,
        }),
    'Serenade of Water': ('Song', True, 0xBD,
        {
            'text_id': 0x75,
            'song_id': 0x04,
            'item_id': 0x5C,
        }),
    'Requiem of Spirit': ('Song', True, 0xBE,
        {
            'text_id': 0x76,
            'song_id': 0x05,
            'item_id': 0x5D,
        }),
    'Nocturne of Shadow': ('Song', True, 0xBF,
        {
            'text_id': 0x77,
            'song_id': 0x06,
            'item_id': 0x5E,
        }),
    'Prelude of Light': ('Song', True, 0xC0,
        {
            'text_id': 0x78,
            'song_id': 0x07,
            'item_id': 0x5F,
        }),
    'Zeldas Lullaby': ('Song', True, 0xC1,
        {
            'text_id': 0xD4,
            'song_id': 0x0A,
            'item_id': 0x60,
        }),
    'Eponas Song': ('Song', True, 0xC2,
        {
            'text_id': 0xD2,
            'song_id': 0x09,
            'item_id': 0x61,
        }),
    'Sarias Song': ('Song', True, 0xC3,
        {
            'text_id': 0xD1,
            'song_id': 0x08,
            'item_id': 0x62,
        }),
    'Suns Song': ('Song', True, 0xC4,
        {
            'text_id': 0xD3,
            'song_id': 0x0B,
            'item_id': 0x63,
        }),
    'Song of Time': ('Song', True, 0xC5,
        {
            'text_id': 0xD5,
            'song_id': 0x0C,
            'item_id': 0x64,
        }),
    'Song of Storms': ('Song', True, 0xC6,
        {
            'text_id': 0xD6,
            'song_id': 0x0D,
            'item_id': 0x65,
        }),
    # Shop inventory entries: in-shop object model and rupee price.
    'Buy Deku Nut (5)': ('Shop', True, 0x00, {'object': 0x00BB, 'price': 15}),
    'Buy Arrows (30)': ('Shop', False, 0x01, {'object': 0x00D8, 'price': 60}),
    'Buy Arrows (50)': ('Shop', False, 0x02, {'object': 0x00D8, 'price': 90}),
    'Buy Bombs (5) [25]': ('Shop', False, 0x03, {'object': 0x00CE, 'price': 25}),
    'Buy Deku Nut (10)': ('Shop', True, 0x04, {'object': 0x00BB, 'price': 30}),
    'Buy Deku Stick (1)': ('Shop', True, 0x05, {'object': 0x00C7, 'price': 10}),
    'Buy Bombs (10)': ('Shop', False, 0x06, {'object': 0x00CE, 'price': 50}),
    'Buy Fish': ('Shop', False, 0x07, {'object': 0x00F4, 'price': 200}),
    'Buy Red Potion [30]': ('Shop', False, 0x08, {'object': 0x00EB, 'price': 30}),
    'Buy Green Potion': ('Shop', False, 0x09, {'object': 0x00EB, 'price': 30}),
    'Buy Blue Potion': ('Shop', False, 0x0A, {'object': 0x00EB, 'price': 100}),
    'Buy Hylian Shield': ('Shop', False, 0x0C, {'object': 0x00DC, 'price': 80}),
    'Buy Deku Shield': ('Shop', True, 0x0D, {'object': 0x00CB, 'price': 40}),
    'Buy Goron Tunic': ('Shop', True, 0x0E, {'object': 0x00F2, 'price': 200}),
    'Buy Zora Tunic': ('Shop', True, 0x0F, {'object': 0x00F2, 'price': 300}),
    'Buy Heart': ('Shop', False, 0x10, {'object': 0x00B7, 'price': 10}),
    'Buy Bombchu (10)': ('Shop', True, 0x15, {'object': 0x00D9, 'price': 99}),
    'Buy Bombchu (20)': ('Shop', True, 0x16, {'object': 0x00D9, 'price': 180}),
    'Buy Bombchu (5)': ('Shop', True, 0x18, {'object': 0x00D9, 'price': 60}),
    'Buy Deku Seeds (30)': ('Shop', False, 0x1D, {'object': 0x0119, 'price': 30}),
    'Sold Out': ('Shop', False, 0x26, {'object': 0x0148}),
    'Buy Blue Fire': ('Shop', True, 0x27, {'object': 0x0173, 'price': 300}),
    'Buy Bottle Bug': ('Shop', True, 0x28, {'object': 0x0174, 'price': 50}),
    'Buy Poe': ('Shop', False, 0x2A, {'object': 0x0176, 'price': 30}),
    'Buy Fairy\'s Spirit': ('Shop', False, 0x2B, {'object': 0x0177, 'price': 50}),
    'Buy Arrows (10)': ('Shop', False, 0x2C, {'object': 0x00D8, 'price': 20}),
    'Buy Bombs (20)': ('Shop', False, 0x2D, {'object': 0x00CE, 'price': 80}),
    'Buy Bombs (30)': ('Shop', False, 0x2E, {'object': 0x00CE, 'price': 120}),
    'Buy Bombs (5) [35]': ('Shop', False, 0x2F, {'object': 0x00CE, 'price': 35}),
    'Buy Red Potion [40]': ('Shop', False, 0x30, {'object': 0x00EB, 'price': 40}),
    'Buy Red Potion [50]': ('Shop', False, 0x31, {'object': 0x00EB, 'price': 50}),
    # Dungeon rewards: save-file byte/bit and display data.
    'Kokiri Emerald': ('Event', True, None,
        {
            'save_byte': 0xA5,
            'save_bit': 0x04,
            'addr2_data': 0x80,
            'bit_mask': 0x00040000,
            'item_id': 0x6C,
        }),
    'Goron Ruby': ('Event', True, None,
        {
            'save_byte': 0xA5,
            'save_bit': 0x08,
            'addr2_data': 0x81,
            'bit_mask': 0x00080000,
            'item_id': 0x6D,
        }),
    'Zora Sapphire': ('Event', True, None,
        {
            'save_byte': 0xA5,
            'save_bit': 0x10,
            'addr2_data': 0x82,
            'bit_mask': 0x00100000,
            'item_id': 0x6E,
        }),
    'Forest Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x01,
            'addr2_data': 0x3E,
            'bit_mask': 0x00000001,
            'item_id': 0x66,
        }),
    'Fire Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x02,
            'addr2_data': 0x3C,
            'bit_mask': 0x00000002,
            'item_id': 0x67,
        }),
    'Water Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x04,
            'addr2_data': 0x3D,
            'bit_mask': 0x00000004,
            'item_id': 0x68,
        }),
    'Spirit Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x08,
            'addr2_data': 0x3F,
            'bit_mask': 0x00000008,
            'item_id': 0x69,
        }),
    'Shadow Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x10,
            'addr2_data': 0x41,
            'bit_mask': 0x00000010,
            'item_id': 0x6A,
        }),
    'Light Medallion': ('Event', True, None,
        {
            'save_byte': 0xA7,
            'save_bit': 0x20,
            'addr2_data': 0x40,
            'bit_mask': 0x00000020,
            'item_id': 0x6B,
        }),
}
import random
import os
from .Dungeon import Dungeon
from .Item import ItemFactory
#from Utils import data_path
def data_path(path=''):
    """Return the absolute path of *path* inside this package's data dir."""
    base = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(base, 'data', path)
# Per-dungeon key/map/compass counts.
#   boss_key     -- number of boss keys (0 or 1)
#   small_key    -- small-key count in the vanilla layout
#   small_key_mq -- small-key count in the Master Quest layout
#   dungeon_item -- 1 if the dungeon has a map and compass, else 0
dungeon_table = [
    {
        'name': 'Deku Tree',
        'boss_key': 0,
        'small_key': 0,
        'small_key_mq': 0,
        'dungeon_item': 1,
    },
    {
        'name': 'Dodongos Cavern',
        'boss_key': 0,
        'small_key': 0,
        'small_key_mq': 0,
        'dungeon_item': 1,
    },
    {
        'name': 'Jabu Jabus Belly',
        'boss_key': 0,
        'small_key': 0,
        'small_key_mq': 0,
        'dungeon_item': 1,
    },
    {
        'name': 'Forest Temple',
        'boss_key': 1,
        'small_key': 5,
        'small_key_mq': 6,
        'dungeon_item': 1,
    },
    {
        'name': 'Bottom of the Well',
        'boss_key': 0,
        'small_key': 3,
        'small_key_mq': 2,
        'dungeon_item': 1,
    },
    {
        'name': 'Fire Temple',
        'boss_key': 1,
        'small_key': 8,
        'small_key_mq': 5,
        'dungeon_item': 1,
    },
    {
        'name': 'Ice Cavern',
        'boss_key': 0,
        'small_key': 0,
        'small_key_mq': 0,
        'dungeon_item': 1,
    },
    {
        'name': 'Water Temple',
        'boss_key': 1,
        'small_key': 6,
        'small_key_mq': 2,
        'dungeon_item': 1,
    },
    {
        'name': 'Shadow Temple',
        'boss_key': 1,
        'small_key': 5,
        'small_key_mq': 6,
        'dungeon_item': 1,
    },
    {
        'name': 'Gerudo Training Grounds',
        'boss_key': 0,
        'small_key': 9,
        'small_key_mq': 3,
        'dungeon_item': 0,
    },
    {
        'name': 'Spirit Temple',
        'boss_key': 1,
        'small_key': 5,
        'small_key_mq': 7,
        'dungeon_item': 1,
    },
    {
        'name': 'Ganons Castle',
        'boss_key': 1,
        'small_key': 2,
        'small_key_mq': 3,
        'dungeon_item': 0,
    },
]
def create_dungeons(world):
    """Create each dungeon's region graph and its key/map/compass items,
    then attach the resulting Dungeon objects to *world*.

    Master Quest dungeons use the ' MQ' region files and MQ key counts.
    """
    for dungeon_info in dungeon_table:
        name = dungeon_info['name']
        # Look up the MQ flag once; the original mixed `name` and
        # `dungeon_info['name']` for the same lookup.
        mq = world.dungeon_mq[name]

        dungeon_json = os.path.join(
            data_path('World'), name + (' MQ.json' if mq else '.json'))
        world.load_regions_from_json(dungeon_json)

        boss_keys = ItemFactory(
            ['Boss Key (%s)' % name] * dungeon_info['boss_key'])
        small_key_count = (
            dungeon_info['small_key_mq'] if mq else dungeon_info['small_key'])
        small_keys = ItemFactory(
            ['Small Key (%s)' % name] * small_key_count)
        dungeon_items = ItemFactory(
            ['Map (%s)' % name,
             'Compass (%s)' % name] * dungeon_info['dungeon_item'])

        world.dungeons.append(
            Dungeon(world, name, boss_keys, small_keys, dungeon_items))
import operator
import os.path
from ...config import CONFIG
from ... import maps
from . import DungeonList as dungeons
from . import EntranceShuffle as entrances
from . import Item as items
from . import ItemList as itemlist
from . import HintList as hintlist
from . import Location as locationclass
from . import LocationList as locationlist
from . import Region as regions
from . import Rules as rules
from . import Settings as settings
from .SettingsList import logic_tricks
from . import State as state
from . import World as world
__all__ = 'Ruleset',  # single-element tuple: the module's only public name
class Ruleset(object):
    '''
    Ruleset abstraction class.
    Instance variables:
        items: item locations
        skulls: skulltula locations
        inventory: item inventory
    '''
    def __init__(self):
        # Build a Settings object from the stored rule string, then enable
        # exactly the logic tricks named in 'allowed_tricks'.
        self.settings = settings.Settings({})
        self.settings.update_with_settings_string(CONFIG['rule_string'])
        for trick in logic_tricks:
            self.settings.__dict__[logic_tricks[trick]['name']] = (
                logic_tricks[trick]['name']
                in self.settings.__dict__['allowed_tricks'])
        self.world = world.World(self.settings)
        self.world.load_regions_from_json(os.path.join(
            os.path.dirname(__file__), 'data', 'World', 'Overworld.json'))
        dungeons.create_dungeons(self.world)
        self.world.initialize_entrances()
        rules.set_rules(self.world)
        self.state = state.State(self.world)
        # Optionally lock locations the randomizer marked as disabled.
        if not CONFIG['show_disabled']:
            for loc in self.world.get_locations():
                if loc.disabled == locationclass.DisableType.PENDING:
                    loc.locked = True
        # Index every trackable location (or whole region) by map name.
        self.items = {}
        self.skulls = {}
        for i in self.world.regions:
            for j in i.locations:
                if j.name in maps.SKULLTULALOCATIONS:
                    self.skulls[j.name] = j
                elif j.name in maps.ITEMLOCATIONS:
                    self.items[j.name] = j
            else:
                # for/else: runs after each region's location loop (there is
                # no break); whole regions can themselves be map entries.
                if i.name in maps.SKULLTULALOCATIONS:
                    self.skulls[i.name] = i
                elif i.name in maps.ITEMLOCATIONS:
                    self.items[i.name] = i
        # item_table's second field is True (progression), False (junk) or
        # None (neither), so the two comparisons below can both be False.
        self.inventory = {}
        for equipment in itemlist.item_table:
            itm = itemlist.item_table[equipment]
            self.inventory[equipment] = items.Item(
                equipment, itm[1] == True, itm[1] == False,
                itm[0], itm[2], itm[3], self.world)
    def list_regions(self) -> set:
        '''
        Return list of all game regions.
        Returns:
            set: list of region names
        '''
        regions = {
            locationlist.location_table[l][3]
            for l in locationlist.location_table}
        return regions
    def list_locations(self, loctype: str) -> list:
        '''
        Return list of all item locations.
        Args:
            loctype: 'item' or 'skulltula'
        Returns:
            list: list of location names
        '''
        assert loctype in ('item', 'skulltula')
        listing = self.items if loctype == 'item' else self.skulls
        return list(listing.keys())
    def location_available(
            self, name: str, loctype: str, state: state.State = None) -> bool:
        '''
        Check whether given item location is available.
        Args:
            name: name of item location
            loctype: 'item' or 'skulltula'
            state: if given, use this state instead of default one
        Returns:
            bool: True if location is available
        '''
        assert loctype in ('item', 'skulltula')
        listing = self.items if loctype == 'item' else self.skulls
        if isinstance(listing[name], regions.Region):
            # A region entry counts as available only when every location
            # inside it is reachable.
            available = all(
                location.can_reach(self.state if state is None else state)
                for location in listing[name].locations)
        else:
            try:
                available = listing[name].can_reach(
                    self.state if state is None else state)
            except AttributeError:
                available = False
        return available
    def dungeon_available(self, name: str, loctype: str) -> bool:
        '''
        Check to which degree dungeon is clearable.
        This assumes that all keys are available. It hence only checks for
        required items.
        Args:
            name: name of dungeon
            loctype: 'item' or 'skulltula'
        Returns:
            bool: True if all locations are available with all keys
        '''
        skeyname = 'Small Key ({0:s})'.format(name)
        bkeyname = 'Boss Key ({0:s})'.format(name)
        info = self.dungeon_info(name)
        # Work on a copy of the state with all dungeon keys granted.
        fullstate = self.state.copy()
        if fullstate.world.shuffle_smallkeys == 'dungeon':
            fullstate.prog_items[skeyname] = info['keys']
        # NOTE(review): this grants the boss key based on the *small key*
        # shuffle setting; shuffle_bosskeys looks intended -- confirm.
        if info['bosskey'] and fullstate.world.shuffle_smallkeys == 'dungeon':
            fullstate.prog_items[bkeyname] = 1
        fullstate.clear_cached_unreachable()
        locs = self.dungeon_locations(name)
        locs = locs[0] if loctype == 'item' else locs[1]
        listing = self.items if loctype == 'item' else self.skulls
        available = all(listing[l].can_reach(fullstate) for l in locs)
        return available
    def add_item(self, itemname: str) -> None:
        '''
        Add item to current inventory.
        Args:
            itemname: identifier of item
        '''
        self.state.collect(self.inventory[itemname])
    def remove_item(self, itemname: str) -> None:
        '''
        Remove item from current inventory.
        Args:
            itemname: identifier of item
        '''
        self.state.remove(self.inventory[itemname])
    def is_adult(self) -> bool:
        '''
        Check whether adult items are available.
        Returns:
            bool: True if adult items are available
        '''
        # Reaching the Master Sword pedestal also implies adult access.
        return (self.state.is_adult() or
                self.location_available('Master Sword Pedestal', 'item'))
    def check_rule(self, rule: operator.methodcaller) -> bool:
        '''
        Check given rule.
        Args:
            rule: method to check with world state
        Return:
            bool: return value of check
        '''
        return rule(self.state)
    def check_access(self, location: str) -> bool:
        '''
        Check whether given location can be accessed.
        Args:
            location: either item location, game region or region connector
        Returns:
            bool: return value of check
        '''
        # Try the three lookup methods in turn; the first that does not
        # raise RuntimeError wins.
        for loctype in ('get_region', 'get_entrance', 'get_location'):
            loccall = operator.methodcaller(loctype, location)
            try:
                locobject = loccall(self.world)
            except RuntimeError:
                continue
            break
        else:
            # NOTE(review): bare `raise` here has no active exception and
            # itself raises RuntimeError; an explicit error would be clearer.
            raise
        return locobject.can_reach(self.state)
    def dungeon_locations(self, dungeonname: str) -> (list, list):
        '''
        Return list of locations in given dungeon.
        The item list includes the dungeon reward, but not Gossip Stones.
        Args:
            dungeonname: name of dungeon
        Returns:
            list: list of item locations
            list: list of skulltula locations
        '''
        for dungeon in self.world.dungeons:
            if dungeon.name == dungeonname:
                break
        else:
            assert False  # unknown dungeon name
        items = []
        spiders = []
        for region in dungeon.regions:
            for location in region.locations:
                if location.type in ('GossipStone', 'Event', 'Boss'):
                    continue
                if location.type == 'GS Token':
                    spiders.append(location.name)
                    continue
                maploc = maps.ITEMLOCATIONS[location.name]
                # Skip locations that are not drawn on any map.
                if not maploc['maps']:
                    continue
                # Scrub locations only count when scrub shuffle is active.
                if 'restriction' in maploc:
                    if maploc['restriction'] == 'scrubshuffle':
                        if self.world.shuffle_scrubs == 'off':
                            continue
                items.append(location.name)
        return items, spiders
    def dungeon_info(self, dungeonname: str) -> dict:
        '''
        Return info about given dungeon.
        Args:
            dungeonname: name of dungeon
        Returns:
            dict: {'keys': int, 'items': int, 'bosskey': bool}
        '''
        for dungeon in self.world.dungeons:
            if dungeon.name == dungeonname:
                break
        else:
            assert False  # unknown dungeon name
        ret = {}
        ret['keys'] = len(dungeon.small_keys)
        ret['bosskey'] = bool(dungeon.boss_key)
        locations, _ = self.dungeon_locations(dungeonname)
        # Items kept inside their own dungeon do not count as checks.
        ret['items'] = (
            len(locations)
            - ret['keys'] * (self.world.shuffle_smallkeys == 'dungeon')
            - ret['bosskey'] * (self.world.shuffle_bosskeys == 'dungeon')
            - len(dungeon.dungeon_items) * (
                self.world.shuffle_mapcompass == 'dungeon'))
        return ret
    def get_hint_items(self, pooltype: str) -> list:
        '''
        Return list of possible hint information.
        Args:
            pooltype: item, location or alwaysLocation
        Returns:
            list: list of hint strings
        '''
        if pooltype == 'all':
            return hintlist.hintTable
        return hintlist.getHintGroup(pooltype, self.world)
from enum import Enum
from collections import namedtuple
class Tags(Enum):
    """Properties a sound effect can have; used to build curated pools."""
    LOOPED = 0
    QUIET = 1
    IMMEDIATE = 2 # Delayed sounds are commonly undesirable
    BRIEF = 3 # Punchy sounds, good for rapid fire
    NEW = 4
    PAINFUL = 5 # Eardrum-piercing sounds
    INC_NE = 20 # Incompatible with NAVI_ENEMY? (Verify)
    # I'm now thinking it has to do with a limit of concurrent sounds)
# One sound effect: in-game id, settings keyword, human-readable label, tags.
Sound = namedtuple('Sound', 'id keyword label tags')
class Sounds(Enum):
    """Catalogue of game sound effects available to the sound shuffler."""
    NONE = Sound(0x0000, 'none', 'None', [])
    ARMOS_GROAN = Sound(0x3848, 'armos', 'Armos', [])
    BARK = Sound(0x28D8, 'bark', 'Bark', [Tags.BRIEF])
    BOMB_BOUNCE = Sound(0x282F, 'bomb-bounce', 'Bomb Bounce', [Tags.QUIET])
    BOOTS_HOVER = Sound(0x08C9, 'hover-boots', 'Hover Boots', [Tags.LOOPED])
    BOOTS_IRON = Sound(0x080D, 'iron-boots', 'Iron Boots', [Tags.BRIEF, Tags.QUIET])
    BOTTLE_CORK = Sound(0x286C, 'bottle-cork', 'Bottle Cork', [Tags.IMMEDIATE, Tags.BRIEF, Tags.QUIET])
    BOW_TWANG = Sound(0x1830, 'bow-twang', 'Bow Twang', [])
    BUBBLE_LOL = Sound(0x38CA, 'bubble-laugh', 'Bubble Laugh', [])
    BONGO_HIGH = Sound(0x3951, 'bongo-bongo-high', 'Bongo Bongo High', [])
    BONGO_LOW = Sound(0x3950, 'bongo-bongo-low', 'Bongo Bongo Low', [Tags.QUIET])
    CARROT_REFILL = Sound(0x4845, 'carrot-refill', 'Carrot Refill', [])
    CARTOON_FALL = Sound(0x28A0, 'cartoon-fall', 'Cartoon Fall', [])
    CHANGE_ITEM = Sound(0x0835, 'change-item', 'Change Item', [Tags.IMMEDIATE, Tags.BRIEF])
    CHEST_OPEN = Sound(0x2820, 'chest-open', 'Chest Open', [])
    CHILD_CRINGE = Sound(0x683A, 'child-cringe', 'Child Cringe', [Tags.IMMEDIATE])
    CHILD_GASP = Sound(0x6836, 'child-gasp', 'Child Gasp', [])
    CHILD_HURT = Sound(0x6825, 'child-hurt', 'Child Hurt', [])
    CHILD_OWO = Sound(0x6823, 'child-owo', 'Child owo', [])
    CHILD_PANT = Sound(0x6829, 'child-pant', 'Child Pant', [Tags.IMMEDIATE])
    CHILD_SCREAM = Sound(0x6828, 'child-scream', 'Child Scream', [Tags.IMMEDIATE])
    CRATE_EXPLODE = Sound(0x2839, 'exploding-crate', 'Exploding Crate', [])
    CUCCO_CLUCK = Sound(0x2812, 'cluck', 'Cluck', [Tags.BRIEF])
    CUCCO_CROW = Sound(0x2813, 'cockadoodledoo', 'Cockadoodledoo', [])
    CURSED_SCREAM = Sound(0x6867, 'cursed-scream', 'Cursed Scream', [Tags.PAINFUL])
    CURSED_ATTACK = Sound(0x6868, 'cursed-attack', 'Cursed Attack', [Tags.IMMEDIATE])
    DRAWBRIDGE_SET = Sound(0x280E, 'drawbridge-set', 'Drawbridge Set', [])
    DUSK_HOWL = Sound(0x28AE, 'dusk-howl', 'Dusk Howl', [])
    DEKU_BABA_CHATTER = Sound(0x3860, 'deku-baba', 'Deku Baba', [])
    EPONA_CHILD = Sound(0x2844, 'baby-epona', 'Baby Epona', [])
    EXPLOSION = Sound(0x180E, 'explosion', 'Explosion', [])
    FANFARE_MED = Sound(0x4831, 'medium-fanfare', 'Medium Fanfare', [])
    FANFARE_SMALL = Sound(0x4824, 'light-fanfare', 'Light Fanfare', [])
    FIELD_SHRUB = Sound(0x2877, 'field-shrub', 'Field Shrub', [])
    FLARE_BOSS_LOL = Sound(0x3981, 'flare-dancer-laugh', 'Flare Dancer Laugh', [Tags.IMMEDIATE])
    FLARE_BOSS_STARTLE = Sound(0x398B, 'flare-dancer-startled', 'Flare Dancer Startled', [])
    GANON_TENNIS = Sound(0x39CA, 'ganondorf-teh', 'Ganondorf "Teh!"', [])
    GOHMA_LARVA_CROAK = Sound(0x395D, 'gohma-larva-croak', 'Gohma Larva Croak', [])
    GOLD_SKULL_TOKEN = Sound(0x4843, 'gold-skull-token', 'Gold Skull Token', [])
    GORON_WAKE = Sound(0x38FC, 'goron-wake', 'Goron Wake', [])
    GREAT_FAIRY = Sound(0x6858, 'great-fairy', 'Great Fairy', [Tags.PAINFUL])
    GUAY = Sound(0x38B6, 'guay', 'Guay', [Tags.BRIEF])
    GUNSHOT = Sound(0x4835, 'gunshot', 'Gunshot', [])
    HAMMER_BONK = Sound(0x180A, 'hammer-bonk', 'Hammer Bonk', [])
    HORSE_NEIGH = Sound(0x2805, 'horse-neigh', 'Horse Neigh', [Tags.PAINFUL])
    HORSE_TROT = Sound(0x2804, 'horse-trot', 'Horse Trot', [])
    HP_LOW = Sound(0x481B, 'low-health', 'Low Health', [Tags.INC_NE])
    HP_RECOVER = Sound(0x480B, 'recover-health', 'Recover Health', [])
    ICE_SHATTER = Sound(0x0875, 'shattering-ice', 'Shattering Ice', [])
    INGO_WOOAH = Sound(0x6854, 'ingo-wooah', 'Ingo "Wooah!"', [])
    IRON_KNUCKLE = Sound(0x3929, 'iron-knuckle', 'Iron Knuckle', [])
    INGO_KAAH = Sound(0x6855, 'kaah', 'Kaah!', [])
    MOBLIN_CLUB_GROUND = Sound(0x38EF, 'moblin-club-ground', 'Moblin Club Ground', [])
    MOBLIN_CLUB_SWING = Sound(0x39E1, 'moblin-club-swing', 'Moblin Club Swing', [])
    MOO = Sound(0x28DF, 'moo', 'Moo', [])
    NAVI_HELLO = Sound(0x6844, 'navi-hello', 'Navi "Hello!"', [])
    NAVI_HEY = Sound(0x685F, 'navi-hey', 'Navi "Hey!"', [])
    NAVI_RANDOM = Sound(0x6843, 'navi-random', 'Navi Random', [])
    NOTIFICATION = Sound(0x4820, 'notification', 'Notification', [])
    PHANTOM_GANON_LOL = Sound(0x38B0, 'phantom-ganon-laugh', 'Phantom Ganon Laugh', [])
    PLANT_EXPLODE = Sound(0x284E, 'plant-explode', 'Plant Explode', [])
    POE = Sound(0x38EC, 'poe', 'Poe', [])
    POT_SHATTER = Sound(0x2887, 'shattering-pot', 'Shattering Pot', [])
    REDEAD_MOAN = Sound(0x38E4, 'redead-moan', 'Redead Moan', [])
    REDEAD_SCREAM = Sound(0x38E5, 'redead-scream', 'Redead Scream', [Tags.PAINFUL])
    RIBBIT = Sound(0x28B1, 'ribbit', 'Ribbit', [])
    RUPEE = Sound(0x4803, 'rupee', 'Rupee', [Tags.PAINFUL])
    RUPEE_SILVER = Sound(0x28E8, 'silver-rupee', 'Silver Rupee', [])
    RUTO_CHILD_CRASH = Sound(0x6860, 'ruto-crash', 'Ruto Crash', [])
    RUTO_CHILD_EXCITED = Sound(0x6861, 'ruto-excited', 'Ruto Excited', [])
    RUTO_CHILD_GIGGLE = Sound(0x6863, 'ruto-giggle', 'Ruto Giggle', [])
    RUTO_CHILD_LIFT = Sound(0x6864, 'ruto-lift', 'Ruto Lift', [])
    RUTO_CHILD_THROWN = Sound(0x6865, 'ruto-thrown', 'Ruto Thrown', [])
    RUTO_CHILD_WIGGLE = Sound(0x6866, 'ruto-wiggle', 'Ruto Wiggle', [])
    SCRUB_BUSINESS = Sound(0x3882, 'business-scrub', 'Business Scrub', [])
    SCRUB_NUTS_UP = Sound(0x387C, 'scrub-emerge', 'Scrub Emerge', [])
    SHABOM_BOUNCE = Sound(0x3948, 'shabom-bounce', 'Shabom Bounce', [Tags.IMMEDIATE])
    SHABOM_POP = Sound(0x3949, 'shabom-pop', 'Shabom Pop', [Tags.IMMEDIATE, Tags.BRIEF])
    SHELLBLADE = Sound(0x3849, 'shellblade', 'Shellblade', [])
    SKULLTULA = Sound(0x39DA, 'skulltula', 'Skulltula', [Tags.BRIEF])
    SOFT_BEEP = Sound(0x4804, 'soft-beep', 'Soft Beep', [])
    SPIKE_TRAP = Sound(0x38E9, 'spike-trap', 'Spike Trap', [Tags.LOOPED])
    SPIT_NUT = Sound(0x387E, 'spit-nut', 'Spit Nut', [Tags.IMMEDIATE, Tags.BRIEF])
    STALCHILD_ATTACK = Sound(0x3831, 'stalchild-attack', 'Stalchild Attack', [])
    STINGER_CRY = Sound(0x39A3, 'stinger-squeak', 'Stinger Squeak', [Tags.PAINFUL])
    SWITCH = Sound(0x2815, 'switch', 'Switch', [])
    SWORD_BONK = Sound(0x181A, 'sword-bonk', 'Sword Bonk', [])
    TAMBOURINE = Sound(0x4842, 'tambourine', 'Tambourine', [Tags.QUIET])
    TARGETING_ENEMY = Sound(0x4830, 'target-enemy', 'Target Enemy', [])
    TARGETING_NEUTRAL = Sound(0x480C, 'target-neutral', 'Target Neutral', [])
    TALON_CRY = Sound(0x6853, 'talon-cry', 'Talon Cry', [])
    TALON_HMM = Sound(0x6852, 'talon-hmm', 'Talon "Hmm"', [])
    TALON_SNORE = Sound(0x6850, 'talon-snore', 'Talon Snore', [])
    TALON_WTF = Sound(0x6851, 'talon-wtf', 'Talon Wtf', [])
    THUNDER = Sound(0x282E, 'thunder', 'Thunder', [])
    TIMER = Sound(0x481A, 'timer', 'Timer', [Tags.INC_NE])
    TWINROVA_BICKER = Sound(0x39E7, 'twinrova-bicker', 'Twinrova Bicker', [Tags.LOOPED])
    WOLFOS_HOWL = Sound(0x383C, 'wolfos-howl', 'Wolfos Howl', [])
    ZELDA_ADULT_GASP = Sound(0x6879, 'adult-zelda-gasp', 'Adult Zelda Gasp', [])
    ZORA_KING = Sound(0x687A, 'mweep', 'Mweep!', [Tags.BRIEF])
# Sound pools
# Tag-derived pools: every non-looping sound, only looping sounds, and the
# non-looping sounds that are also not ear-piercing.
standard = [s for s in Sounds if Tags.LOOPED not in s.value.tags]
looping = [s for s in Sounds if Tags.LOOPED in s.value.tags]
no_painful = [s for s in standard if Tags.PAINFUL not in s.value.tags]
# Selected by hand (very much a WIP)
# Candidates for Navi's attention chime.
navi = [
    Sounds.NONE,
    Sounds.CUCCO_CLUCK,
    Sounds.SOFT_BEEP,
    Sounds.HP_RECOVER,
    Sounds.TIMER,
    Sounds.HP_LOW,
    Sounds.NOTIFICATION,
    Sounds.TAMBOURINE,
    Sounds.CARROT_REFILL,
    Sounds.ZELDA_ADULT_GASP,
    Sounds.ZORA_KING,
    Sounds.ICE_SHATTER,
    Sounds.EXPLOSION,
    Sounds.CRATE_EXPLODE,
    Sounds.GREAT_FAIRY,
    Sounds.MOO,
    Sounds.BARK,
    Sounds.RIBBIT,
    Sounds.POT_SHATTER,
    Sounds.CUCCO_CROW,
    Sounds.HORSE_NEIGH,
    Sounds.SKULLTULA,
    Sounds.REDEAD_SCREAM,
    Sounds.POE,
    Sounds.RUTO_CHILD_GIGGLE,
    Sounds.DUSK_HOWL,
    Sounds.SCRUB_BUSINESS,
    Sounds.GUAY,
    Sounds.NAVI_HELLO,
]
# Candidates for the repeating low-health beep.
hp_low = [
    Sounds.NONE,
    Sounds.CUCCO_CLUCK,
    Sounds.SOFT_BEEP,
    Sounds.HP_RECOVER,
    Sounds.TIMER,
    Sounds.NOTIFICATION,
    Sounds.TAMBOURINE,
    Sounds.CARROT_REFILL,
    Sounds.NAVI_RANDOM,
    Sounds.NAVI_HEY,
    Sounds.ZELDA_ADULT_GASP,
    Sounds.ZORA_KING,
    Sounds.BOOTS_IRON,
    Sounds.SWORD_BONK,
    Sounds.BOW_TWANG,
    Sounds.HORSE_TROT,
    Sounds.DRAWBRIDGE_SET,
    Sounds.SWITCH,
    Sounds.BOMB_BOUNCE,
    Sounds.BARK,
    Sounds.RIBBIT,
    Sounds.POT_SHATTER,
    Sounds.SCRUB_BUSINESS,
    Sounds.GUAY,
    Sounds.BONGO_LOW,
]
# Candidates for the hover boots' hum.
hover_boots = [
    Sounds.BARK,
    Sounds.SHABOM_POP,
    Sounds.CARTOON_FALL,
    Sounds.ZORA_KING,
    Sounds.TAMBOURINE,
]
# Candidates for the day-to-night transition jingle.
nightfall = [
    Sounds.CUCCO_CROW,
    Sounds.REDEAD_MOAN,
    Sounds.TALON_SNORE,
    Sounds.GREAT_FAIRY,
    Sounds.THUNDER,
    Sounds.MOO,
    Sounds.GOLD_SKULL_TOKEN,
]
# Too small, needs more thought
menu_select = [
    Sounds.CHILD_CRINGE,
    Sounds.CHANGE_ITEM,
    Sounds.BONGO_HIGH,
]
# Too small, needs more thought
menu_cursor = [
    Sounds.CHILD_SCREAM,
    Sounds.BOW_TWANG,
    Sounds.DEKU_BABA_CHATTER,
    Sounds.BONGO_LOW,
]
# Candidates to replace Epona's neigh.
horse_neigh = [
    Sounds.MOO,
    Sounds.CHILD_SCREAM,
    Sounds.RUTO_CHILD_WIGGLE,
    Sounds.GREAT_FAIRY,
    Sounds.ARMOS_GROAN,
    Sounds.REDEAD_SCREAM,
    Sounds.STALCHILD_ATTACK,
]
# A patchable sound slot: display name, candidate pool and patch locations.
SoundHook = namedtuple('SoundHook', 'name pool locations')
class SoundHooks(Enum):
    '''
    Every patchable sound hook: each member pairs a menu label with the
    pool of candidate replacement sounds and the ROM addresses to patch.
    '''
    NAVI_OVERWORLD = SoundHook('Navi - Overworld', navi, [0xAE7EF2, 0xC26C7E])
    NAVI_ENEMY = SoundHook('Navi - Enemy', navi, [0xAE7EC6])
    HP_LOW = SoundHook('Low Health', hp_low, [0xADBA1A])
    BOOTS_HOVER = SoundHook('Hover Boots', hover_boots, [0xBDBD8A])
    NIGHTFALL = SoundHook('Nightfall', nightfall, [0xAD3466, 0xAD7A2E])
    MENU_SELECT = SoundHook('Menu Select', no_painful, [
        0xBA1BBE, 0xBA23CE, 0xBA2956, 0xBA321A, 0xBA72F6, 0xBA8106, 0xBA82EE,
        0xBA9DAE, 0xBA9EAE, 0xBA9FD2, 0xBAE6D6])
    MENU_CURSOR = SoundHook('Menu Cursor', no_painful, [
        0xBA165E, 0xBA1C1A, 0xBA2406, 0xBA327E, 0xBA3936, 0xBA77C2, 0xBA7886,
        0xBA7A06, 0xBA7A6E, 0xBA7AE6, 0xBA7D6A, 0xBA8186, 0xBA822E, 0xBA82A2,
        0xBAA11E, 0xBAE7C6])
    HORSE_NEIGH = SoundHook('Horse Neigh', horse_neigh, [
        0xC18832, 0xC18C32, 0xC19A7E, 0xC19CBE, 0xC1A1F2, 0xC1A3B6, 0xC1B08A,
        0xC1B556, 0xC1C28A, 0xC1CC36, 0xC1EB4A, 0xC1F18E, 0xC6B136, 0xC6BBA2,
        0xC1E93A, 0XC6B366, 0XC6B562])
    # # Some enemies have a different cutting sound, making this a bit weird
    # SWORD_SLASH = SoundHook('Sword Slash', standard, [0xAC2942])
def get_patch_dict():
    '''Return a mapping of every sound keyword to its numeric sound id.'''
    mapping = {}
    for sound in Sounds:
        mapping[sound.value.keyword] = sound.value.id
    return mapping
def get_hook_pool(sound_hook):
    '''Return the replacement-sound pool configured for *sound_hook*.'''
    hook = sound_hook.value
    return hook.pool
def get_setting_choices(sound_hook):
    '''
    Build the setting-choice mapping for *sound_hook*.

    The fixed meta options come first, followed by one keyword->label
    entry per sound in the hook's pool.
    '''
    choices = {
        'default': 'Default',
        'completely-random': 'Completely Random',
        'random-ear-safe': 'Random Ear-Safe',
        'random-choice': 'Random Choice',
        'none': 'None',
    }
    for sound in sound_hook.value.pool:
        choices[sound.value.keyword] = sound.value.label
    return choices
from .version import __version__
import random
from . import Sounds as sfx
from collections import namedtuple
# RGB triple; each channel is an int in the 0-255 range.
Color = namedtuple('Color', ['R', 'G', 'B'])
# Named tunic colors selectable in the settings; "Custom Color" is a
# placeholder overwritten when the user supplies a raw hex code.
tunic_colors = {
    "Custom Color": Color(0x00, 0x00, 0x00),
    "Kokiri Green": Color(0x1E, 0x69, 0x1B),
    "Goron Red": Color(0x64, 0x14, 0x00),
    "Zora Blue": Color(0x00, 0x3C, 0x64),
    "Black": Color(0x30, 0x30, 0x30),
    "White": Color(0xF0, 0xF0, 0xFF),
    "Azure Blue": Color(0x13, 0x9E, 0xD8),
    "Vivid Cyan": Color(0x13, 0xE9, 0xD8),
    "Light Red": Color(0xF8, 0x7C, 0x6D),
    "Fuchsia": Color(0xFF, 0x00, 0xFF),
    "Purple": Color(0x95, 0x30, 0x80),
    "Majora Purple": Color(0x40, 0x00, 0x40),
    "Twitch Purple": Color(0x64, 0x41, 0xA5),
    "Purple Heart": Color(0x8A, 0x2B, 0xE2),
    "Persian Rose": Color(0xFF, 0x14, 0x93),
    "Dirty Yellow": Color(0xE0, 0xD8, 0x60),
    "Blush Pink": Color(0xF8, 0x6C, 0xF8),
    "Hot Pink": Color(0xFF, 0x69, 0xB4),
    "Rose Pink": Color(0xFF, 0x90, 0xB3),
    "Orange": Color(0xE0, 0x79, 0x40),
    "Gray": Color(0xA0, 0xA0, 0xB0),
    "Gold": Color(0xD8, 0xB0, 0x60),
    "Silver": Color(0xD0, 0xF0, 0xFF),
    "Beige": Color(0xC0, 0xA0, 0xA0),
    "Teal": Color(0x30, 0xD0, 0xB0),
    "Blood Red": Color(0x83, 0x03, 0x03),
    "Blood Orange": Color(0xFE, 0x4B, 0x03),
    "Royal Blue": Color(0x40, 0x00, 0x90),
    "Sonic Blue": Color(0x50, 0x90, 0xE0),
    "NES Green": Color(0x00, 0xD0, 0x00),
    "Dark Green": Color(0x00, 0x25, 0x18),
    "Lumen": Color(0x50, 0x8C, 0xF0),
}
# Named Navi color schemes; each value is an (inner core, outer glow) pair.
NaviColors = {  # Inner Core Color Outer Glow Color
    "Custom Color": (Color(0x00, 0x00, 0x00), Color(0x00, 0x00, 0x00)),
    "Gold": (Color(0xFE, 0xCC, 0x3C), Color(0xFE, 0xC0, 0x07)),
    "White": (Color(0xFF, 0xFF, 0xFF), Color(0x00, 0x00, 0xFF)),
    "Green": (Color(0x00, 0xFF, 0x00), Color(0x00, 0xFF, 0x00)),
    "Light Blue": (Color(0x96, 0x96, 0xFF), Color(0x96, 0x96, 0xFF)),
    "Yellow": (Color(0xFF, 0xFF, 0x00), Color(0xC8, 0x9B, 0x00)),
    "Red": (Color(0xFF, 0x00, 0x00), Color(0xFF, 0x00, 0x00)),
    "Magenta": (Color(0xFF, 0x00, 0xFF), Color(0xC8, 0x00, 0x9B)),
    "Black": (Color(0x00, 0x00, 0x00), Color(0x00, 0x00, 0x00)),
    "Tatl": (Color(0xFF, 0xFF, 0xFF), Color(0xC8, 0x98, 0x00)),
    "Tael": (Color(0x49, 0x14, 0x6C), Color(0xFF, 0x00, 0x00)),
    "Fi": (Color(0x2C, 0x9E, 0xC4), Color(0x2C, 0x19, 0x83)),
    "Ciela": (Color(0xE6, 0xDE, 0x83), Color(0xC6, 0xBE, 0x5B)),
    "Epona": (Color(0xD1, 0x49, 0x02), Color(0x55, 0x1F, 0x08)),
    "Ezlo": (Color(0x62, 0x9C, 0x5F), Color(0x3F, 0x5D, 0x37)),
    "King of Red Lions": (Color(0xA8, 0x33, 0x17), Color(0xDE, 0xD7, 0xC5)),
    "Linebeck": (Color(0x03, 0x26, 0x60), Color(0xEF, 0xFF, 0xFF)),
    "Loftwing": (Color(0xD6, 0x2E, 0x31), Color(0xFD, 0xE6, 0xCC)),
    "Midna": (Color(0x19, 0x24, 0x26), Color(0xD2, 0x83, 0x30)),
    "Phantom Zelda": (Color(0x97, 0x7A, 0x6C), Color(0x6F, 0x46, 0x67)),
}
# Named sword trail colors; each value is an (initial, fade) pair.
# "Rainbow" is a sentinel handled separately by patch_sword_trails.
sword_colors = {  # Initial Color Fade Color
    "Custom Color": (Color(0x00, 0x00, 0x00), Color(0x00, 0x00, 0x00)),
    "Rainbow": (Color(0x00, 0x00, 0x00), Color(0x00, 0x00, 0x00)),
    "White": (Color(0xFF, 0xFF, 0xFF), Color(0xFF, 0xFF, 0xFF)),
    "Red": (Color(0xFF, 0x00, 0x00), Color(0xFF, 0x00, 0x00)),
    "Green": (Color(0x00, 0xFF, 0x00), Color(0x00, 0xFF, 0x00)),
    "Blue": (Color(0x00, 0x00, 0xFF), Color(0x00, 0x00, 0xFF)),
    "Cyan": (Color(0x00, 0xFF, 0xFF), Color(0x00, 0xFF, 0xFF)),
    "Magenta": (Color(0xFF, 0x00, 0xFF), Color(0xFF, 0x00, 0xFF)),
    "Orange": (Color(0xFF, 0xA5, 0x00), Color(0xFF, 0xA5, 0x00)),
    "Gold": (Color(0xFF, 0xD7, 0x00), Color(0xFF, 0xD7, 0x00)),
    "Purple": (Color(0x80, 0x00, 0x80), Color(0x80, 0x00, 0x80)),
    "Pink": (Color(0xFF, 0x69, 0xB4), Color(0xFF, 0x69, 0xB4)),
}
def get_tunic_colors():
    '''Return the names of all predefined tunic colors.'''
    return [*tunic_colors]
def get_tunic_color_options():
    '''Tunic options: the two random modes plus every named color.'''
    return ["Random Choice", "Completely Random"] + get_tunic_colors()
def get_navi_colors():
    '''Return the names of all predefined Navi color schemes.'''
    return [*NaviColors]
def get_navi_color_options():
    '''Navi options: the two random modes plus every named scheme.'''
    return ["Random Choice", "Completely Random"] + get_navi_colors()
def get_sword_colors():
    '''Return the names of all predefined sword trail colors.'''
    return [*sword_colors]
def get_sword_color_options():
    '''Sword options: the two random modes plus every named color.'''
    return ["Random Choice", "Completely Random"] + get_sword_colors()
def patch_targeting(rom, settings, log, symbols):
    '''Write the default Z-targeting mode into the ROM (hold=1, switch=0).'''
    hold_mode = 0x01 if settings.default_targeting == 'hold' else 0x00
    rom.write_byte(0xB71E6D, hold_mode)
def patch_dpad(rom, settings, log, symbols):
    '''Toggle the D-Pad HUD overlay and record the choice in the log.'''
    flag = 0x01 if settings.display_dpad else 0x00
    rom.write_byte(symbols['CFG_DISPLAY_DPAD'], flag)
    log.display_dpad = settings.display_dpad
def patch_music(rom, settings, log, symbols):
    '''Apply the background-music setting: shuffle, silence, or vanilla.'''
    choice = settings.background_music
    if choice == 'random':
        # Restore first so the shuffle always starts from vanilla data.
        restore_music(rom)
        log.bgm = randomize_music(rom)
    elif choice == 'off':
        disable_music(rom)
    else:
        restore_music(rom)
def patch_tunic_colors(rom, settings, log, symbols):
    '''
    Write the three tunic colors chosen in *settings* into the ROM and
    record each choice (with its final hex value) in the cosmetics log.
    '''
    targets = [
        ('Kokiri Tunic', settings.kokiri_color, 0x00B6DA38),
        ('Goron Tunic', settings.goron_color, 0x00B6DA3B),
        ('Zora Tunic', settings.zora_color, 0x00B6DA3E),
    ]
    named_colors = get_tunic_colors()
    for tunic_name, option, address in targets:
        # 'Random Choice' first resolves to one of the named colors.
        if option == 'Random Choice':
            option = random.choice(named_colors)
        if option == 'Completely Random':
            # Fresh random channel values for this tunic.
            rgb = [random.getrandbits(8) for _ in range(3)]
        elif option in tunic_colors:
            rgb = list(tunic_colors[option])
        else:
            # Anything else is treated as a raw RRGGBB hex string.
            rgb = [int(option[i:i + 2], 16) for i in (0, 2, 4)]
            option = 'Custom'
        rom.write_bytes(address, rgb)
        log.tunic_colors[tunic_name] = dict(
            option=option,
            color=''.join('{:02X}'.format(channel) for channel in rgb))
def patch_navi_colors(rom, settings, log, symbols):
    '''
    Patch Navi's glow colors for each targeting state.

    For every state the chosen option is resolved (random scheme, fully
    random per address, a named scheme, or a raw hex code), an inner-core
    and outer-glow RGBA pair is written to each ROM address, and the
    outcome is recorded in log.navi_colors.

    Fix: removed the unused local ``inner`` that the original computed
    from ``navi[0:4]`` and never read.
    '''
    # (state label, selected option, ROM addresses to patch)
    navi = [
        ('Navi Idle', settings.navi_color_default, [0x00B5E184]),  # Default
        ('Navi Targeting Enemy', settings.navi_color_enemy,
         [0x00B5E19C, 0x00B5E1BC]),  # Enemy, Boss
        ('Navi Targeting NPC', settings.navi_color_npc, [0x00B5E194]),  # NPC
        ('Navi Targeting Prop', settings.navi_color_prop,
         [0x00B5E174, 0x00B5E17C, 0x00B5E18C,
          0x00B5E1A4, 0x00B5E1AC, 0x00B5E1B4,
          0x00B5E1C4, 0x00B5E1CC, 0x00B5E1D4]),  # Everything else
    ]
    navi_color_list = get_navi_colors()
    for navi_action, navi_option, navi_addresses in navi:
        # 'Random Choice' picks one scheme for the whole group.
        if navi_option == 'Random Choice':
            navi_option = random.choice(navi_color_list)
        custom_color = False
        for address in navi_addresses:
            # 'Completely Random' rerolls for every single address and is
            # logged per address.
            if navi_option == 'Completely Random':
                colors = ([random.getrandbits(8), random.getrandbits(8), random.getrandbits(8)],
                          [random.getrandbits(8), random.getrandbits(8), random.getrandbits(8)])
                if navi_action not in log.navi_colors:
                    log.navi_colors[navi_action] = list()
                log.navi_colors[navi_action].append(dict(
                    option=navi_option,
                    color1=''.join(['{:02X}'.format(c) for c in list(colors[0])]),
                    color2=''.join(['{:02X}'.format(c) for c in list(colors[1])])))
            # grab the color from the list
            elif navi_option in NaviColors:
                colors = list(NaviColors[navi_option][0]), list(NaviColors[navi_option][1])
            # build color from hex code
            else:
                base_color = list(int(navi_option[i:i+2], 16) for i in (0, 2, 4))
                colors = (base_color, base_color)
                custom_color = True
            # inner RGB + alpha, then outer RGB + alpha
            color = colors[0] + [0xFF] + colors[1] + [0xFF]
            rom.write_bytes(address, color)
        if custom_color:
            navi_option = 'Custom'
        if navi_action not in log.navi_colors:
            log.navi_colors[navi_action] = [dict(
                option=navi_option,
                color1=''.join(['{:02X}'.format(c) for c in list(colors[0])]),
                color2=''.join(['{:02X}'.format(c) for c in list(colors[1])]))]
def patch_sword_trails(rom, settings, log, symbols):
    '''
    Patch the inner and outer sword trail colors and the trail duration.

    Args:
        rom: ROM object being patched
        settings: cosmetic settings (trail color options and duration)
        log: CosmeticsLog collecting the chosen options
        symbols: address table with the rainbow-trail enable flags
    '''
    # patch sword trail colors
    sword_trails = [
        ('Inner Initial Sword Trail', settings.sword_trail_color_inner,
            [(0x00BEFF80, 0xB0, 0x40), (0x00BEFF88, 0x20, 0x00)], symbols['CFG_RAINBOW_SWORD_INNER_ENABLED']),
        ('Outer Initial Sword Trail', settings.sword_trail_color_outer,
            [(0x00BEFF7C, 0xB0, 0xFF), (0x00BEFF84, 0x10, 0x00)], symbols['CFG_RAINBOW_SWORD_OUTER_ENABLED']),
    ]
    sword_color_list = get_sword_colors()
    for index, item in enumerate(sword_trails):
        sword_trail_name, sword_trail_option, sword_trail_addresses, sword_trail_rainbow_symbol = item
        # handle random
        if sword_trail_option == 'Random Choice':
            sword_trail_option = random.choice(sword_color_list)
        custom_color = False
        # NOTE(review): the inner loop rebinds `index`; from here on it is
        # the per-address index (0 = initial color, 1 = fade color), which
        # selects the tuple element from sword_colors below. The outer
        # enumerate counter is otherwise unused.
        for index, (address, transparency, white_transparency) in enumerate(sword_trail_addresses):
            # set rainbow option
            if sword_trail_option == 'Rainbow':
                rom.write_byte(sword_trail_rainbow_symbol, 0x01)
                color = [0x00, 0x00, 0x00]
                continue
            else:
                rom.write_byte(sword_trail_rainbow_symbol, 0x00)
            # handle completely random
            if sword_trail_option == 'Completely Random':
                color = [random.getrandbits(8), random.getrandbits(8), random.getrandbits(8)]
                if sword_trail_name not in log.sword_colors:
                    log.sword_colors[sword_trail_name] = list()
                log.sword_colors[sword_trail_name].append(dict(option=sword_trail_option, color=''.join(['{:02X}'.format(c) for c in color[0:3]])))
            elif sword_trail_option in sword_colors:
                color = list(sword_colors[sword_trail_option][index])
            # build color from hex code
            else:
                color = list(int(sword_trail_option[i:i+2], 16) for i in (0, 2 ,4))
                custom_color = True
            # White uses its own alpha value per address.
            if sword_trail_option == 'White':
                color = color + [white_transparency]
            else:
                color = color + [transparency]
            rom.write_bytes(address, color)
        if custom_color:
            sword_trail_option = 'Custom'
        if sword_trail_name not in log.sword_colors:
            log.sword_colors[sword_trail_name] = [dict(option=sword_trail_option, color=''.join(['{:02X}'.format(c) for c in color[0:3]]))]
    log.sword_trail_duration = settings.sword_trail_duration
    rom.write_byte(0x00BEFF8C, settings.sword_trail_duration)
def patch_sfx(rom, settings, log, symbols):
    '''Patch every configurable sound-effect hook and log each choice.'''
    hook_settings = [
        (settings.sfx_navi_overworld, sfx.SoundHooks.NAVI_OVERWORLD),
        (settings.sfx_navi_enemy, sfx.SoundHooks.NAVI_ENEMY),
        (settings.sfx_low_hp, sfx.SoundHooks.HP_LOW),
        (settings.sfx_menu_cursor, sfx.SoundHooks.MENU_CURSOR),
        (settings.sfx_menu_select, sfx.SoundHooks.MENU_SELECT),
        (settings.sfx_nightfall, sfx.SoundHooks.NIGHTFALL),
        (settings.sfx_horse_neigh, sfx.SoundHooks.HORSE_NEIGH),
        (settings.sfx_hover_boots, sfx.SoundHooks.BOOTS_HOVER),
    ]
    sound_dict = sfx.get_patch_dict()
    for selection, hook in hook_settings:
        if selection == 'default':
            # Restore the vanilla sound id at every hook location.
            for loc in hook.value.locations:
                vanilla_id = int.from_bytes(
                    (rom.original[loc:loc+2]), byteorder='big', signed=False)
                rom.write_int16(loc, vanilla_id)
        else:
            # Resolve the random modes to a concrete sound keyword first.
            if selection == 'random-choice':
                selection = random.choice(sfx.get_hook_pool(hook)).value.keyword
            elif selection == 'random-ear-safe':
                selection = random.choice(sfx.no_painful).value.keyword
            elif selection == 'completely-random':
                selection = random.choice(sfx.standard).value.keyword
            chosen_id = sound_dict[selection]
            for loc in hook.value.locations:
                rom.write_int16(loc, chosen_id)
        log.sfx[hook.value.name] = selection
def patch_instrument(rom, settings, log, symbols):
    '''Select the instrument used for the player's ocarina notes.'''
    # Instrument ids 0x00 ('none') and 0x07 (alternate ocarina) are
    # deliberately excluded from the selectable set.
    instruments = {
        'ocarina': 0x01,
        'malon': 0x02,
        'whistle': 0x03,
        'harp': 0x04,
        'grind-organ': 0x05,
        'flute': 0x06,
    }
    if settings.sfx_ocarina == 'random-choice':
        choice = random.choice(list(instruments.keys()))
    else:
        choice = settings.sfx_ocarina
    rom.write_byte(0x00B53C7B, instruments[choice])
    log.sfx['Ocarina'] = choice
# ROM addresses at which the cosmetic-data format header may be located.
cosmetic_data_headers = [
    0x03481000,
    0x03480810,
]
# Patches that rely only on vanilla OoT data and work with every ROM.
global_patch_sets = [
    patch_targeting,
    patch_music,
    patch_tunic_colors,
    patch_navi_colors,
    patch_sfx,
    patch_instrument,
]
# Version-specific patches, keyed by the cosmetic format version read from
# the ROM; each entry lists its patch functions and the symbol addresses
# those functions need.
patch_sets = {
    0x1F04FA62: {
        "patches": [
            patch_dpad,
            patch_sword_trails,
        ],
        "symbols": {
            "CFG_DISPLAY_DPAD": 0x03480814,
            "CFG_RAINBOW_SWORD_INNER_ENABLED": 0x03480815,
            "CFG_RAINBOW_SWORD_OUTER_ENABLED": 0x03480816,
        },
    },
    0x1F05D3F9: {
        "patches": [
            patch_dpad,
            patch_sword_trails,
        ],
        "symbols": {
            "CFG_DISPLAY_DPAD": 0x03481004,
            "CFG_RAINBOW_SWORD_INNER_ENABLED": 0x03481005,
            "CFG_RAINBOW_SWORD_OUTER_ENABLED": 0x03481006,
        },
    }
}
def patch_cosmetics(settings, rom):
    '''
    Apply every cosmetic patch to *rom*.

    Returns:
        CosmeticsLog: record of what was patched (log.error is set when
            the ROM's cosmetic data format is old or unknown).
    '''
    log = CosmeticsLog(settings)
    # Re-seed so aesthetics never depend on the generation seed.
    random.seed()
    # Patches that use vanilla OoT data are always compatible.
    for apply_patch in global_patch_sets:
        apply_patch(rom, settings, log, {})
    # Probe the known header locations to detect the cosmetic data format.
    versioned_patch_set = None
    cosmetic_version = None
    for header in cosmetic_data_headers:
        cosmetic_version = rom.read_int32(header)
        versioned_patch_set = patch_sets.get(cosmetic_version)
        if versioned_patch_set is not None:
            break
    if versioned_patch_set is None:
        # Unknown patch format
        log.error = "Unable to patch some cosmetics. ROM uses unknown cosmetic patch format."
        return log
    if cosmetic_version != rom.read_int32(rom.sym('COSMETIC_FORMAT_VERSION')):
        log.error = "ROM uses old cosmetic patch format."
    # patch version specific patches
    for apply_patch in versioned_patch_set['patches']:
        apply_patch(rom, settings, log, versioned_patch_set['symbols'])
    return log
# Format: (Title, Sequence ID)
# Background music tracks eligible for shuffling/disabling; the sequence
# id indexes both the sequence pointer table and the instrument table.
bgm_sequence_ids = [
    ('Hyrule Field', 0x02),
    ('Dodongos Cavern', 0x18),
    ('Kakariko Adult', 0x19),
    ('Battle', 0x1A),
    ('Boss Battle', 0x1B),
    ('Inside Deku Tree', 0x1C),
    ('Market', 0x1D),
    ('Title Theme', 0x1E),
    ('House', 0x1F),
    ('Jabu Jabu', 0x26),
    ('Kakariko Child', 0x27),
    ('Fairy Fountain', 0x28),
    ('Zelda Theme', 0x29),
    ('Fire Temple', 0x2A),
    ('Forest Temple', 0x2C),
    ('Castle Courtyard', 0x2D),
    ('Ganondorf Theme', 0x2E),
    ('Lon Lon Ranch', 0x2F),
    ('Goron City', 0x30),
    ('Miniboss Battle', 0x38),
    ('Temple of Time', 0x3A),
    ('Kokiri Forest', 0x3C),
    ('Lost Woods', 0x3E),
    ('Spirit Temple', 0x3F),
    ('Horse Race', 0x40),
    ('Ingo Theme', 0x42),
    ('Fairy Flying', 0x4A),
    ('Deku Tree', 0x4B),
    ('Windmill Hut', 0x4C),
    ('Shooting Gallery', 0x4E),
    ('Sheik Theme', 0x4F),
    ('Zoras Domain', 0x50),
    ('Shop', 0x55),
    ('Chamber of the Sages', 0x56),
    ('Ice Cavern', 0x58),
    ('Kaepora Gaebora', 0x5A),
    ('Shadow Temple', 0x5B),
    ('Water Temple', 0x5C),
    ('Gerudo Valley', 0x5F),
    ('Potion Shop', 0x60),
    ('Kotake and Koume', 0x61),
    ('Castle Escape', 0x62),
    ('Castle Underground', 0x63),
    ('Ganondorf Battle', 0x64),
    ('Ganon Battle', 0x65),
    ('Fire Boss', 0x6B),
    ('Mini-game', 0x6C)
]
def randomize_music(rom):
    '''
    Shuffle all background-music tracks in *rom*.

    Returns:
        dict: mapping of each slot name to the track now playing there.
    '''
    log = {}
    # Collect the sequence pointer and instrument set of every track.
    bgm_data = []
    for slot_name, seq_id in bgm_sequence_ids:
        sequence = rom.read_bytes(0xB89AE0 + (seq_id * 0x10), 0x10)
        instrument = rom.read_int16(0xB89910 + 0xDD + (seq_id * 2))
        bgm_data.append((slot_name, sequence, instrument))
    # shuffle data
    random.shuffle(bgm_data)
    # Write the shuffled tracks back over the original slots.
    for slot_name, seq_id in bgm_sequence_ids:
        track_name, sequence, instrument = bgm_data.pop()
        rom.write_bytes(0xB89AE0 + (seq_id * 0x10), sequence)
        rom.write_int16(0xB89910 + 0xDD + (seq_id * 2), instrument)
        log[slot_name] = track_name
    # File Select reuses the Fairy Fountain track but has its own
    # instrument set pointer, so copy that over as well.
    rom.write_int16(0xB89910 + 0xDD + (0x57 * 2),
                    rom.read_int16(0xB89910 + 0xDD + (0x28 * 2)))
    return log
def disable_music(rom):
    '''Silence all background music by pointing every track at track 0.'''
    # Sequence slot 0 holds the empty track.
    silence = rom.read_bytes(0xB89AE0 + (0 * 0x10), 0x10)
    for _, seq_id in bgm_sequence_ids:
        rom.write_bytes(0xB89AE0 + (seq_id * 0x10), silence)
def restore_music(rom):
    '''Restore every BGM sequence and instrument set from the vanilla ROM.'''
    for _, seq_id in bgm_sequence_ids:
        seq_start = 0xB89AE0 + (seq_id * 0x10)
        rom.write_bytes(seq_start, rom.original[seq_start:seq_start + 0x10])
        inst_start = 0xB89910 + 0xDD + (seq_id * 2)
        rom.write_bytes(inst_start, rom.original[inst_start:inst_start + 0x02])
    # File Select has its own instrument set pointer; restore it too.
    fs_start = 0xB89910 + 0xDD + (0x57 * 2)
    rom.write_bytes(fs_start, rom.original[fs_start:fs_start + 0x02])
class CosmeticsLog(object):
    '''
    Record of every cosmetic patch applied to a ROM, formatted for the
    human-readable cosmetics log file.

    Fix: cosmetics_output no longer shadows the builtin ``list`` with its
    loop variables.
    '''
    def __init__(self, settings):
        '''
        Args:
            settings: settings object the cosmetics were derived from
        '''
        self.settings = settings
        self.tunic_colors = {}
        self.navi_colors = {}
        self.sword_colors = {}
        self.sfx = {}
        self.bgm = {}
        # Set to a message when some cosmetics could not be patched.
        self.error = None
    def to_file(self, filename):
        '''Write the formatted cosmetics log to *filename*.'''
        with open(filename, 'w') as outfile:
            outfile.write(self.cosmetics_output())
    def cosmetics_output(self):
        '''Return the full cosmetics log as a formatted string.'''
        output = ''
        output += 'OoT Randomizer Version %s - Cosmetics Log\n' % (__version__)
        if self.error:
            output += 'Error: %s\n' % self.error
        format_string = '\n{key:{width}} {value}'
        padding = 40
        output += format_string.format(key='Default Targeting Option:', value=self.settings.default_targeting, width=padding)
        output += format_string.format(key='Background Music:', value=self.settings.background_music, width=padding)
        # Only present when patch_dpad ran (version-specific patch).
        if 'display_dpad' in self.__dict__:
            output += format_string.format(key='Display D-Pad HUD:', value=self.display_dpad, width=padding)
        output += '\n\nColors:\n'
        for tunic, options in self.tunic_colors.items():
            color_option_string = '{option} (#{color})'
            output += format_string.format(key=tunic+':', value=color_option_string.format(option=options['option'], color=options['color']), width=padding)
        for navi_action, entries in self.navi_colors.items():
            for i, options in enumerate(entries):
                color_option_string = '{option} (#{color1}, #{color2})'
                output += format_string.format(key=(navi_action+':') if i == 0 else '', value=color_option_string.format(option=options['option'], color1=options['color1'], color2=options['color2']), width=padding)
        if 'sword_colors' in self.__dict__:
            for sword_trail, entries in self.sword_colors.items():
                for i, options in enumerate(entries):
                    if options['option'] == 'Rainbow':
                        color_option_string = '{option}'
                    else:
                        color_option_string = '{option} (#{color})'
                    output += format_string.format(key=(sword_trail+':') if i == 0 else '', value=color_option_string.format(option=options['option'], color=options['color']), width=padding)
        if 'sword_trail_duration' in self.__dict__:
            output += format_string.format(key='Sword Trail Duration:', value=self.sword_trail_duration, width=padding)
        output += '\n\nSFX:\n'
        for key, value in self.sfx.items():
            output += format_string.format(key=key+':', value=value, width=padding)
        if self.settings.background_music == 'random':
            music_padding = 40
            output += '\n\nBackground Music:\n'
            for key, value in self.bgm.items():
                output += format_string.format(key=key+':', value=value, width=music_padding)
        return output
import operator
import tkinter as tk
import tkinter.ttk as ttk
import typing
from ..config import CONFIG
from ..config import layout as storage
from ..dungeons.lists import INFO as DUNGEONLOCATIONS
from ..items import image
from ..maps import ITEMLOCATIONS, SKULLTULALOCATIONS
from ..maps.info import MAPSCALE, MAPSPEC, BUTTONTYPE
from .. import world
from . import misc
__all__ = 'MapDisplay',
class MapDisplay(tk.Toplevel):
    '''
    Map window

    Instance variables:
        identifier: map name
        spec: map properties
        scale: map size
        buttons: item location buttons
        links: linked maps
        tracker: location tracker
        available: location availability
        parent: overworld map display (if applicable)
        childmaps: dungeon specific maps tied to this overworld map
    '''
    def __init__(self, spec: str, tracker: world.LocationTracker,
                 parent = None):
        '''
        Args:
            spec: type of map to be created
            tracker: location tracker
            parent: optional parent window
        '''
        # General initialisation
        super().__init__()
        self.identifier = spec
        self.spec = MAPSPEC[spec]
        self.scale = CONFIG['map_size']
        self.title(self.spec['title'])
        self.parent = parent
        # Set up bottom text label.
        self.helpertext = tk.StringVar()
        self.helper = ttk.Label(
            self, textvariable=self.helpertext,
            font=('Arial', int(12 * self.scale)))
        self.helper.grid(column=0, row=1, sticky=tk.S)
        # Set map type.
        if (
                self.identifier.endswith('_child') or
                self.identifier.endswith('_adult')):
            mapimage = 'overworld'
        else:
            mapimage = DUNGEONLOCATIONS[self.identifier]['mapimg']
        # Set background image.
        imagefile = tk.PhotoImage(file=image(mapimage)[0], master=self)
        imgdim = (
            imagefile.width() * MAPSCALE * self.scale * self.spec['mapscale'],
            imagefile.height() * MAPSCALE * self.scale * self.spec['mapscale'])
        self.m = tk.Canvas(self, height=imgdim[1], width=imgdim[0])
        self.m.grid(column=0, row=0, sticky=misc.A)
        # Tk zoom()/subsample() only accept integers, so search for an
        # integer pair (upscale, downscale) whose ratio equals the wanted
        # scaling factor.
        scaling = MAPSCALE * self.scale * self.spec['mapscale']
        for up in range(1, 1000):
            if not (scaling * up) % 1:
                upscale = int(scaling * up)
                break
        else:
            # No integer ratio found: reset the stored size to default.
            CONFIG.set('map_size', 1)
            assert False
        downscale = int(upscale // scaling)
        if upscale != 1:
            imagefile = imagefile.zoom(upscale, upscale)
        if downscale != 1:
            imagefile = imagefile.subsample(downscale, downscale)
        self.image = self.m.create_image((0, 0), anchor=tk.NW, image=imagefile)
        self.imagefile = imagefile
        # Set-up location tracker.
        self.tracker = tracker
        self.available = {}
        # Place buttons.
        self.buttons = {}
        for b in ITEMLOCATIONS:
            button = ITEMLOCATIONS[b]
            for m in button['maps']: assert m in MAPSPEC
            if 'restriction' in button:
                if ((button['restriction'] == 'scrubshuffle'
                     and not CONFIG['show_scrubs']) or
                        (button['restriction'] == 'shopsanity'
                         and not CONFIG['show_shops'])):
                    continue
            if spec in button['maps']:
                # Stored coordinates are (row, column); reverse for Tk.
                coord = list(
                    int(c * MAPSCALE * self.scale * self.spec['mapscale'])
                    for c in button['coordinates'])
                coord.reverse()
                self.add_button(b, coord, button['type'])
        for b in SKULLTULALOCATIONS:
            button = SKULLTULALOCATIONS[b]
            for m in button['maps']:
                assert m in MAPSPEC
            if spec in button['maps']:
                coord = list(
                    int(c * MAPSCALE * self.scale * self.spec['mapscale'])
                    for c in button['coordinates'])
                coord.reverse()
                self.add_button(b, coord, button['type'])
        for b in DUNGEONLOCATIONS:
            button = DUNGEONLOCATIONS[b]
            if 'maps' not in button:
                continue
            for m in button['maps']: assert m in MAPSPEC
            if 'restriction' in button:
                if ((button['restriction' ]== 'scrubshuffle'
                     and not CONFIG['show_scrubs']) or
                        (button['restriction'] == 'shopsanity'
                         and not CONFIG['show_shops'])):
                    continue
            if spec in button['maps']:
                coord = list(
                    int(c * MAPSCALE * self.scale * self.spec['mapscale'])
                    for c in button['location'])
                coord.reverse()
                self.add_button(b, coord, 'dungeon')
        # Restore latest button states.
        self._restore_autosave()
        self.update_buttons()
        # Prepare for linked maps.
        self.links = {}
        self.childmaps = {}
        # Register this window with location tracker.
        self.tracker.register_gui(self)
    def _update_availability(self) -> None:
        '''
        Update availability database.

        Writes:
            available
        '''
        if self.spec['loctype'] == 'dungeon':
            # Dungeon maps show both item and skulltula locations.
            self.available = self.tracker.check_availability('item')
            self.available.update(self.tracker.check_availability('skulltula'))
        else:
            self.available = self.tracker.check_availability(
                self.spec['loctype'])
    def _set_colour(self, button: str, colour: str, display) -> None:
        '''
        Set button colour.

        Args:
            button: button name
            colour: colour scheme
            display: map display object
        '''
        buttontype = display.buttons[button]['type']
        if buttontype == 'dungeon':
            # Skulltula maps recolour dungeon buttons with spider colours.
            if self.identifier.startswith('skulls_'):
                buttontype = 'spider'
        display.m.itemconfigure(
            display.buttons[button]['id'],
            activefill=BUTTONTYPE[buttontype]['colours'][colour]['active'],
            fill=BUTTONTYPE[buttontype]['colours'][colour]['normal'],
            outline='black', width=1)
    def add_button(
            self, name: str, location: typing.Sequence[int],
            buttontype: str) -> None:
        '''
        Add a button to map.

        Args:
            name: identifier for new button
            location: coordinates for centre of button
            buttontype: type of button
        '''
        if buttontype not in BUTTONTYPE:
            buttontype = 'standard'
        new = BUTTONTYPE[buttontype]['shape'](self, location)
        self.buttons[name] = {
            'id': new, 'type': buttontype, 'state': True, 'links': set()}
        self._set_colour(name, 'on', self)
        if buttontype == 'dungeon':
            self.m.tag_bind(
                new, '<ButtonRelease-1>', lambda _: self._click_dungeon(name))
        else:
            self.m.tag_bind(
                new, '<ButtonRelease-1>', lambda _: self._click_button(name))
        # Every click also triggers an autosave (and a parent refresh).
        self.m.tag_bind(
            new, '<ButtonRelease-1>', self.autosave, add='+')
        if self.parent is not None:
            self.m.tag_bind(
                new, '<ButtonRelease-1>', self.parent.update_buttons,
                add='+')
        # Show the location name in the bottom label on hover.
        self.m.tag_bind(
            new, '<Enter>', lambda _: self.helpertext.set(name))
        self.m.tag_bind(
            new, '<Leave>', lambda _: self.helpertext.set(''))
    def _click_button(self, name: str) -> None:
        '''
        Event on clicking a button.

        Args:
            name: name of the clicked-on button
        '''
        self._switch_button(name, self)
        # Propagate the state change to the same button on linked maps.
        for links in self.buttons[name]['links']:
            self.links[links]._switch_button(name, self.links[links])
    def _click_dungeon(self, name: str) -> None:
        '''
        Event on clicking a dungeon button.

        Args:
            name: name of the clicked-on button
        '''
        # Re-show the existing child window, or create it on first click
        # (or after it was destroyed).
        try:
            self.childmaps[name].deiconify()
        except (KeyError, tk.TclError):
            self.childmaps[name] = MapDisplay(name, self.tracker, self)
    def _switch_button(self, name: str, mapdisplay):
        '''
        Switch button state.

        Args:
            name: name of the button to be switched
            mapdisplay: map button belongs to
        '''
        colours = BUTTONTYPE[self.buttons[name]['type']]['colours']
        new = not mapdisplay.buttons[name]['state']
        if new:
            if self.buttons[name]['type'] == 'dungeon':
                assert False  # Never should be here.
            else:
                nc = self.available[name]
                nc = 'on' if nc else 'unavailable'
                if 'adult' in self.identifier and not self.tracker.is_adult():
                    nc = 'unavailable'
            self._set_colour(name, nc, mapdisplay)
        else:
            self._set_colour(name, 'off', mapdisplay)
        mapdisplay.buttons[name]['state'] = new
    def link_buttons(self, other_map) -> None:
        '''
        Link buttons with buttons from different map.

        Linking is one-way. That means that to create a two-way link, each
        object needs to run this method.

        Args:
            other_map: map object to link with
        '''
        self.links[other_map.identifier] = other_map
        for button in self.buttons:
            if button in other_map.buttons:
                self.buttons[button]['links'].add(other_map.identifier)
    def update_buttons(self, args = None) -> None:
        '''
        Update availability of locations.
        '''
        mapping = {True: 'on', False: 'unavailable'}
        self._update_availability()
        for button in self.buttons:
            if self.buttons[button]['state']:
                if self.buttons[button]['type'] == 'dungeon':
                    nc = self.update_dungeon(button)
                else:
                    nc = self.available[button]
                    nc = 'on' if nc else 'unavailable'
                    if 'adult' in self.identifier and not self.tracker.is_adult():
                        nc = 'unavailable'
                try:
                    self._set_colour(button, nc, self)
                except tk.TclError as err:
                    # The window was closed; tell the tracker to drop it.
                    raise world.DisplayGone() from err
    def update_dungeon(self, dungeonbutton: dict) -> str:
        '''
        Check supposed display state of dungeon button.

        Args:
            dungeonbutton: dungeon location
        Returns:
            str: 'off', 'on', 'partial' or 'unavailable'
        '''
        buttonchildren = self.tracker.dungeon_locations(dungeonbutton)
        # Index 0 holds item locations, index 1 skulltula locations.
        buttonchildren = (
            buttonchildren[0] if self.identifier.startswith('item_')
            else buttonchildren[1])
        try:
            childbuttons = self._load_autosave(dungeonbutton)
        except KeyError:
            # No autosave yet: treat every child location as unchecked.
            childbuttons = {}
            for child in buttonchildren:
                childbuttons[child] = True
        finally:
            # Collect the availability of all still-unchecked children.
            children = []
            for child in buttonchildren:
                try:
                    if childbuttons[child]:
                        children.append(self.available[child])
                except KeyError:
                    pass
            if not children:
                nc = 'off'
            elif all(children):
                nc = 'on'
            elif any(children):
                # Some children reachable: 'on' only if the whole dungeon
                # could be cleared, otherwise 'partial'.
                fullclear = self.tracker.dungeon_availability(
                    dungeonbutton,
                    ('item' if self.identifier.startswith('item_')
                     else 'skulltula'))
                nc = 'on' if fullclear else 'partial'
            else:
                nc = 'unavailable'
        return nc
    def reset(self) -> None:
        '''
        Reset location tracker.
        '''
        for button in self.buttons:
            self.buttons[button]['state'] = True
        self.tracker.reset()
        self.update_buttons()
    def store(self) -> dict:
        '''
        Return location states for storage.

        Returns:
            dict: list of item locations and their state
        '''
        states = {}
        for button in self.buttons:
            states[button] = self.buttons[button]['state']
        return states
    def restore(self, states: dict) -> None:
        '''
        Restore location states from storage.

        Args:
            states: list of item locations and their state
        '''
        for button in states:
            try:
                self.buttons[button]['state'] = states[button]
            except KeyError:
                # Saved button no longer exists on this map; skip it.
                continue
            self._set_colour(button, 'on' if states[button] else 'off', self)
    def autosave(self, args, linked: bool = True) -> None:
        '''
        Autosave location states.

        Args:
            linked: if True, also store linked maps
        '''
        storage.autosave('Maps,{0:s}'.format(self.identifier), self)
        if linked:
            # linked=False on the recursive call prevents infinite loops.
            for link in self.links:
                self.links[link].autosave(None, False)
    def _restore_autosave(self) -> None:
        '''
        Restore autosave location states.
        '''
        try:
            self.restore(self._load_autosave(self.identifier))
        except KeyError:
            self.restore({})
    def _load_autosave(self, mapid: str) -> dict:
        '''
        Load autosave file.

        Args:
            mapid: map identifier
        Returns:
            dict: autosave file contents
        '''
        return storage.load_save()['Maps,{0:s}'.format(mapid)]
    def _standard_icon(self, location: typing.Sequence[int]) -> int:
        '''Rectangular symbol'''
        shape = -20, -20, 20, 20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_rectangle(*loc)
        return new
    def _enemy_icon(self, location: typing.Sequence[int]) -> int:
        '''Enemy symbol'''
        shape = -20, 0, 10, 15, 20, 0, 10, -15
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _npc_icon(self, location: typing.Sequence[int]) -> int:
        '''NPC symbol'''
        shape = -20, 20, 0, -20, 20, 20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _shop_icon(self, location: typing.Sequence[int]) -> int:
        '''Shop symbol'''
        shape = -15, -20, 15, 20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_oval(*loc)
        return new
    def _song_icon(self, location: typing.Sequence[int]) -> int:
        '''Song symbol'''
        shape = -20, -15, 20, 15
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_oval(*loc)
        return new
    def _free_icon(self, location: typing.Sequence[int]) -> int:
        '''Free symbol'''
        shape = 0, -20, 20, 0, 0, 20, -20, 0
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _heart_icon(self, location: typing.Sequence[int]) -> int:
        '''Heart symbol'''
        shape = (0, -10, 15, -20, 20, -15, 10, 15,
                 0, 20, -10, 15, -20, -15, -15, -20, 0, -10)
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc, smooth=1)
        return new
    def _fairy_icon(self, location: typing.Sequence[int]) -> int:
        '''Fairy symbol'''
        shape = (0, -20, 10, -20, 20, 10, 20, 20, 10, 20, 0, 10,
                 -10, 20, -20, 20, -20, 10, -10, -20)
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc, smooth=1)
        return new
    def _sub_icon(self, location: typing.Sequence[int]) -> int:
        '''Subterranean symbol'''
        shape = -20, -20, 0, 20, 20, -20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _stone_icon(self, location: typing.Sequence[int]) -> int:
        '''Gossip stone symbol'''
        shape = 0, -20, 20, -10, 0, 20, -20, -10
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _dungeon_icon(self, location: typing.Sequence[int]) -> int:
        '''Dungeon symbol'''
        shape = -30, -30, 30, 30
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_rectangle(*loc)
        return new
    def _ganon_icon(self, location: typing.Sequence[int]) -> int:
        '''Go-mode symbol'''
        shape = (0, -40, 9, -10, 40, -10, 15, 9, 24, 40, 0, 21,
                 -24, 40, -15, 9, -40, -10, -9, -10)
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _night_icon(self, location: typing.Sequence[int]) -> int:
        '''Night skulltula symbol'''
        shape = -20, -20, -10, -20, 20, 0, -10, 20, -20, 20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _high_icon(self, location: typing.Sequence[int]) -> int:
        '''High skulltula symbol'''
        shape = 0, -20, 20, 10, 20, 20, -20, 20, -20, 10
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _bean_icon(self, location: typing.Sequence[int]) -> int:
        '''Bean skulltula symbol'''
        shape = -20, -20, -20, -10, 0, 20, 20, -10, 20, -20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
    def _tree_icon(self, location: typing.Sequence[int]) -> int:
        '''Bean patch symbol'''
        shape = 20, -20, 10, -20, -20, 0, 10, 20, 20, 20
        loc = _make_symbol_coordinates(location, shape, self.spec['mapscale'])
        new = self.m.create_polygon(*loc)
        return new
def _make_symbol_coordinates(
        location: typing.Sequence[int], shape: typing.Sequence[int],
        mapscale: float) -> list:
    '''
    Create corner points for map symbol.

    Args:
        location: centre of symbol
        shape: symbol corners relative to centre point
        mapscale: additional map scaling factor
    Returns:
        list: flat list of coordinates for symbol creation
    '''
    # NOTE(review): *mapscale* is currently unused -- symbols scale only
    # with the module-level MAPSCALE and the 'map_size' config option;
    # confirm this is intended.
    # Repeat the centre coordinates once per (x, y) corner of the shape.
    centre = list(location[:2]) * (len(shape) // 2)
    scaled = tuple(int(c * MAPSCALE * CONFIG.get('map_size')) for c in shape)
    return [base + offset for base, offset in zip(centre, scaled)]
import operator
import tkinter as tk
import tkinter.ttk as ttk
import typing
from ..config import CONFIG
from . import misc
__all__ = 'ConfigWindow',
class ConfigWindow(tk.Toplevel):
    '''
    Config option window.

    Instance variables:
        configvalues: tkinter variables backing the editable options
        frame: main container frame
    '''
    def __init__(self):
        super().__init__()
        self.title('Configuration')
        self.configvalues = {}
        self.frame = ttk.Frame(self)
        self.frame.grid(sticky=misc.A)
        # Rows 1, 3, 5 and 10 are display-only; all other rows write their
        # value straight into CONFIG as the user edits them.
        self._display(1,'Autosave', CONFIG['autosave'])
        self._float(2, 'Icon size', 'icon_size')
        self._display(3, 'Item layout', CONFIG['layout'])
        self._float(4, 'Map size', 'map_size')
        self._display(5, 'Ruleset', CONFIG['ruleset'])
        self._string(6, 'Rules string', 'rule_string', _validate_rule_string)
        self._checkbox(7, 'Show disabled locations', 'show_disabled')
        self._checkbox(8, 'Show all scrubs', 'show_scrubs')
        self._checkbox(9, 'Show shops', 'show_shops')
        self._display(10, 'Window Layout', CONFIG['window_layout'])
    def _display(self, row: int, name: str, value: str) -> None:
        '''
        Non-editable string display

        Args:
            row: row placement
            name: name of option
            value: current value of option
        '''
        left = ttk.Label(self.frame, text=name)
        left.grid(column=0, padx=6, pady=3, row=row, sticky=tk.W)
        # Readonly entry still allows the user to select/copy the value.
        right = ttk.Entry(self.frame)
        right.insert(0, value)
        right.configure(state='readonly')
        right.grid(column=1, padx=6, pady=3, row=row, sticky=tk.W+tk.E)
    def _string(self, row: int, name: str, configname: str,
                validation: typing.Callable[[str], bool]) -> None:
        '''
        Editable string display

        Args:
            row: row placement
            name: displayed name of option
            configname: name of option in config file
            validation: function used to validate user-entered content
        '''
        left = ttk.Label(self.frame, text=name)
        left.grid(column=0, padx=6, pady=3, row=row, sticky=tk.W)
        self.configvalues[configname] = tk.StringVar()
        # NOTE(review): the stored value is upper-cased for display --
        # confirm this is intended for all string options.
        self.configvalues[configname].set(CONFIG[configname].upper())
        # '%P' passes the prospective new entry content to the validator.
        validater = (self.register(validation), '%P')
        right = ttk.Entry(
            self.frame, validate='all', validatecommand=validater,
            textvariable=self.configvalues[configname], width=40)
        right.grid(column=1, padx=6, pady=3, row=row, sticky=tk.W+tk.E)
    def _float(self, row: int, name: str, configname: str) -> None:
        '''
        Editable float display

        Args:
            row: row placement
            name: displayed name of option
            configname: name of option in config file
        '''
        left = ttk.Label(self.frame, text=name)
        left.grid(column=0, padx=6, pady=3, row=row, sticky=tk.W)
        self.configvalues[configname] = tk.StringVar()
        self.configvalues[configname].set(str(CONFIG[configname]))
        # The config entry name is passed as a fixed first argument so the
        # shared validator knows which option to update.
        validater = (self.register(_validate_float), configname, '%P')
        right = ttk.Entry(
            self.frame, validate='all', validatecommand=validater,
            textvariable=self.configvalues[configname], width=40)
        right.grid(column=1, padx=6, pady=3, row=row, sticky=tk.W+tk.E)
    def _checkbox(self, row: int, name: str, configname: str) -> None:
        '''
        Checkbox

        Args:
            row: row placement
            name: displayed name of option
            configname: name of option in config file
        '''
        left = ttk.Label(self.frame, text=name)
        left.grid(column=0, padx=6, pady=3, row=row, sticky=tk.W)
        self.configvalues[configname] = tk.IntVar()
        try:
            self.configvalues[configname].set(int(CONFIG[configname]))
        except KeyError:
            # Option missing from the config file: default to unchecked.
            self.configvalues[configname].set(0)
        right = ttk.Checkbutton(
            self.frame,
            command=lambda: CONFIG.set(
                configname, bool(self.configvalues[configname].get())),
            takefocus=False, variable=self.configvalues[configname])
        right.grid(column=1, padx=6, pady=3, row=row, sticky=tk.W+tk.E)
def _validate_rule_string(newstring: str) -> bool:
    '''
    Validate randomiser rules string and store if string is valid.

    Args:
        newstring: new config string
    Returns:
        True: always, so tkinter never rejects the keystroke itself
    '''
    # Only purely alphanumeric strings are written to the config; invalid
    # input remains visible in the entry widget but is not stored.
    if newstring.isalnum():
        CONFIG.set('rule_string', newstring)
    return True
def _validate_float(configname: str, newfloat: str) -> bool:
    '''
    Validate user-entered string and store it if it is a valid float.

    Args:
        configname: name of config entry
        newfloat: new config string
    Returns:
        bool: True if *newfloat* parses as a float, else False
    '''
    # Rejecting the edit (returning False) makes tkinter refuse the
    # keystroke, so partial/invalid numbers never reach the config.
    try:
        value = float(newfloat)
    except ValueError:
        return False
    CONFIG.set(configname, value)
    return True
import tkinter as tk
import tkinter.ttk as ttk
import typing
from ..config import CONFIG
from ..config import layout as storage
from .. import items
from . import misc
__all__ = 'ItemWindow',
class ItemWindow(tk.Toplevel):
    '''
    Inventory item display.

    Instance variables:
        tracker: item tracker object
        layout: item layout in display
        helpertext: helper text variable
        scaling: scaling factor of individual buttons
    '''
    def __init__(self, tracker: items.ItemTracker):
        '''
        Args:
            tracker: item tracker object
        '''
        super().__init__()
        self.title('Items')
        self.protocol('WM_DELETE_WINDOW', self.destroy)
        self.tracker = tracker
        self.frame = ttk.Frame(self)
        self.frame.grid(column=0, row=0, sticky=misc.A)
        self.helpertext = tk.StringVar()
        self.helper = ttk.Label(
            self, textvariable=self.helpertext,
            font=('Arial', int(12 * CONFIG['icon_size'])))
        self.helper.grid(column=0, row=1, sticky=tk.S)
        self.scaling = _scale_factors()
        # Only items with a grid position, a display name and an icon get
        # a button; anything else has no GUI presence in this window.
        for item in self.tracker:
            if (
                    self.tracker[item].location and
                    self.tracker[item].displayname and
                    self.tracker[item].icon):
                self._item_display(self.tracker[item])
    def _item_display(self, item: items.iobj) -> None:
        '''
        Make and place single item display object.

        Args:
            item: item object
        Writes:
            buttons
        '''
        button = ItemButton(item, self.frame, self.scaling)
        # Hovering shows the item description in the helper label.
        button.bind(
            '<Enter>', lambda _: self.helpertext.set(item.display()))
        button.bind(
            '<Leave>', lambda _: self.helpertext.set(''))
        # Left-click increases, right-click decreases the item counter;
        # each click also refreshes the helper text and autosaves.
        button.bind('<ButtonRelease-1>', item.increase)
        button.bind('<ButtonRelease-3>', item.decrease)
        button.bind(
            '<ButtonRelease-1>',
            lambda _: self.helpertext.set(item.display()),
            add='+')
        button.bind(
            '<ButtonRelease-3>',
            lambda _: self.helpertext.set(item.display()),
            add='+')
        button.bind(
            '<ButtonRelease-1>',
            lambda _: storage.autosave('Items', self.tracker),
            add='+')
        button.bind(
            '<ButtonRelease-3>',
            lambda _: storage.autosave('Items', self.tracker),
            add='+')
        button.grid(column=item.location[0], row=item.location[1])
        item.register_button(button)
    def reset(self) -> None:
        '''
        Reset items to default.
        '''
        self.tracker.reset()
class ItemButton(tk.Canvas):
    '''
    Single item display object.

    Instance variables:
        scaling: (upscale, downscale) icon scaling factors
        img: canvas id of the currently displayed image
        icon: current PhotoImage (kept to prevent garbage collection)
    '''
    def __init__(
            self, item: items.iobj, parent: ttk.Frame, scaling: (int, int)):
        '''
        Args:
            item: item object
            parent: parent widget for object
            scaling: button up- and downscale
        '''
        super().__init__(
            parent, height=32 * scaling[0] / scaling[1],
            width=32 * scaling[0] / scaling[1])
        self.scaling = scaling
        icon = self._scaled_icon(item.icon[0][0], parent)
        self.img = self.create_image(0, 0, anchor=tk.NW, image=icon)
        self.icon = icon
        self.check_state(item)
    def _scaled_icon(self, path: str, master) -> tk.PhotoImage:
        '''
        Load an image file and apply the up-/downscale factors.

        Args:
            path: image file to load
            master: widget used as PhotoImage master
        Returns:
            tk.PhotoImage: scaled icon
        '''
        icon = tk.PhotoImage(file=path, master=master)
        if self.scaling[0] != 1:
            icon = icon.zoom(self.scaling[0], self.scaling[0])
        if self.scaling[1] != 1:
            icon = icon.subsample(self.scaling[1], self.scaling[1])
        return icon
    def check_state(self, item: items.iobj) -> None:
        '''
        Check button state and make adjustments if necessary.

        Args:
            item: item object
        '''
        self.delete(self.img)
        icon = self._scaled_icon(item.icon[item.index()][0], self.master)
        if not item.state():
            # Item not yet obtained: grey out the icon pixel by pixel.
            # Pure black/white pixels are left untouched; everything else
            # is averaged to grey and brightened (capped at 255).
            for x in range(icon.width()):
                for y in range(icon.height()):
                    bw = sum(icon.get(x, y)) // 3
                    if bw in (0, 255):
                        continue
                    bw *= 2
                    bw = 255 if bw > 255 else int(bw)
                    icon.put('#{0:02x}{0:02x}{0:02x}'.format(bw), (x, y))
        self.img = self.create_image(0, 0, anchor=tk.NW, image=icon)
        self.icon = icon
def _scale_factors() -> (int, int):
    '''
    Calculate up- and downscale factor.

    Returns:
        int: upscale factor
        int: downscale factor
    '''
    scaling = CONFIG['icon_size']
    # Find the smallest integer multiplier that turns the configured
    # (possibly fractional) icon size into an integer upscale factor.
    for up in range(1, 1000):
        if not (scaling * up) % 1:
            upscale = int(scaling * up)
            break
    else:
        # No clean multiplier found within range: reset to a sane default.
        CONFIG.set('icon_size', 1)
        assert False
    downscale = int(upscale // scaling)
    return upscale, downscale
import tkinter as tk
import tkinter.ttk as ttk
import typing
from . import misc
__all__ = 'MenuWindow',
class MenuWindow(tk.Toplevel):
    '''
    Main control window.

    Instance variables:
        frame: main container frame
        buttons: mapping of button name to created ttk.Button
    '''
    def __init__(self, commands: typing.Dict[str, typing.Callable[[], None]]):
        '''
        Args:
            commands: set of functions for the buttons
        '''
        super().__init__()
        self.title('Menu')
        self.frame = ttk.Frame(self)
        self.frame.grid(sticky=misc.A)
        self.buttons = {
            'items': self._make_button((0, 0), commands['items'], 'Items'),
            'dungeons': self._make_button(
                (1, 0), commands['dungeons'], 'Dungeons'),
            'itemmap': self._make_button(
                (0, 1), commands['itemmap'], 'Item Map'),
            'skullmap': self._make_button(
                (1, 1), commands['skullmap'], 'Skulltula Map'),
            'hints': self._make_button((1, 2), commands['hints'], 'Hints'),
            'config': self._make_button((0, 3), commands['config'], 'Options'),
            'load': self._make_button((0, 4), commands['load'], 'Load'),
            'save': self._make_button((1, 4), commands['save'], 'Save'),
            'quit': self._make_button((0, 5), commands['quit'], 'Quit'),
            'reset': self._make_button((1, 5), commands['itemreset'], 'Reset'),
        }
    def _make_button(
            self, loc: typing.Sequence[int],
            action: typing.Callable[[], None],
            text: typing.Union[str, tk.StringVar]) -> ttk.Button:
        '''
        Shortcut to place buttons.

        Args:
            loc: (column, row) of button on 2D grid
            action: function to call when button is pressed
            text: static label or tkinter variable used as button text
        Returns:
            ttk.Button: created button
        '''
        button = ttk.Button(self.frame, command=action)
        if isinstance(text, tk.StringVar):
            button.configure(textvariable=text)
        else:
            assert isinstance(text, str)
            button.configure(text=text)
        button.grid(column=loc[0], row=loc[1], sticky=tk.N+tk.W+tk.E)
        return button
import operator
import tkinter as tk
import tkinter.ttk as ttk
import typing
from ..config import CONFIG
from ..config import layout as storage
from .. import hints
from .. import rulesets
from . import misc
__all__ = 'HintDisplay',
def _font(size: str = 'normal') -> tuple:
    '''
    Return scaled font.

    Returns:
        tuple: tkinter font variable
    '''
    base = 8 if size == 'small' else 12
    return ('Arial', int(base * CONFIG['icon_size']))
class HintDisplay(tk.Toplevel):
    '''
    Hints window

    Instance variables:
        rule: item location rules
        regions: list of regions in the game
        items: list of items
        locations: list of potential hint locations
        dungeon: name of dungeons
        obligatory: list of locations with guaranteed hint
        loc_fields: location hint entries
        woth_fields: way of the hero entries
    '''
    def __init__(self):
        super().__init__()
        self.title('Hints')
        self.rules = rulesets.Ruleset()
        # Regions are sorted by a display key with leading 'outside '/'the '
        # stripped, but the original names are kept for the menus.
        regions = [
            [r, r.replace('outside ', '').replace('the ', '')]
            for r in self.rules.list_regions() if isinstance(r, str)]
        regions.sort(key=operator.itemgetter(1))
        self.regions = [r[0] for r in regions]
        self.regions.insert(0, '')
        allitems = self.rules.get_hint_items('all')
        progression = list(hints.HINTITEMS)
        progression.sort()
        items = [allitems[itm][1].replace('the ', '') for itm in progression]
        # Two sentinel entries: blank and '<unimportant>'.
        items.insert(0, '<unimportant>')
        items.insert(0, '')
        self.items = items
        locations = self.rules.get_hint_items('location')
        locations.sort(key=operator.attrgetter('name'))
        self.locations = [loc.name for loc in locations]
        self.locations.insert(0, '<location>')
        dungeons = self.rules.get_hint_items('dungeon')
        dungeons.sort(key=operator.attrgetter('name'))
        self.dungeons = [allitems[dgn.name][1] for dgn in dungeons]
        self.dungeons.insert(0, '<dungeon>')
        obligatory = self.rules.get_hint_items('alwaysLocation')
        obligatory.sort(key=operator.attrgetter('name'))
        self.obligatory = obligatory
        # Shared ttk styles; the 'alternate' state is (ab)used to mark a
        # hint entry as confirmed (green tick / greyed menus).
        style = ttk.Style()
        style.configure('hintdisplay.TButton', font=_font(), width=2)
        style.map(
            'hintdisplay.TButton', background=[('alternate', 'green')])
        style.configure('hintdisplay.TMenubutton', font=_font())
        style.map(
            'hintdisplay.TMenubutton', foreground=[('disabled', 'grey')])
        style.configure('hintdisplay.const.TMenubutton', font=_font())
        style.map(
            'hintdisplay.const.TMenubutton',
            foreground=[('disabled', '!alternate', 'black'),
                        ('disabled', 'alternate', 'grey')])
        style.configure('hintdisplay.TLabel', font=_font())
        style.map(
            'hintdisplay.TButton', foreground=[('disabled', 'grey')])
        self.topframe = ttk.Frame(self)
        self.topframe.grid(sticky=misc.A)
        self.wothframe = ttk.Frame(self.topframe)
        self.wothframe.grid(column=0, row=0, sticky=tk.N + tk.W)
        self._build_woth()
        self.locframe = ttk.Frame(self.topframe)
        self.locframe.grid(column=0, row=1, sticky=tk.S + tk.E + tk.W)
        self._build_loc()
        self.restore()
        self._set_autosave()
    def _build_woth(self) -> None:
        '''
        Make 'Way of the Hero' GUI.
        '''
        self.wothlabel = ttk.Label(
            self.wothframe, font=_font(), text='Way of the Hero')
        self.wothlabel.grid(column=0, row=0, sticky=tk.N)
        self.woth_fields = {}
        # Four 'Way of the Hero' rows (1..4).
        for row in range(1, 5):
            self.woth_fields[row] = WothEntry(
                row, self.wothframe, self._autosave, self.regions)
    def _build_loc(self) -> None:
        '''
        Make generic location hint GUI.
        '''
        self.loclabel = ttk.Label(
            self.locframe, font=_font(), text='Location hints')
        self.loclabel.grid(column=0, row=0, sticky=tk.N)
        self.loc_fields = {}
        # Rows 1..len(obligatory): guaranteed hints with a fixed location.
        for row in range(1, len(self.obligatory) + 1):
            self.loc_fields[row] = LocEntry(
                row, self.locframe, self._autosave, self.items, self.regions,
                self.obligatory[row - 1].name)
        # Remaining rows up to 12: free-choice named-location hints.
        for row in range(len(self.obligatory) + 1, 13):
            self.loc_fields[row] = LocEntry(
                row, self.locframe, self._autosave, self.items, self.locations)
        # Rows 13..16: dungeon hints.
        for row in range(13, 17):
            self.loc_fields[row] = LocEntry(
                row, self.locframe, self._autosave, self.items, self.dungeons)
        # Rows 17..33: region hints.
        for row in range(17, 34):
            self.loc_fields[row] = LocEntry(
                row, self.locframe, self._autosave, self.items, self.regions)
    def restore(self) -> None:
        '''
        Restore data from save.
        '''
        try:
            data = storage.load_save()['Hints']
        except KeyError:
            return
        if not data:
            return
        for row in range(1, 5):
            self.woth_fields[row].variable.set(data['woth'][row - 1][1])
            if data['woth'][row - 1][0]:
                self.woth_fields[row].okpress()
        for row in range(1, len(self.obligatory) + 1):
            self.loc_fields[row].itemvar.set(data['loc'][row - 1][1])
        # Older saves may contain fewer rows; stop at the first gap.
        for row in range(len(self.obligatory) + 1, 34):
            try:
                self.loc_fields[row].itemvar.set(data['loc'][row - 1][1])
                self.loc_fields[row].locvar.set(data['loc'][row - 1][2])
            except IndexError:
                break
        for row in range(1, 34):
            try:
                if data['loc'][row - 1][0]:
                    self.loc_fields[row].okpress()
            except IndexError:
                break
        self._autosave()
    def entry_data(self) -> dict:
        '''
        Return all currently set data.

        Returns:
            dict: {woth: list of tuples, loc: list of tuples}
        '''
        ret = {}
        ret['woth'] = []
        for row in range(1, 5):
            ret['woth'].append(
                (self.woth_fields[row].statetrack.get(),
                 self.woth_fields[row].variable.get()))
        ret['loc'] = []
        for row in range(1, 34):
            ret['loc'].append(
                (self.loc_fields[row].statetrack.get(),
                 self.loc_fields[row].itemvar.get(),
                 self.loc_fields[row].locvar.get()))
        return ret
    def _autosave(self, *args) -> None:
        '''
        Autosave
        '''
        storage.autosave('Hints', hints.HintTracker(self.entry_data()))
    def _set_autosave(self):
        '''
        Establish autosaving.
        '''
        # Any change to any entry variable triggers an autosave.
        for row in range(1, 5):
            self.woth_fields[row].variable.trace('w', self._autosave)
        for row in range(1, 34):
            self.loc_fields[row].itemvar.trace('w', self._autosave)
            self.loc_fields[row].locvar.trace('w', self._autosave)
    def reset(self):
        '''
        Reset to default state.
        '''
        for row in range(1, 5):
            self.woth_fields[row].delpress()
        for row in range(1, 34):
            self.loc_fields[row].delpress()
class LocEntry(ttk.Frame):
    '''
    Entry in location hint GUI.

    Instance variables:
        autosave: function for autosaving
        default: default location string
        itemvar: currently chosen item
        itemmenu: item menu object
        locvar: currently chosen location
        locmenu: location menu object
        obligatory: True of this hint is a guaranteed location
    '''
    def __init__(self, row: int, parent: ttk.Frame, autosave,
                 items: typing.Sequence, regions: typing.Sequence,
                 obligatory: str = None,):
        '''
        Args:
            row: row placement in parent widget
            parent: parent widget
            autosave: autosave function
            items: list of possible items with hints
            regions: list of location names pointed at by generic hints
            obligatory: if set, this entry will be a guaranteed hint
        '''
        super().__init__(parent)
        itemvar = tk.StringVar()
        itemmenu = ttk.OptionMenu(
            self, itemvar, *items, style='hintdisplay.TMenubutton')
        itemmenu.config(width=18)
        itemmenu.nametowidget(itemmenu.cget('menu')).config(font=_font('small'))
        itemmenu.grid(column=0, row=0, sticky=misc.A)
        locvar = tk.StringVar()
        if not obligatory:
            # Free-choice hint: user picks the location from a menu.
            locmenu = ttk.OptionMenu(
                self, locvar, *regions, style='hintdisplay.TMenubutton')
            locmenu.config(width=22)
            locmenu.nametowidget(locmenu.cget('menu')).config(font=_font())
        else:
            # Guaranteed hint: location is fixed and cannot be changed.
            locmenu = ttk.OptionMenu(
                self, locvar, style='hintdisplay.const.TMenubutton')
            locmenu.config(width=22)
            locmenu.state(('disabled',))
            locvar.set(obligatory)
        locmenu.grid(column=1, row=0, sticky=misc.A)
        okbutton = ttk.Button(
            self, command=self.okpress, text='✔', style='hintdisplay.TButton')
        okbutton.grid(column=2, row=0)
        # statetrack mirrors the confirmed/unconfirmed toggle for saving.
        statetrack = tk.BooleanVar()
        statetrack.set(False)
        delbutton = ttk.Button(
            self, command=self.delpress, text='❌',
            style='hintdisplay.TButton')
        delbutton.grid(column=3, row=0)
        self.grid(column=0, row=row, sticky=tk.E + tk.W)
        self.autosave = autosave
        self.default = regions[0]
        self.itemvar = itemvar
        self.itemmenu = itemmenu
        self.locvar = locvar
        self.locmenu = locmenu
        self.obligatory = bool(obligatory)
        self.okbutton = okbutton
        self.statetrack = statetrack
        self.delbutton = delbutton
    def okpress(self, force_enable: bool = False) -> None:
        '''
        OK button press.

        Toggles between confirmed (menus locked, button marked) and
        editable state; *force_enable* always switches to editable.
        '''
        if self.itemmenu.instate(('disabled',)) or force_enable:
            self.itemmenu.state(('!disabled',))
            if self.obligatory:
                self.locmenu.state(('!alternate',))
            else:
                self.locmenu.state(('!disabled',))
            self.okbutton.state(('!alternate',))
            self.statetrack.set(False)
        else:
            self.itemmenu.state(('disabled',))
            if self.obligatory:
                self.locmenu.state(('alternate',))
            else:
                self.locmenu.state(('disabled',))
            self.okbutton.state(('alternate',))
            self.statetrack.set(True)
        self.autosave()
    def delpress(self) -> None:
        '''
        Reset button press.
        '''
        self.itemvar.set('')
        # Guaranteed hints keep their fixed location on reset.
        if not self.obligatory:
            self.locvar.set(self.default)
        self.okpress(True)
class WothEntry(ttk.Frame):
    '''
    Entry in 'Way of the Hero' GUI.

    Instance variables:
        autosave: function for autosaving
        variable: currently chosen region
        menu: menu object
        okbutton: mark button
        delbutton: reset button
    '''
    def __init__(self, row: int, parent: ttk.Frame, autosave,
                 regions: typing.Sequence):
        '''
        Args:
            row: row placement in parent widget
            parent: parent widget
            autosave: autosave function
            regions: list of region names
        '''
        super().__init__(parent)
        variable = tk.StringVar()
        menu = ttk.OptionMenu(
            self, variable, *regions, style='hintdisplay.TMenubutton')
        menu.config(width=20)
        menu.nametowidget(menu.cget('menu')).config(font=_font())
        menu.grid(column=0, row=0, sticky=misc.A)
        okbutton = ttk.Button(
            self, command=self.okpress, text='✔', style='hintdisplay.TButton')
        okbutton.grid(column=1, row=0)
        # statetrack mirrors the confirmed/unconfirmed toggle for saving.
        statetrack = tk.BooleanVar()
        statetrack.set(False)
        delbutton = ttk.Button(
            self, command=self.delpress, text='❌',
            style='hintdisplay.TButton')
        delbutton.grid(column=2, row=0)
        self.grid(column=0, row=row, sticky=tk.E + tk.W)
        self.autosave = autosave
        self.variable = variable
        self.menu = menu
        self.okbutton = okbutton
        self.statetrack = statetrack
        self.delbutton = delbutton
    def okpress(self, force_enable: bool = False) -> None:
        '''
        OK button press.

        Toggles between confirmed (menu locked, button marked) and
        editable state; *force_enable* always switches to editable.
        '''
        if self.menu.instate(('disabled',)) or force_enable:
            self.menu.state(('!disabled',))
            self.okbutton.state(('!alternate',))
            self.statetrack.set(False)
        else:
            self.menu.state(('disabled',))
            self.okbutton.state(('alternate',))
            self.statetrack.set(True)
        self.autosave()
    def delpress(self) -> None:
        '''
        Reset button press.
        '''
        self.variable.set('')
        self.okpress(True)
import re
import threading
import tkinter as tk
import tkinter.filedialog as filedialog
import tkinter.messagebox as tkmbox
import tkinter.ttk as ttk
import typing
from ..config import layout as storage
from ..config import windows as window_layout
from .. import dungeons as dungeon_tracker
from .. import items as item_tracker
from .. import world as location_tracker
from . import config
from . import dungeons as dungeon_gui
from . import hints
from . import items as item_gui
from . import maps
from . import menu
__all__ = 'GraphicalInterface',
class GraphicalInterface(object):
    '''
    Main access point for everything GUI related.

    Instance variables:
        gui_root: tkinter framework root object
        gui_style: tkinter style class
        gui_app: tkinter application object
        commands: methods used to create new windows
        windows: collection of child windows
        item_tracker: inventory item tracker
        location tracker: item location tracker
        restart: indicator for required restart
    '''
    def __init__(self):
        '''
        Initialise GUI.
        '''
        self.gui_root = tk.Tk()
        self.gui_root.call('tk', 'scaling', 1)
        self.gui_style = ttk.Style()
        # self.gui_style.theme_use('clam')
        self.gui_app = ttk.Frame(self.gui_root)
        # The root window itself is never shown; all UI lives in toplevels.
        self.gui_root.withdraw()
        self.restart = threading.Event()
        self.location_tracker = location_tracker.LocationTracker()
        self.item_tracker = item_tracker.ItemTracker(self.location_tracker)
        self.commands = {
            'items': self._open_items,
            'itemreset': self._reset_items,
            'itemmap': self._open_itemmap,
            'itemmap_c': self._open_itemmap,
            'itemmap_a': self._open_itemmap,
            'skullmap': self._open_skullmap,
            'skullmap_c': self._open_skullmap,
            'skullmap_a': self._open_skullmap,
            'dungeons': self._open_dungeons,
            'hints': self._open_hints,
            'config': self._open_config,
            'load': self._load,
            'save': self._save,
            'quit': self.quit,
        }
        self.windows = {
            'menu': menu.MenuWindow(commands=self.commands),
            'items': None,
            'itemmap_c': None,
            'itemmap_a': None,
            'skullmap_c': None,
            'skullmap_a': None,
            'dungeons': None,
            'config': None,
            'hints': None,
        }
        self._restore_windows()
        self._prepare_windows()
        self.windows['menu'].protocol('WM_DELETE_WINDOW', self.quit)
    def _restore_windows(self) -> None:
        '''
        Restore previously stored window layout.
        '''
        layout = window_layout.load()
        for window in layout:
            if self.windows[window] is None:
                self.commands[window]()
            self.windows[window].geometry(
                '+{0:d}+{1:d}'.format(*layout[window]))
    def run(self) -> None:
        '''
        Run main GUI loop.
        '''
        self.gui_app.mainloop()
    def quit(self) -> None:
        '''
        Quit program.
        '''
        # Store the current window layout before tearing everything down.
        window_layout.save(self._window_layout())
        for window in self.windows:
            self.windows[window].withdraw()
        self.gui_app.quit()
    def _restart(self) -> None:
        '''
        Restart GUI.
        '''
        self.restart.set()
        self.quit()
    def _prepare_windows(self) -> None:
        '''
        Preload windows without displaying them.

        I don't really want to deal with the hassle of non-existing
        windows/trackers, so I do this.
        '''
        prepwindows = []
        for window in self.windows:
            if self.windows[window] is None:
                prepwindows.append(window)
        for window in prepwindows:
            self.commands[window]()
        for window in prepwindows:
            self.windows[window].withdraw()
    def _open_window(
            self, window: str,
            creator: typing.Callable[[], tk.Toplevel]) -> None:
        '''
        Open a window.

        Args:
            window: name of existing window object
            creator: window creation routine
        '''
        try:
            self.windows[window].deiconify()
        except (AttributeError, tk.TclError):
            # Window never created or already destroyed: make a fresh one.
            self.windows[window] = creator()
            self.windows[window].protocol(
                'WM_DELETE_WINDOW', self.windows[window].withdraw)
    def _open_items(self) -> None:
        self._open_window(
            'items', lambda: item_gui.ItemWindow(self.item_tracker))
    def _open_config(self) -> None:
        self._open_window('config', lambda: config.ConfigWindow())
    def _open_itemmap(self) -> None:
        # Child and adult maps are opened together and cross-linked so
        # that toggling a location updates both displays.
        self._open_window(
            'itemmap_c',
            lambda: maps.MapDisplay('item_child', self.location_tracker))
        self._open_window(
            'itemmap_a',
            lambda: maps.MapDisplay('item_adult', self.location_tracker))
        self.windows['itemmap_c'].link_buttons(self.windows['itemmap_a'])
        self.windows['itemmap_a'].link_buttons(self.windows['itemmap_c'])
    def _open_skullmap(self) -> None:
        self._open_window(
            'skullmap_c',
            lambda: maps.MapDisplay('skulls_child', self.location_tracker))
        self._open_window(
            'skullmap_a',
            lambda: maps.MapDisplay('skulls_adult', self.location_tracker))
        self.windows['skullmap_c'].link_buttons(self.windows['skullmap_a'])
        self.windows['skullmap_a'].link_buttons(self.windows['skullmap_c'])
    def _open_dungeons(self) -> None:
        self._open_window(
            'dungeons',
            lambda: dungeon_gui.DungeonWindow(
                dungeon_tracker.DungeonTracker(self.location_tracker)))
    def _open_hints(self) -> None:
        self._open_window('hints', lambda: hints.HintDisplay())
    def _window_layout(self) -> dict:
        '''
        Return current position of all windows.

        Returns:
            dict: {window name: (x, y)}
        '''
        layout = {}
        for window in self.windows:
            # The state() call is inside the try as well, so a missing or
            # destroyed window is skipped instead of raising.
            try:
                if self.windows[window].state() == 'withdrawn':
                    continue
                self.windows[window].deiconify()
            except (AttributeError, tk.TclError):
                continue
            # Geometry string is 'WxH+X+Y'; keep only the X/Y offsets.
            layout[window] = tuple(
                int(c) for c in re.match(
                    r'(\d+)x(\d+)([-+]\d+)([-+]\d+)',
                    self.windows[window].geometry()).groups()[2:])
        return layout
    def _reset_items(self) -> None:
        '''
        Reset all items to default.
        '''
        check = tkmbox.askokcancel(
            'Reset', 'This will delete all stored progress.',
            default=tkmbox.CANCEL, icon=tkmbox.WARNING)
        if not check:
            return
        storage.delete_autosave()
        for win in self.windows:
            try:
                self.windows[win].reset()
            except AttributeError:
                # Windows without a reset() method are simply skipped.
                pass
    def _save(self, path: str = None) -> None:
        '''
        Save state.

        Args:
            path: optional filename; if given, won't ask user for one
        '''
        if not path:
            path = filedialog.asksaveasfilename(defaultextension='.json')
        if path:
            storage.save_autosave(path)
    def _load(self) -> None:
        '''
        Load state.
        '''
        path = filedialog.askopenfilename(defaultextension='.json')
        if path:
            storage.restore_autosave(path)
            # A full GUI restart is required to pick up the loaded state.
            self.restart.set()
            self.quit()
import tkinter as tk
import tkinter.ttk as ttk
import typing
from ..config import CONFIG
from ..config import layout as storage
from .. import dungeons
from ..dungeons.lists import INFO, IMG, REWARDS
from ..items.itemobj import image
from . import misc
__all__ = 'DungeonWindow',
class DungeonWindow(tk.Toplevel):
    '''
    Dungeon check display.

    Instance variables:
        tracker: dungeon tracker object
        layout: dungeon layout in display
        helpertext: helper text variable
        scaling: up- and downscale factors for objects
    '''
    def __init__(self, tracker: dungeons.DungeonTracker):
        '''
        Args:
            tracker: dungeon tracker object
        '''
        super().__init__()
        self.title('Dungeons')
        self.tracker = tracker
        self.frame = ttk.Frame(self)
        self.frame.grid(column=0, row=0, sticky=misc.A)
        self.helpertext = tk.StringVar()
        self.helper = ttk.Label(
            self, textvariable=self.helpertext,
            font=('Arial', int(12 * CONFIG['icon_size'])))
        self.helper.grid(column=0, row=1, sticky=tk.S)
        self.scaling = _scale_factors()
        buttonstyle = ttk.Style()
        buttonstyle.configure('Dungeonbutton.TButton', relief=tk.FLAT)
        for dungeon in self.tracker:
            self._dungeon_display(self.tracker[dungeon])
    def _dungeon_display(self, dungeon: dungeons.dobj) -> None:
        '''
        Make and place single dungeons display object.

        Args:
            dungeon: dungeon object
        Writes:
            buttons
        '''
        widget = Dungeon(dungeon, self.frame, self.scaling)
        # Hovering shows the dungeon name in the helper label.
        widget.bind(
            '<Enter>', lambda _: self.helpertext.set(dungeon.displayname))
        widget.bind(
            '<Leave>', lambda _: self.helpertext.set(''))
        # Every click on any sub-button triggers an autosave.
        for button in widget.buttons:
            button.bind(
                '<ButtonRelease-1>',
                lambda _: storage.autosave('Dungeons', self.tracker),
                add='+')
            button.bind(
                '<ButtonRelease-3>',
                lambda _: storage.autosave('Dungeons', self.tracker),
                add='+')
        widget.grid(column=dungeon.location[0], row=dungeon.location[1],
                    sticky=tk.N+tk.W)
        dungeon.register_widget(widget)
    def reset(self) -> None:
        '''
        Reset dungeons to default.
        '''
        self.tracker.reset()
class Dungeon(ttk.Frame):
'''
Single dungeon display object.
'''
    def __init__(
            self, dungeon: dungeons.dobj, parent: ttk.Frame,
            scaling: (int, int)):
        '''
        Args:
            dungeon: dungeon object
            parent: parent widget for object
            scaling: button scale
        '''
        self.scaling = scaling
        scale = scaling[0] / scaling[1]
        super().__init__(parent)
        self.child = ttk.Label(self)
        self.child.grid(column=0, row=0, sticky=misc.A)
        # Dungeon picture (may be absent).
        icon = (
            tk.PhotoImage(file=dungeon.icon, master=parent)
            if dungeon.icon else None)
        icon = self._icon_scale(icon)
        self.pic = ttk.Label(self.child, image=icon)
        self.pic.grid(column=0, row=0)
        self.icon = icon
        # Reward display: starts as '?', clicking cycles through rewards.
        self.rewardname = '?'
        self.rewardimg = REWARDS['?']
        self.rewardicon = tk.PhotoImage(
            file=image(REWARDS['?'])[0], master=self)
        self.rewardicon = self._icon_scale(self.rewardicon)
        self.reward = tk.Canvas(
            self.child, height=32 * scale, width=32 * scale)
        self.rewardid = self.reward.create_image(
            0, 0, anchor=tk.NW, image=self.rewardicon)
        self.reward.bind(
            '<ButtonRelease-1>', lambda _: dungeon.cycle_reward(True))
        self.reward.bind(
            '<ButtonRelease-3>', lambda _: dungeon.cycle_reward(False))
        if INFO[dungeon.identifier]['reward']:
            self.reward.grid(column=2, row=0)
        # Boss key toggle (only shown if the dungeon has one).
        self.bosskeyicon = tk.PhotoImage(
            file=image(IMG['bosskey'])[0], master=parent)
        self.bosskeyicon = self._icon_scale(self.bosskeyicon)
        self.bosskey = tk.Canvas(
            self.child, height=32 * scale, width=32 * scale)
        self.bosskeyid = self.bosskey.create_image(
            0, 0, anchor=tk.NW, image=self.bosskeyicon)
        self.bosskey.bind('<ButtonRelease-1>', dungeon.toggle_bosskey)
        self.bosskey.bind('<ButtonRelease-3>', dungeon.toggle_bosskey)
        self.hasbosskey = False
        if dungeon.has_bosskey:
            self.hasbosskey = True
            self.bosskey.grid(column=3, row=0)
        # Small key counter (left-click up, right-click down).
        self.keyicon = tk.PhotoImage(file=image(IMG['key'])[0], master=parent)
        self.keyicon = self._icon_scale(self.keyicon)
        self.key = tk.Canvas(
            self.child, height=32 * scale, width=48 * scale)
        self.keyimg = self.key.create_image(
            0, 0, anchor=tk.NW, image=self.keyicon)
        self.keytext = self.key.create_text(
            48 * scale, 32 * scale, anchor=tk.SE, text='')
        self.key.bind('<ButtonRelease-1>', dungeon.key_up)
        self.key.bind('<ButtonRelease-3>', dungeon.key_down)
        self.haskeys = False
        if dungeon.max_keys > 0:
            self.haskeys = True
            self.key.grid(column=2, columnspan=2, row=1)
        # Remaining-item counter shown as a chest.
        self.itemicon = tk.PhotoImage(
            file=image(IMG['chest_full'])[0], master=parent)
        self.itemicon = self._icon_scale(self.itemicon)
        self.item = tk.Canvas(
            self.child, height=32 * scale, width=48 * scale)
        self.itemimg = self.item.create_image(
            0, 0, anchor=tk.NW, image=self.itemicon)
        self.itemtext = self.item.create_text(
            48 * scale, 32 * scale, anchor=tk.SE, text='')
        self.item.bind('<ButtonRelease-1>', dungeon.item_up)
        self.item.bind('<ButtonRelease-3>', dungeon.item_down)
        self.item.grid(column=0, columnspan=2, row=1)
        self.buttons = self.reward, self.bosskey, self.key, self.item
        self.check_state(dungeon)
def check_state(self, dungeon: dungeons.dobj) -> None:
'''
Check button state and make adjustments if necessary.
Args:
dungeon: dungeon object
'''
# Check whether the bosskey button should be disabled.
if self.hasbosskey:
self.bosskey.delete(self.bosskeyid)
self.bosskeyicon = tk.PhotoImage(
file=image(IMG['bosskey'])[0], master=self)
self.bosskeyicon = self._icon_scale(self.bosskeyicon)
if not dungeon.bosskey:
for x in range(self.bosskeyicon.width()):
for y in range(self.bosskeyicon.height()):
bw = sum(self.bosskeyicon.get(x, y)) // 3
if bw in (0, 255):
continue
self.bosskeyicon.put(
'#{0:02x}{0:02x}{0:02x}'.format(bw), (x, y))
self.bosskey.create_image(
0, 0, anchor=tk.NW, image=self.bosskeyicon)
# Check whether reward image needs to be changed.
if self.rewardname != dungeon.reward:
self.rewardname = dungeon.reward
self.rewardimg = REWARDS[dungeon.reward]
self.rewardicon = tk.PhotoImage(
file=image(self.rewardimg)[0], master=self)
self.rewardicon = self._icon_scale(self.rewardicon)
self.reward.delete(self.rewardid)
self.reward.create_image(0, 0, anchor=tk.NW, image=self.rewardicon)
# Check numbers.
scale = self.scaling[0] / self.scaling[1]
self.key.delete(self.keytext)
self.keytext = self.key.create_text(
48 * scale, 32 * scale, anchor=tk.SE,
font=('Arial Black', int(16 * scale)), text=str(dungeon.keys))
self.item.delete(self.itemtext)
itemfont = (('Arial Black', int(8 * scale)) if dungeon.remaining() > 9
else ('Arial Black', int(16 * scale)))
self.itemtext = self.item.create_text(
48 * scale, 32 * scale, anchor=tk.SE, font=itemfont,
text=str(dungeon.remaining()))
newchest = 'chest_full' if dungeon.remaining() > 0 else 'chest_empty'
self.itemicon = tk.PhotoImage(file=image(IMG[newchest])[0], master=self)
self.itemicon = self._icon_scale(self.itemicon)
self.item.delete(self.itemimg)
self.itemimg = self.item.create_image(
0, 0, anchor=tk.NW, image=self.itemicon)
def _icon_scale(self, icon: tk.PhotoImage) -> tk.PhotoImage:
'''
Rescale icon.
Args:
icon: icon to be rescaled
Returns:
tk.PhotoImage: rescaled icon
'''
if self.scaling[0] != 1:
icon = icon.zoom(self.scaling[0], self.scaling[0])
if self.scaling[1] != 1:
icon = icon.subsample(self.scaling[1], self.scaling[1])
return icon
def _scale_factors() -> (int, int):
'''
Calculate up- and downscale factor.
Returns:
int: upscale factor
int: downscale factor
'''
scaling = CONFIG['icon_size']
for up in range(1, 1000):
if not (scaling * up) % 1:
upscale = int(scaling * up)
break
else:
CONFIG.set('icon_size', 1)
assert False
downscale = int(upscale // scaling)
return upscale, downscale | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/gui-tkinter/dungeons.py | dungeons.py |
import configparser
import os.path
import json
from ..config.images import image
from ..config import layout as storage
from ..dungeons.dungeonobj import DUNGEONS
from .. import world
from .itemobj import i as iobj
from .lists import ITEMS
__all__ = 'iobj', 'image', 'get_layout', 'ItemTracker'
def get_layout() -> dict:
'''
Load (or create) item layout.
Returns:
dict: item layout in format {identifier: (column, row)}
'''
try:
layout = storage.load('Items')
except (storage.NoConfig, configparser.Error):
layout = {'Items': {}, 'Dungeons': {}}
for item in ITEMS:
layout['Items'][item.identifier] = item.location
for dungeon in DUNGEONS:
layout['Dungeons'][dungeon.identifier] = dungeon.location
storage.new(layout)
layout = storage.load('Items')
return layout
class ItemTracker(dict):
'''
Inventory item tracker.
'''
def __init__(self, location_tracker: world.LocationTracker):
'''
Args:
location_tracker: location tracker containing item placement rules
'''
layout = get_layout()
super().__init__()
delayed_link = []
for item in ITEMS:
item.register_tracker(location_tracker)
try:
item.location = layout[item.identifier.lower()]
except KeyError:
pass
for linkobj in item.link:
try:
item.register_link(self[linkobj[0]])
except KeyError:
delayed_link.append((item.identifier, linkobj))
self[item.identifier] = item
for to_link in delayed_link:
self[to_link[0]].register_link(self[to_link[1][0]])
data = storage.load_save()
try:
self.restore(data['Items'])
except KeyError:
pass
def reset(self) -> None:
'''
Reset all items.
'''
for item in self:
self[item].reset()
def store(self) -> dict:
'''
Return current item setup info for storage.
Returns:
inventory: item setup info
'''
inventory = {}
for item in self:
inventory[item] = self[item].inventory
return inventory
def restore(self, inventory) -> None:
'''
Restore current item setup from file.
Args:
inventory: information from file
'''
for item in inventory:
if item in self:
self[item].restore_inventory(inventory[item])
try:
self[item].gui.check_state(self[item])
except AttributeError:
pass | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/items/items.py | items.py |
import operator
import os.path
import typing
from ..config.images import image
from .. import world
__all__ = 'i',
class i(object):
'''
Inventory item
Progressive items are listed as one.
Instance variables:
length: number of item progressions
identifier: item identifier string
displayname: name(s) displayed in UI
icon: path to image file(s) associated with item
link: items (and tresholds) linked with this item
linked_items: actual item objects linked with this items
inventory: number of items in inventory
location: (column, row) location on display
gui: associated display object
location_tracker: location tracker containing item placement rules
'''
def __init__(
self, item: str, location: typing.Sequence[int],
display: typing.Sequence[str], icon: typing.Sequence[str],
link: tuple = (), override: typing.Callable[[str], None] = None,
default: int = 0):
'''
Args:
item: internal item name
location: (column, row) coordinates on item display
display: displayed item name(s)
icon: item icon(s)
link: link other items' progression to this one
override: function called to implement special behaviour
default: default inventory setting
'''
assert len(display) == len(icon)
self.length = len(display)
self.identifier = item
self.displayname = display
self.icon = tuple(image(i) for i in icon if i != '<stub>')
self.link = link
self.linked_items = {}
self.override = override
self.default = default
self.location_tracker = None
self.inventory = default
self.location = location
self.gui = None
def register_tracker(self, tracker: world.LocationTracker) -> None:
'''
Store item location tracker.
Args:
tracker: location tracker containing item placement rules
Writes:
location_tracker
'''
self.location_tracker = tracker
def register_button(self, button: typing.Any) -> None:
'''
Store GUI object.
Args:
button: object to store
Writes:
gui
'''
self.gui = button
def index(self) -> int:
'''
Return current image index.
Returns:
int: index used for sequence attributes
'''
idx = self.inventory if self.inventory < 1 else self.inventory - 1
return idx
def display(self) -> str:
'''
Return currently applicable item display string.
Returns:
str: name to be displayed in application
'''
self.gui.check_state(self)
idx = self.index()
item_name = self.displayname[idx]
if self.icon[idx][1] is not None and self.state():
item_name = '{0:s} ({1:s})'.format(
item_name, str(self.icon[idx][1]))
return item_name
def state(self) -> bool:
'''
Return current state of item.
Returns:
str: True if item is active, else False
'''
return self.inventory > 0
def increase(self, *args) -> None:
'''
Left-click on item
'''
if self.inventory < self.length:
self.inventory += 1
self.location_tracker.add_item(self.identifier)
self._set_links()
if self.icon:
self.gui.check_state(self)
def decrease(self, *args) -> None:
'''
Right-click on item
'''
if self.inventory > 0:
self.inventory -= 1
self.location_tracker.remove_item(self.identifier)
self._set_links()
if self.icon:
self.gui.check_state(self)
def reset(self) -> None:
'''
Reset item.
'''
to_remove = self.inventory - self.default
if to_remove > 0:
for _ in range(to_remove):
self.decrease(_)
elif to_remove < 0:
for _ in range(-to_remove):
self.increase(_)
self.gui.check_state(self)
def restore_inventory(self, quantity: int) -> None:
'''
Set inventory number.
Args:
quantity: number to set inventory to
'''
self.inventory = quantity
for _ in range(quantity):
self.location_tracker.add_item(self.identifier)
def register_link(self, linkobject) -> None:
'''
Add linked object.
Args:
linkobject: item object to link with
'''
linkobject.register_tracker(self.location_tracker)
self.linked_items[linkobject.identifier] = linkobject
def _set_links(self) -> None:
'''
Set state of linked items.
'''
for link in self.link:
if self.inventory >= link[1]:
self.linked_items[link[0]].increase()
else:
self.linked_items[link[0]].decrease() | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/items/itemobj.py | itemobj.py |
from .itemobj import i
__all__ = 'ITEMS',
ITEMS = (
i('Deku Stick Capacity', (0, 0), ('Deku Sticks',)*3,
tuple(zip(('stick',)*3, (10, 20, 30))),
link=(('Buy Deku Stick (1)', 1),)),
i('Slingshot', (0, 1), ('Slingshot',)*3,
tuple(zip(('slingshot',)*3, (30, 40, 50)))),
i('Boomerang', (0, 2), ('Boomerang',), ('boomerang',)),
i('Bottle', (0, 3), ('Bottles',)*4, tuple(zip(('bottle',)*4, range(1, 5)))),
i('Deku Nut Capacity', (1, 0), ('Deku Nuts',)*3,
tuple(zip(('nut',)*3, (20, 30, 40)))),
i('Ocarina', (1, 1), ('Fairy Ocarina', 'Ocarina of Time'),
('fairyocarina', 'ocarina')),
i('Lens of Truth', (1, 2), ('Lens of Truth',), ('lens',)),
i('Bottle with Letter', (1, 3), ("Ruto's Letter",),
('bottle_letter',)),
i('Bomb Bag', (2, 0), ('Bombs',)*3,
tuple(zip(('bomb',)*3, (20, 30, 40)))),
i('Bombchus', (2, 1), ('Bombchus',), ('bombchu',)),
i('Magic Bean', (2, 2), ('Magic Beans',)*10,
tuple(zip(('bean',)*10, range(1, 11)))),
i('Bottle with Big Poe', (2, 3), ('Big Poes',)*10,
tuple(zip(('bottle_bigpoe',)*10, range(1, 11)))),
i('Bow', (3, 0), ('Bow',)*3,
tuple(zip(('bow',)*3, (30, 40, 50)))),
i('Progressive Hookshot', (3, 1), ('Hookshot', 'Longshot'),
('hookshot', 'longshot')),
i('Hammer', (3, 2), ('Megaton Hammer',), ('hammer',)),
i('Gold Skulltula Token', (3, 3), ('Gold Skulltula Token',)*100,
tuple(zip(('skulltula_token',)*101, range(1, 101)))),
i('Fire Arrows', (4, 0), ('Fire Arrows',), ('firearrow',)),
i('Ice Arrows', (4, 1), ('Ice Arrows',), ('icearrow',)),
i('Light Arrows', (4, 2), ('Light Arrows',), ('lightarrow',)),
i('Weird Egg', (4, 3),
('Weird Egg (Child)', "Zelda's Letter", 'Mask Quest', 'Keaton Mask',
'Mask Quest', 'Skull Mask', 'Mask Quest', 'Spooky Mask', 'Mask Quest',
'Bunny Hood', 'Mask Quest', 'Mask of Truth'),
('egg', 'letter', 'sold_out', 'keaton', 'sold_out', 'skull', 'sold_out',
'spooky', 'sold_out', 'bunny', 'sold_out', 'truth'),
link=(('Zeldas Letter', 2),)),
i('Dins Fire', (5, 0), ("Din's Fire",), ('din',)),
i('Farores Wind', (5, 1), ("Farore's Wind",), ('farore',)),
i('Nayrus Love', (5, 2), ("Nayru's Love",), ('nayru',)),
i('Pocket Egg', (5, 3),
('Pocket Egg (Adult)', 'Pocket Cucco', 'Cojiro', 'Odd Mushroom',
'Odd Potion', "Poacher's Saw", "Broken Goron's Sword", 'Prescription',
'Eyeball Frog', "World's Finest Eye Drops", 'Claim Check', 'Done'),
('egg', 'cucco', 'cojiro', 'mushroom', 'medicine', 'saw', 'broken_sword',
'perscription', 'frog', 'eyedrops', 'claim', 'sword3'),
link=(('Pocket Cucco', 2), ('Cojiro', 3), ('Odd Mushroom', 4),
('Odd Potion', 5), ('Poachers Saw', 6), ('Broken Sword', 7),
('Prescription', 8), ('Eyeball Frog', 9), ('Eyedrops', 10),
('Claim Check', 11))),
i('Kokiri Sword', (0, 4), ('Kokiri Sword',), ('sword1',)),
i('Master Sword', (1, 4), ('Master Sword',), ('sword2',)),
i('Biggoron Sword', (2, 4), ('Biggoron Sword',), ('sword3',)),
i('Deku Shield', (3, 4), ('Deku Shield',), ('shield1',),
link=(('Buy Deku Shield', 1),)),
i('Hylian Shield', (4, 4), ('Hylian Shield',), ('shield2',)),
i('Mirror Shield', (5, 4), ('Mirror Shield',), ('shield3',)),
i('Goron Tunic', (0, 5), ('Goron Tunic',), ('redtunic',)),
i('Zora Tunic', (1, 5), ('Zora Tunic',), ('bluetunic',)),
i('Iron Boots', (2, 5), ('Iron Boots',), ('ironboots',)),
i('Hover Boots', (3, 5), ('Hover Boots',), ('hoverboots',)),
i('Small Key (Gerudo Fortress)', (4, 5), ('Rescued Carpenters',)*4,
tuple(zip(('saw',)*4, range(1, 5))),
link=(('Carpenter Rescue', 4),)),
i('Gerudo Membership Card', (5, 5), ('Gerudo Membership Card',),
('gerudocard',)),
i('Progressive Strength Upgrade', (0, 6),
("Goron's Bracelet", 'Silver Gauntlets', 'Golden Gauntlets'),
('lift1', 'lift2', 'lift3')),
i('Progressive Scale', (1, 6), ('Silver Scale', 'Golden Scale'),
('scale1', 'scale2')),
i('Progressive Wallet', (2, 6),
("Child's Wallet", "Adult's Wallet", "Giant's Wallet", "Tycoon's Wallet"),
('wallet', 'wallet1', 'wallet2', 'wallet3'), default=1),
i('Magic Meter', (3, 6), ('Magic', 'Double Magic'), ('magic1', 'magic2')),
i('Stone of Agony', (4, 6), ('Stone of Agony',), ('agony',)),
i('Zeldas Lullaby', (0, 7), ("Zelda's Lullaby",), ('zelda_colored',)),
i('Eponas Song', (1, 7), ("Epona's Song",), ('epona_colored',),
link=(('Epona', 1),)),
i('Sarias Song', (2, 7), ("Saria's Song",), ('saria_colored',)),
i('Suns Song', (3, 7), ("Sun's Song",), ('sun_colored',)),
i('Song of Time', (4, 7), ('Song of Time',), ('time_colored',)),
i('Song of Storms', (5, 7), ('Song of Storms',), ('storms_colored',)),
i('Minuet of Forest', (0, 8), ('Minuet of Forest',), ('minuet',)),
i('Bolero of Fire', (1, 8), ('Bolero of Fire',), ('bolero',)),
i('Serenade of Water', (2, 8), ('Serenade of Water',), ('serenade',)),
i('Requiem of Spirit', (3, 8), ('Requiem of Spirit',), ('requiem',)),
i('Nocturne of Shadow', (4, 8), ('Nocturne of Shadow',), ('nocturne',)),
i('Prelude of Light', (5, 8), ('Prelude of Light',), ('prelude',)),
i('Kokiri Emerald', (0, 9), ("Kokiri's Emerald",), ('emerald',)),
i('Goron Ruby', (1, 9), ("Goron's Ruby",), ('ruby',)),
i('Zora Sapphire', (2, 9), ("Zora's Sapphire",), ('sapphire',)),
i('Light Medallion', (0, 10), ('Light Medallion',), ('lightmedallion',)),
i('Forest Medallion', (1, 10), ('Forest Medallion',), ('forestmedallion',)),
i('Fire Medallion', (2, 10), ('Fire Medallion',), ('firemedallion',)),
i('Water Medallion', (3, 10), ('Water Medallion',), ('watermedallion',)),
i('Shadow Medallion', (4, 10), ('Shadow Medallion',), ('shadowmedallion',)),
i('Spirit Medallion', (5, 10), ('Spirit Medallion',), ('spiritmedallion',)),
i('Fairy Ocarina', (), (), ()),
i('Ocarina of Time', (), (), ()),
i('Bottle with Milk', (), (), ()),
i('Bottle with Red Potion', (), (), ()),
i('Bottle with Green Potion', (), (), ()),
i('Bottle with Fairy', (), (), ()),
i('Bottle with Fish', (), (), ()),
i('Bottle with Blue Fire', (), (), ()),
i('Bottle with Bugs', (), (), ()),
i('Bottle with Poe', (), (), ()),
i('Zeldas Letter', (), ('<stub>',), ('<stub>',)),
i('Pocket Cucco', (), ('<stub>',), ('<stub>',)),
i('Cojiro', (), ('<stub>',), ('<stub>',)),
i('Odd Mushroom', (), ('<stub>',), ('<stub>',)),
i('Odd Potion', (), ('<stub>',), ('<stub>',)),
i('Poachers Saw', (), ('<stub>',), ('<stub>',)),
i('Broken Sword', (), ('<stub>',), ('<stub>',)),
i('Prescription', (), ('<stub>',), ('<stub>',)),
i('Eyeball Frog', (), ('<stub>',), ('<stub>',)),
i('Eyedrops', (), ('<stub>',), ('<stub>',)),
i('Claim Check', (), ('<stub>',), ('<stub>',)),
i('Double Defense', (), (), ()),
i('Bombchus (5)', (), (), ()),
i('Bombchus (10)', (), (), ()),
i('Bombchus (20)', (), (), ()),
i('Boss Key', (), (), ()),
i('Small Key', (), (), ()),
i('Epona', (), ('<stub>',), ('<stub>',)),
i('Carpenter Rescue', (), ('<stub>',), ('<stub>',)),
i('Deku Stick Drop', (), (), ()),
i('Deku Nut Drop', (), (), ()),
i('Forest Trial Clear', (), (), (), default=1),
i('Fire Trial Clear', (), (), (), default=1),
i('Water Trial Clear', (), (), (), default=1),
i('Shadow Trial Clear', (), (), (), default=1),
i('Spirit Trial Clear', (), (), (), default=1),
i('Light Trial Clear', (), (), (), default=1),
i('Triforce', (), (), ()),
i('Buy Deku Nut (5)', (), (), ()),
i('Buy Deku Nut (10)', (), (), ()),
i('Buy Deku Stick (1)', (), ('<stub>',), ('<stub>',)),
i('Buy Deku Shield', (), ('<stub>',), ('<stub>',)),
i('Buy Goron Tunic', (), (), ()),
i('Buy Zora Tunic', (), (), ()),
i('Buy Bombchu (10)', (), (), ()),
i('Buy Bombchu (20)', (), (), ()),
i('Buy Bombchu (5)', (), (), ()),
i('Buy Blue Fire', (), (), ()),
i('Buy Bottle Bug', (), (), ()),
) | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/items/lists.py | lists.py |
import importlib
import operator
from .. import rulesets
__all__ = 'DisplayGone', 'LocationTracker'
class DisplayGone(Exception):
'''Raised when a called map display has been closed.'''
pass
class LocationTracker(object):
'''
Item location tracker.
Instance variables:
rules: location ruleset
itemlocations: item location list
skulltulalocations: skulltula location list
gui: list of registered map displays
'''
def __init__(self):
self.gui = []
self.reset()
def reset(self) -> None:
'''
Recreate variables.
'''
self.rules = rulesets.Ruleset()
self.itemlocations = self.rules.list_locations('item')
self.skulltulalocations = self.rules.list_locations('skulltula')
def register_gui(self, gui) -> None:
'''
Register GUI object.
Args:
gui: map display object
'''
self.gui.append(gui)
def refresh_gui(self) -> None:
'''
Refresh registered map displays.
'''
guilist = self.gui
self.gui = []
for gui in guilist:
try:
gui.update_buttons()
except DisplayGone:
continue
self.gui.append(gui)
def check_availability(self, loctype: str) -> dict:
'''
Return list of locations and whether they are available.
Args:
loctype: 'item' or 'skulltula'
Returns:
dict: dictionary containing availability of locations
'''
assert loctype in ('item', 'skulltula')
listing = (self.itemlocations if loctype == 'item'
else self.skulltulalocations)
available = {}
for location in listing:
available[location] = self.rules.location_available(
location, loctype)
return available
def dungeon_availability(self, dungeonname: str, loctype: str) -> str:
'''
Check to which degree dungeon is clearable.
This assumes that all keys are available. It hence only checks for
required items.
Args:
dungeonname: name of dungeon
itemtype: 'item' or 'skulltula'
Returns:
bool: True of all locations are available with all keys
'''
return self.rules.dungeon_available(dungeonname, loctype)
def add_item(self, itemname: str) -> None:
'''
Add item to current inventory.
Args:
itemname: identifier of item
'''
self.rules.add_item(itemname)
self.refresh_gui()
def remove_item(self, itemname: str) -> None:
'''
Remove item from current inventory.
Args:
itemname: identifier of item
'''
self.rules.remove_item(itemname)
self.refresh_gui()
def is_adult(self) -> bool:
'''
Check whether adult items are available.
Returns:
bool: True if adult items are available
'''
return self.rules.is_adult()
def check_rule(self, rule: operator.methodcaller) -> bool:
'''
Check given rule.
Args:
rule: method to check with world state
Return:
bool: return value of check
'''
return self.rules.check_rule(rule)
def check_access(self, location: str) -> bool:
'''
Check whether given location can be accessed.
Args:
location: either item location or game region
Returns:
bool: return value of check
'''
return self.rules.check_access(location)
def dungeon_locations(self, dungeonname: str) -> (list, list):
'''
Return list of locations in given dungeon.
The item list includes the dungeon reward.
Args:
dungeonname: name of dungeon
Returns:
list: list of item locations
list: list of skulltula locations
'''
return self.rules.dungeon_locations(dungeonname)
def dungeon_info(self, dungeonname: str) -> dict:
'''
Return info about given dungeon.
Args:
dungeonname: name of dungeon
Returns:
dict: {'keys': int, 'items': int, 'bosskey': bool}
'''
return self.rules.dungeon_info(dungeonname) | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/world/world.py | world.py |
import os.path
import typing
from ..config import CONFIG
from ..config.images import image
from .. import world
from .lists import INFO, REWARDS
__all__ = 'd', 'DUNGEONS'
class d(object):
'''
Dungeon item
Instance variables:
identifier: dungeon identifier string
displayname: name displayed in UI
icon: path to image file associated with dungeon
keys: number of small keys in inventory
bosskey: whether bosskey has been found
items: number of items found
reward: type of stone or medallion
location: (column, row) location on display
gui: associated display object
max_keys: number of small keys in dungeon
max_items: number of non-dungeon items in dungeon
has_bosskey: True if dungeon has a boss key
'''
def __init__(
self, dungeon: str, location: typing.Sequence[int]):
'''
Args:
dungeon: internal dungeon name
location: (column, row) coordinates on dungeon display
'''
self.identifier = dungeon
self.displayname = INFO[dungeon]['name']
self.icon = (
image(INFO[dungeon]['icon'])[0] if INFO[dungeon]['icon'] else None)
self.reset()
self.location = location
self.gui = None
self.max_keys = None
self.max_items = None
self.has_bosskey = False
def register_widget(self, widget: typing.Any) -> None:
'''
Store GUI object.
Args:
widget: object to store
Writes:
gui
'''
self.gui = widget
def register_tracker(self, location_tracker: world.LocationTracker) -> None:
'''
Register location tracker containing placement rules.
Args:
location_tracker: tracker to register
'''
self.location_tracker = location_tracker
dungeon_info = self.location_tracker.dungeon_info(self.identifier)
self.max_keys = dungeon_info['keys']
self.max_items = dungeon_info['items']
self.has_bosskey = dungeon_info['bosskey']
for _ in range(self.keys):
self.location_tracker.add_item(
'Small Key ({0:s})'.format(self.identifier))
if self.bosskey:
self.location_tracker.add_item(
'Boss Key ({0:s})'.format(self.identifier))
def reset(self) -> None:
'''
Reset dungeon.
'''
self.keys = 0
self.bosskey = False
self.items = 0
self.reward = '?'
try:
self.gui.check_state(self)
except AttributeError:
pass
def key_up(self, *args) -> None:
'''
Increase key counter.
'''
if self.keys < self.max_keys:
self.keys += 1
self.location_tracker.add_item(
'Small Key ({0:s})'.format(self.identifier))
self.gui.check_state(self)
def key_down(self, *args) -> None:
'''
Decrease key counter.
'''
if self.keys > 0:
self.keys -= 1
self.location_tracker.remove_item(
'Small Key ({0:s})'.format(self.identifier))
self.gui.check_state(self)
def toggle_bosskey(self, *args) -> None:
'''
Toggle bosskey.
'''
self.bosskey = not self.bosskey
tracker_cmd = (self.location_tracker.add_item if self.bosskey
else self.location_tracker.remove_item)
tracker_cmd('Boss Key ({0:s})'.format(self.identifier))
self.gui.check_state(self)
def item_up(self, *args) -> None:
'''
Increase item counter.
'''
if self.items < self.max_items:
self.items += 1
self.gui.check_state(self)
def item_down(self, *args) -> None:
'''
Decrease item counter.
'''
if self.items > 0:
self.items -= 1
self.gui.check_state(self)
def remaining(self) -> int:
'''
Return remaining items.
Returns:
int: still to be found number of items
'''
return self.max_items - self.items
def cycle_reward(self, forward: bool) -> None:
'''
Cycle through rewards.
Args:
forward: True cycles forward, False backwards
'''
rewardlist = tuple(REWARDS)
idx = rewardlist.index(self.reward)
if forward:
idx += 1
idx %= len(REWARDS)
else:
idx -= 1
if idx < 0:
idx = len(REWARDS) - 1
self.reward = rewardlist[idx]
self.gui.check_state(self)
def store(self) -> dict:
'''
Return contained info for saving.
Returns:
dict: dictionary with dungeon info
'''
data = {
'identifier': self.identifier,
'keys': self.keys,
'bosskey': self.bosskey,
'items': self.items,
'reward': self.reward,
}
return data
def restore(self, data: dict) -> None:
'''
Restore contained info.
Args:
data: dictionary with required info
'''
mapping = 'identifier', 'keys', 'bosskey', 'items', 'reward'
for datum in mapping:
try:
self.__setattr__(datum, data[datum])
except AttributeError:
pass
if datum in ('keys', 'bosskey'):
keytype = 'Small Key' if datum == 'keys' else 'Boss Key'
keyattr = self.keys if datum == 'keys' else self.bosskey
for _ in range(int(keyattr)):
self.location_tracker.add_item(
'{0:s} ({1:s})'.format(keytype, self.identifier))
DUNGEONS = (
d('Deku Tree', (0, 0)), d('Dodongos Cavern', (0, 1)),
d('Jabu Jabus Belly', (0, 2)), d('Ice Cavern', (0, 3)),
d('Bottom of the Well', (0, 4)), d('Gerudo Training Grounds', (0, 5)),
d('Forest Temple', (1, 0)), d('Fire Temple', (1, 1)),
d('Water Temple', (1, 2)), d('Shadow Temple', (1, 3)),
d('Spirit Temple', (1, 4)), d('Ganons Castle', (1, 5))
) | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/dungeons/dungeonobj.py | dungeonobj.py |
import configparser
import os.path
import json
from ..config import layout as storage
from ..items.lists import ITEMS
from .. import world
from .dungeonobj import DUNGEONS
from .dungeonobj import d as dobj
from .dungeonobj import image
__all__ = 'dobj', 'image', 'get_layout', 'DungeonTracker'
def get_layout() -> dict:
'''
Load (or create) dungeon layout.
Returns:
dict: dungeon layout in format {identifier: (column, row)}
'''
try:
layout = storage.load('Dungeons')
except (storage.NoConfig, configparser.Error):
layout = {}
layout['Dungeons'] = {}
for dungeon in DUNGEONS:
layout['Dungeons'][dungeon.identifier] = dungeon.location
layout['Items'] = {}
for item in ITEMS:
layout['Items'][item.identifier] = item.location
storage.new(layout)
layout = storage.load('Dungeons')
return layout
class DungeonTracker(dict):
'''
Dungeon tracker.
'''
def __init__(self, location_tracker: world.LocationTracker):
'''
Args:
location_tracker: location tracker containing item placement rules
'''
layout = get_layout()
super().__init__()
for dungeon in DUNGEONS:
dungeon.register_tracker(location_tracker)
dungeon.location = layout[dungeon.identifier.lower()]
self[dungeon.identifier] = dungeon
data = storage.load_save()
try:
self.restore(data['Dungeons'])
except KeyError:
pass
def reset(self) -> None:
'''
Reset all dungeon.
'''
for dungeon in self:
self[dungeon].reset()
def store(self) -> dict:
'''
Return current dungeon setup info for storage.
Returns:
inventory: dungeon setup info
'''
inventory = {}
for dungeon in self:
inventory[dungeon] = self[dungeon].store()
return inventory
def restore(self, inventory) -> None:
'''
Restore current dungeon setup from file.
Args:
inventory: information from file
'''
for dungeon in inventory:
if dungeon in self:
self[dungeon].restore(inventory[dungeon])
try:
self[dungeon].gui.check_state(self[dungeon])
except AttributeError:
pass | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/dungeons/dungeons.py | dungeons.py |
import collections
__all__ = 'INFO', 'REWARDS', 'IMG'
INFO = {
'Deku Tree': {
'name': 'Inside the Deku Tree', 'reward': True, 'icon': 'emerald',
'location': (860, 2450),
'maps': ('item_child', 'item_adult', 'skulls_child', 'skulls_adult'),
'mapimg': 'dungeon_dekutree'},
'Dodongos Cavern': {
'name': "Dodongo's Cavern", 'reward': True, 'icon': 'ruby',
'location': (215, 1616),
'maps': ('item_child', 'item_adult', 'skulls_child', 'skulls_adult'),
'mapimg': 'dungeon_dodongos'},
'Jabu Jabus Belly': {
'name': "Jabu Jabu's Belly", 'reward': True, 'icon': 'sapphire',
'location': (310, 2460), 'maps': ('item_child', 'skulls_child'),
'mapimg': 'dungeon_jabujabu'},
'Ice Cavern': {
'name': 'Ice Cavern', 'reward': False, 'icon': 'serenade',
'location': (242, 2520), 'maps': ('item_adult', 'skulls_adult'),
'mapimg': 'dungeon_ice'},
'Bottom of the Well': {
'name': 'Bottom of the Well', 'reward': False, 'icon': 'lens',
'location': (300, 1930), 'maps': ('item_child', 'skulls_child'),
'mapimg': 'dungeon_well'},
'Gerudo Training Grounds': {
'name': 'Gerudo Training Grounds', 'reward': False,
'icon': 'gerudo_symbol_colored',
'location': (428, 540), 'maps': ('item_adult',),
'mapimg': 'dungeon_gerudo'},
'Forest Temple': {
'name': 'Forest Temple', 'reward': True, 'icon': 'forestmedallion',
'location': (570, 2140), 'maps': ('item_adult', 'skulls_adult'),
'mapimg': 'dungeon_forest'},
'Fire Temple': {
'name': 'Fire Temple', 'reward': True, 'icon': 'firemedallion',
'location': (35, 1924), 'maps': ('item_adult', 'skulls_adult'),
'mapimg': 'dungeon_fire'},
'Water Temple': {
'name': 'Water Temple', 'reward': True, 'icon': 'watermedallion',
'location': (1330, 1020), 'maps': ('item_adult', 'skulls_adult'),
'mapimg': 'dungeon_water'},
'Shadow Temple': {
'name': 'Shadow Temple', 'reward': True, 'icon': 'shadowmedallion',
'location': (330, 2160), 'maps': ('item_adult', 'skulls_adult'),
'mapimg': 'dungeon_shadow'},
'Spirit Temple': {
'name': 'Spirit Temple', 'reward': True, 'icon': 'spiritmedallion',
'location': (280, 70),
'maps': ('item_child', 'item_adult', 'skulls_child', 'skulls_adult'),
'mapimg': 'dungeon_spirit'},
'Ganons Castle': {
'name': "Ganon's Castle", 'reward': False, 'icon': 'lightarrow',
'location': (170, 1440), 'maps': ('item_adult',),
'mapimg': 'dungeon_ganon'},
}
REWARDS = collections.OrderedDict((
('?', 'unknown'),
('Kokiri Emerald', 'emerald'),
('Goron Ruby', 'ruby'),
('Zora Sapphire', 'sapphire'),
('Light Medallion', 'lightmedallion'),
('Forest Medallion', 'forestmedallion'),
('Fire Medallion', 'firemedallion'),
('Water Medallion', 'watermedallion'),
('Shadow Medallion', 'shadowmedallion'),
('Spirit Medallion', 'spiritmedallion')
))
IMG = {
'worldmap': 'overworld', 'key': 'smallkey', 'bosskey': 'bosskey',
'chest_full': 'chest_golden_closed', 'chest_empty': 'chest_golden_open'} | z5-tracker | /z5_tracker-1.2.1-py3-none-any.whl/z5tracker/dungeons/lists.py | lists.py |
# z80
Fast and flexible Z80/i8080 emulator.
[](https://travis-ci.org/kosarev/z80)
## Quick facts
* Implements accurate machine cycle-level emulation.
* Supports undocumented instructions, flags and registers.
* Passes the well-known `cputest`, `8080pre`, `8080exer`,
`8080exm`, `prelim` and `zexall` tests.
* Follows a modular event-driven design for flexible interfacing.
* Employs compile-time polymorphism for zero performance
overhead.
* Cache-friendly implementation without large code switches and
data tables.
* Offers default modules for the breakpoint support and generic
memory.
* Supports multiple independently customized emulator instances.
* Written in strict C++11.
* Does not rely on implementation-defined or unspecified
behavior.
* Single-header implementation.
* Provides a generic Python 3 API and instruments to create
custom bindings.
* MIT license.
## Contents
* [Hello world](#hello-world)
* [Adding memory](#adding-memory)
* [Input and output](#input-and-output)
* [Accessing processor's state](#accessing-processors-state)
* [Modules](#modules)
* [The root module](#the-root-module)
* [State modules](#state-modules)
* [Feedback](#feedback)
## Hello world
```c++
#include "z80.h"
class my_emulator : public z80::z80_cpu<my_emulator> {
public:
typedef z80::z80_cpu<my_emulator> base;
my_emulator() {}
void on_set_pc(z80::fast_u16 pc) {
std::printf("pc = 0x%04x\n", static_cast<unsigned>(pc));
base::on_set_pc(pc);
}
};
int main() {
my_emulator e;
e.on_step();
e.on_step();
e.on_step();
}
```
[hello.cpp](https://github.com/kosarev/z80/blob/master/examples/hello.cpp)
Building:
```shell
$ git clone [email protected]:kosarev/z80.git
$ cmake z80
$ make
$ make test
$ make hello # Or 'make examples' to build all examples at once.
```
Running:
```
$ ./examples/hello
pc = 0x0000
pc = 0x0001
pc = 0x0002
```
In this example we derive our custom emulator class,
`my_emulator`, from a
[mix-in](https://en.wikipedia.org/wiki/Mixin) that implements the
logic and default interfaces necessary to emulate the Zilog Z80
processor.
As you may guess, replacing `z80_cpu` with `i8080_cpu` would give
us a similar Intel 8080 emulator.
The `on_set_pc()` method overrides its default counterpart to
print the current value of the `PC` register before changing it.
For this compile-time polymorphism to be able to do its job, we
pass the type of the custom emulator to the processor mix-in as a
parameter.
The `main()` function creates an instance of the emulator and
asks it to execute a few instructions, thus triggering the custom
version of `on_set_pc()`.
The following section reveals what are those instructions and
where the emulator gets them from.
## Adding memory
Every time the CPU emulator needs to access memory, it calls the
`on_read()` and `on_write()` methods.
Their default implementations do not really access any memory;
`on_read()` simply returns `0x00`, meaning the emulator in the
example above actually executes a series of `nop`s, and
`on_write()` does literally nothing.
Since both the reading and writing functions are considered by
the `z80::z80_cpu` class to be handlers, which we know because
they have the `on` prefix in their names, we can use the
same technique as with `on_set_pc()` above to override the
default handlers to actually read and write something.
```c++
class my_emulator : public z80::z80_cpu<my_emulator> {
public:
...
fast_u8 on_read(fast_u16 addr) {
assert(addr < z80::address_space_size);
fast_u8 n = memory[addr];
std::printf("read 0x%02x at 0x%04x\n", static_cast<unsigned>(n),
static_cast<unsigned>(addr));
return n;
}
void on_write(fast_u16 addr, fast_u8 n) {
assert(addr < z80::address_space_size);
std::printf("write 0x%02x at 0x%04x\n", static_cast<unsigned>(n),
static_cast<unsigned>(addr));
memory[addr] = static_cast<least_u8>(n);
}
private:
least_u8 memory[z80::address_space_size] = {
0x21, 0x34, 0x12, // ld hl, 0x1234
0x3e, 0x07, // ld a, 7
0x77, // ld (hl), a
};
};
```
[adding_memory.cpp](https://github.com/kosarev/z80/blob/master/examples/adding_memory.cpp)
Output:
```
read 0x21 at 0x0000
pc = 0x0001
read 0x34 at 0x0001
read 0x12 at 0x0002
pc = 0x0003
read 0x3e at 0x0003
pc = 0x0004
read 0x07 at 0x0004
pc = 0x0005
read 0x77 at 0x0005
pc = 0x0006
write 0x07 at 0x1234
```
## Input and output
Aside from memory, the other major way the processors
communicate with the outside world is via input and output ports.
If you read the previous sections, it's now easy to guess that
there are a couple of handlers that do that.
These are `on_input()` and `on_output()`.
Note that the handlers have different types of parameters that
store the port address, because i8080 only supports 256 ports
while Z80 extends that number to 64K.
```c++
// i8080_cpu
fast_u8 on_input(fast_u8 port)
void on_output(fast_u8 port, fast_u8 n)
// z80_cpu
fast_u8 on_input(fast_u16 port)
void on_output(fast_u16 port, fast_u8 n)
```
The example:
```c++
class my_emulator : public z80::z80_cpu<my_emulator> {
public:
...
fast_u8 on_input(fast_u16 port) {
fast_u8 n = 0xfe;
std::printf("input 0x%02x from 0x%04x\n", static_cast<unsigned>(n),
static_cast<unsigned>(port));
return n;
}
void on_output(fast_u16 port, fast_u8 n) {
std::printf("output 0x%02x to 0x%04x\n", static_cast<unsigned>(n),
static_cast<unsigned>(port));
}
private:
least_u8 memory[z80::address_space_size] = {
0xdb, // in a, (0xfe)
0xee, 0x07, // xor 7
0xd3, // out (0xfe), a
};
};
```
[input_and_output.cpp](https://github.com/kosarev/z80/blob/master/examples/input_and_output.cpp)
## Accessing processor's state
Sometimes it's necessary to examine and/or alter the current
state of the CPU emulator and do that in a way that is
transparent to the custom code in overridden handlers.
For this purpose the default state interface implemented in the
`i8080_state<>` and `z80_state<>` classes provide a number of
getters and setters for registers, register pairs, interrupt
flip-flops and other fields constituting the internal state of
the emulator.
By convention, calling such functions does not fire up any
handlers. The example below demonstrates a typical usage.
Note that there are no such accessors for memory as it is
external to the processor emulators and they themselves have to
use handlers, namely, the `on_read()` and `on_write()` ones, to
deal with memory.
```c++
class my_emulator : public z80::z80_cpu<my_emulator> {
public:
...
void on_step() {
std::printf("hl = %04x\n", static_cast<unsigned>(get_hl()));
base::on_step();
// Start over on every new instruction.
set_pc(0x0000);
}
```
[accessing_state.cpp](https://github.com/kosarev/z80/blob/master/examples/accessing_state.cpp)
## Modules
By overriding handlers we can extend and otherwise alter the
default behavior of CPU emulators.
That's good, but what do we do if it's not enough?
For example, what if the default representation of the
processor's internal state doesn't fit the needs of your
application?
Say, you might be forced to follow a particular order of
registers or you just want to control the way they are packed in
a structure because there's some external binary API to be
compatible with.
Or, what if you don't need to emulate the whole processor's
logic, and just want to check if a given sequence of bytes forms
a specific instruction?
That's where modules come into play.
To understand what they are and how to use them, let's take a
look at the definitions of the emulator classes and see what's
under the hood.
```c++
template<typename D>
class i8080_cpu : public i8080_executor<i8080_decoder<i8080_state<root<D>>>>
{};
template<typename D>
class z80_cpu : public z80_executor<z80_decoder<z80_state<root<D>>>>
{};
```
Each of these classes is no more than a stack of a few other
mix-ins.
The `root<>` template provides helpers that make it possible to
call handlers of the most derived class in the hierarchy, `D`,
which is why it takes that class as its type parameter.
It also contains dummy implementations of the standard handlers,
such as `on_output()`, so you don't have to define them when you
don't need them.
`i8080_state<>` and `z80_state<>` have been mentioned in the
previous section as classes that define transparent accessors to
the processor state, e.g., `set_hl()`.
They also define corresponding handlers, like `on_set_hl()`, that
other modules use to inspect and modify the state.
`i8080_decoder<>` and `z80_decoder<>` modules analyze op-codes
and fire up handlers for specific instructions, e.g., `on_halt()`.
Finally, the job of `i8080_executor<>` and `z80_executor<>` is to
implement handlers like `on_halt()` to actually execute
corresponding instructions.
The convention is that modules shall communicate with each other
only via handlers.
Indeed, if they would call the transparent accessors or refer to
data fields directly, then those accessors wouldn't be
transparent anymore and handlers would never be called.
This also means that modules are free to define transparent
accessors in a way that seems best for their purpose or even not
define them at all.
All and any of the standard modules can be used and customized
independently of each other.
Moreover, all and any of the modules can be replaced with custom
implementations.
New modules can be developed and used separately or together with
the standard ones.
In all cases the only requirement is to implement handlers other
modules rely on.
## The root module
```c++
template<typename D>
class root {
public:
typedef D derived;
...
fast_u8 on_read(fast_u16 addr) {
unused(addr);
return 0x00;
}
void on_write(fast_u16 addr, fast_u8 n) {
unused(addr, n);
}
...
protected:
const derived &self() const{ return static_cast<const derived&>(*this); }
derived &self() { return static_cast<derived&>(*this); }
};
```
The main function of the root module is to define the `self()`
method that other modules can use to call handlers. For example,
a decoder could do `self().on_ret()` whenever it runs into a
`ret` instruction.
Aside from that, the module contains dummy implementations of the
standard handlers that do nothing or, if they have to return
something, return some default values.
## State modules
```c++
template<typename B>
class i8080_state : public internals::cpu_state_base<B> {
public:
...
bool get_iff() const { ... }
void set_iff(bool f) { ... }
...
};
template<typename B>
class z80_state : public internals::cpu_state_base<z80_decoder_state<B>> {
public:
...
void exx_regs() { ... }
void on_exx_regs() { exx_regs(); }
...
};
```
The purpose of state modules is to provide handlers to access the
internal state of the emulated CPU.
They also usually store the fields of the state, thus defining
its layout in memory.
Regardless of the way the fields are represented and stored, the
default getting and setting handlers for register pairs use
access handlers for the corresponding 8-bit registers to obtain
or set the 16-bit values.
Furthermore, the low half of the register pair is always
retrieved and set before the high half.
This means that by default handlers for 8-bit registers are
getting called even if originally a value of a register pair they
are part of has been queried.
Custom implementations of processor states, however, are not
required to do so.
```c++
fast_u16 on_get_bc() {
// Always get the low byte first.
fast_u8 l = self().on_get_c();
fast_u8 h = self().on_get_b();
    return make16(h, l);
}

void on_set_bc(fast_u16 n) {
// Always set the low byte first.
self().on_set_c(get_low8(n));
self().on_set_b(get_high8(n));
}
```
Aside of the usual getters and setters for the registers and
flip-flops, both the i8080 and Z80 states have to provide an
`on_ex_de_hl_regs()` handler that exchanges `hl` and `de`
registers the same way the `xchg` and `ex de, hl` do.
And the Z80 state additionally has to have an `on_exx_regs()`
that swaps register pairs just as the `exx` instruction does.
The default swapping handlers do their work by accessing
registers directly, without relying on the getting and setting
handlers, similarly to how silicon implementations of the
processors toggle internal flip-flops demux'ing access to
register cells without actually transferring their values.
Because the CPUs have a lot of similarities, processor-specific
variants of modules usually share some common code in helper base
classes that in turn are defined in the `internal` class.
That class defines entities that are internal to the
implementation of the library.
The client code is therefore supposed to be written as if the
module classes are derived directly from their type parameters,
`B`.
Note that `z80_state` has an additional mix-in in its inheritance
chain, `z80_decoder_state<>`, whereas `i8080_state` is derived
directly from the generic base.
This is because Z80 decoders are generally not stateless objects;
they have to track which of the `IX`, `IY` or `HL` registers has
to be used as the index register for the current instruction.
The decoder state class stores and provides access to that
information.
```c++
template<typename B>
class z80_decoder_state : public B {
public:
...
iregp get_iregp_kind() const { ... }
void set_iregp_kind(iregp r) { ... }
iregp on_get_iregp_kind() const { return get_iregp_kind(); }
void on_set_iregp_kind(iregp r) { set_iregp_kind(r); }
...
};
```
In its simplest form, a custom state module can be a structure defining the
necessary state fields together with corresponding access handlers.
```c++
template<typename B>
struct my_state : public B {
fast_u16 pc;
...
fast_u16 on_get_pc() const { return pc; }
void on_set_pc(fast_u16 n) { pc = n; }
...
// These always have to be explicitly defined.
void on_ex_de_hl_regs() {}
void on_ex_af_alt_af_regs() {}
void on_exx_regs() {}
};
```
[custom_state.cpp](https://github.com/kosarev/z80/blob/master/examples/custom_state.cpp)
## Feedback
Any notes on overall design, improving performance and testing
approaches are highly appreciated.
Please file an issue or use the email given at
<https://github.com/kosarev>.
Thanks!
| z80 | /z80-1.0b1.tar.gz/z80-1.0b1/README.md | README.md |
# z80dis
A python disassembler library for Z80
# Use
```
>>> from z80dis import z80
>>> z80.disasm(b'\xCB\xE7', 0)
'set 4,a'
```
Or, if you'd like access to the instruction internals, like opcode identifier, length, and operands:
```
>>> decoded = z80.decode(b'\xCB\xE7', 0)
>>> decoded.op
<OP.SET: 58>
>>> decoded.operands[0]
(<OPER_TYPE.IMM: 45>, 4)
>>> decoded.operands[1]
(<OPER_TYPE.REG_A: 1>,)
>>> decoded.len
2
```
The decoded structure can be supplied to disasm() to make a string:
```
>>> z80.disasm(decoded)
'set 4,a'
```
| z80dis | /z80dis-1.0.6.tar.gz/z80dis-1.0.6/README.md | README.md |
import os
from threading import Thread
import pygame
from zEvent import Event
# The mixer must be initialised once before any pygame.mixer.Sound can
# be created, so do it at import time.
pygame.mixer.init()
class Sound:
    """A playable sound backed by ``pygame.mixer.Sound``.

    Exposes lifecycle events (``OnStart``, ``OnPlay``, ``OnStop``,
    ``OnPause``, ``OnResume``, ``OnEnd``) that callers can subscribe to
    through the ``zEvent.Event`` interface.
    """

    def __init__(self, filename: str):
        """Load a sound file.

        Parameters
        ----------
        filename : str
            Path of the sound file to load.

        Raises
        ------
        FileNotFoundError
            If ``filename`` does not exist.
        """
        if not os.path.exists(filename):
            # Bug fix: the message previously never mentioned which file
            # was missing; include the actual path so callers can diagnose.
            raise FileNotFoundError(
                f"Not found '{filename}' file at '{os.getcwd()}'")
        self.__pgs = pygame.mixer.Sound(filename)
        self.OnPlay = Event()    # fired repeatedly while the sound is playing
        self.OnStart = Event()   # fired once when playback starts
        self.OnStop = Event()    # fired by Stop()
        self.OnPause = Event()   # fired by Pause()
        self.OnResume = Event()  # fired by Resume()
        self.OnEnd = Event()     # fired when playback finishes

    def Play(self, loops: int = 1):
        """Play the sound on a background thread.

        Parameters
        ----------
        loops : int
            How many times to play the sound; pass ``INFINITY_LOOPS``
            (-1) to loop forever.
        """
        def play():
            self._pgs_played = self.__pgs.play(loops)
            self.OnStart.Active()
            # NOTE(review): this busy-waits for the whole duration of
            # playback, firing OnPlay on each iteration; it keeps one CPU
            # core busy while the sound plays.
            while self._pgs_played.get_busy():
                self.OnPlay.Active()
            self.OnEnd.Active()

        # The channel is created inside the worker thread, so Stop/Pause/
        # Resume are only meaningful once playback has actually started.
        self._pgs_played: pygame.mixer.Channel
        Thread(target=play).start()

    def Stop(self):
        """Stop playback immediately and fire ``OnStop``."""
        self.__pgs.stop()
        self.OnStop.Active()

    def Pause(self):
        """Pause playback (resume with :meth:`Resume`).

        Raises
        ------
        RuntimeError
            If the sound has never been played.
        """
        if not hasattr(self, "_pgs_played"):
            # RuntimeError (instead of bare Exception) stays catchable by
            # existing ``except Exception`` callers while being specific.
            raise RuntimeError("Pause called without play")
        self._pgs_played.pause()
        self.OnPause.Active()

    def Resume(self):
        """Resume playback previously paused with :meth:`Pause`.

        Raises
        ------
        RuntimeError
            If the sound has never been played.
        """
        if not hasattr(self, "_pgs_played"):
            raise RuntimeError("Resume called without play")
        self._pgs_played.unpause()
        self.OnResume.Active()
INFINITY_LOOPS = -1 | zAudio-zCloze | /zaudio_zcloze-0.0.1.tar.gz/zaudio_zcloze-0.0.1/src/zAudio_zCloze/__init__.py | __init__.py |
zChainer
========
scikit-learn-like interface and stacked autoencoder for chainer
Requirements
------------
- numpy
- scikit-learn
- chainer >= 1.5
Installation
------------
::
pip install zChainer
Usage
-----
Autoencoder
~~~~~~~~~~~
.. code:: python
import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import ChainList, optimizers
from zChainer import NNAutoEncoder, utility
data = (..).astype(np.float32)
encoder = ChainList(
L.Linear(784, 200),
L.Linear(200, 100))
decoder =ChainList(
L.Linear(200, 784),
L.Linear(100, 200))
# You can set your own forward function. Default is as below.
#def forward(self, x):
# h = F.dropout(F.relu(self.model[0](x)))
# return F.dropout(F.relu(self.model[1](h)))
#
#NNAutoEncoder.forward = forward
ae = NNAutoEncoder(encoder, decoder, optimizers.Adam(), epoch=100, batch_size=100,
log_path="./ae_log_"+utility.now()+".csv", export_path="./ae_"+utility.now()+".model")
ae.fit(data)
Training and Testing
~~~~~~~~~~~~~~~~~~~~
.. code:: python
import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import ChainList, optimizers
from zChainer import NNManager, utility
import pickle
X_train = (..).astype(np.float32)
y_train = (..).astype(np.int32)
X_test = (..).astype(np.float32)
y_test = (..).astype(np.int32)
# Create a new network
model = ChainList(L.Linear(784, 200), L.Linear(200, 100), L.Linear(100, 10))
# or load a serialized model
#f = open("./ae_2015-12-01_11-26-45.model")
#model = pickle.load(f)
#f.close()
#model.add_link(L.Linear(100,10))
def forward(self, x):
h = F.relu(self.model[0](x))
h = F.relu(self.model[1](h))
return F.relu(self.model[2](h))
def output(self, y):
y_trimed = y.data.argmax(axis=1)
return np.array(y_trimed, dtype=np.int32)
NNManager.forward = forward
NNManager.output = output
nn = NNManager(model, optimizers.Adam(), F.softmax_cross_entropy,
epoch=100, batch_size=100,
log_path="./training_log_"+utility.now()+".csv")
nn.fit(X_train, y_train, is_classification=True)
nn.predict(X_test, y_test)
| zChainer | /zChainer-0.3.2.tar.gz/zChainer-0.3.2/README.txt | README.txt |
.. image:: https://readthedocs.org/projects/zcluster/badge/?version=latest
**zCluster** is a package for measuring galaxy cluster photometric redshifts using
data from large public surveys. It can also produce photometric redshift estimates
and galaxy density maps for any point in the sky using the included `zField` tool.
* **Documentation:** https://zcluster.readthedocs.io
* **License:** `GPL v3 <COPYING>`_
* **Authors:** Matt Hilton, with contributions from Kabelo Kesebonye, Phumlani Phakathi,
Denisha Pillay, and Damien Ragavan (not all reflected on GitHub).
* **Installation:** ``pip install zCluster``
* **Support:** Please use the `GitHub issues page <https://github.com/ACTCollaboration/zCluster/issues>`_,
and/or contact `Matt Hilton <mailto:[email protected]>`_.
**zCluster** has built-in support for querying large photometric surveys - currently:
* SDSS (DR7 - DR12)
* SDSS Stripe 82 (from SDSS DR7)
* CFHTLenS
* PS1 (DR2)
* DECaLS (DR8 - DR10)
* DES (DR1, DR2 and Y3 internal)
* KiDS (DR4)
For details of the algorithm, its performance, and the output of the code, refer to
`Hilton et al. (2018) <https://ui.adsabs.harvard.edu/abs/2018ApJS..235...20H/abstract>`_, which presents
results based on SDSS, S82, and CFHTLenS, and/or
`Hilton et al. (2021) <https://ui.adsabs.harvard.edu/abs/2021ApJS..253....3H/abstract>`_, which presents
results based on DECaLS DR8. The other surveys listed above are work in progress (so use with caution; PS1 in
particular is problematic). `Pillay et al. (2021) <https://ui.adsabs.harvard.edu/abs/2021arXiv211104340P/abstract>`_
presents the first use of the package for producing projected galaxy density maps.
If you find **zCluster** useful in your work, please cite whichever one
of the above papers that you think is appropriate (together, of course, with the appropriate papers
for the optical/IR survey used).
**zCluster** can also run on user-supplied .fits table photometric catalogs, provided that they have columns
named ``ID``\ , ``RADeg``\ , ``decDeg``\ , and magnitude column names in the form ``u_MAG_AUTO``\ ,
``u_MAGERR_AUTO`` etc..
**zCluster** is under active development, and not all documentation is up to date. The package also
contains some experimental features that are not necessarily well tested.
| zCluster | /zCluster-0.3.0.tar.gz/zCluster-0.3.0/README.rst | README.rst |
**zCluster** is written in `Python <https://www.python.org/>`_ (3.6+), and requires the
following additional modules to be installed (currently used versions are given in
brackets, later versions also probably work):
* numpy (1.13.3)
* scipy (1.3.0)
* matplotlib (2.1.1)
* astLib (0.11.7)
* astropy (4.0)
* mastcasjobs (for PS1; https://github.com/rlwastro/mastcasjobs)
* casjobs (for PS1; https://github.com/dfm/casjobs)
To run on DES photometry, there is an additional dependency:
* easyaccess (1.4.5)
If you want to run the code in parallel, you will also need:
* mpi4py (3.0.0)
Note that if you want to run the code on a cluster, the bottleneck will be fetching the photometric catalogs
over the internet. The MPI mode is still useful though on any machine with multiple cores.
The latest tagged version of **zCluster** can be installed using ``pip``:
.. code-block::
pip install zCluster
Other dependencies will be installed by ``pip``.
You may also install using the standard ``setup.py`` script, e.g., as root:
.. code-block::
sudo python setup.py install
Alternatively,
.. code-block::
python setup.py install --user
will install ``zCluster`` under ``$HOME/.local`` (on Ubuntu), and in some other default location on Mac.
You can also use the ``--prefix`` option, e.g.,
.. code-block::
python setup.py install --prefix=$HOME/local
and then add ``$HOME/local/bin`` to $PATH, and e.g., ``$HOME/local/lib/python3.6/site-packages`` to
$PYTHONPATH (adjust the path according to your Python version number).
.. code-block::
export PATH=$HOME/local/bin:$PATH
export PYTHONPATH=$HOME/local/lib/python3.6/site-packages:$PYTHONPATH
If **zCluster** has installed correctly, then you should find its command line tools are available, for
example,
.. code-block::
zCluster -h
should display a helpful message about the command-line options for the main ``zCluster`` command.
| zCluster | /zCluster-0.3.0.tar.gz/zCluster-0.3.0/INSTALL.rst | INSTALL.rst |
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes are populated from the [versioneer] section of setup.cfg
    by get_config_from_root(): VCS, style, versionfile_source,
    versionfile_build, tag_prefix, parentdir_prefix, and verbose.
    """
def get_root():
    """Locate and return the project root directory.

    All versioneer commands must be run from the project root, i.e. the
    directory containing setup.py, setup.cfg, and versioneer.py.  The
    current working directory is tried first, then the directory holding
    the executed script (sys.argv[0]).  Raises VersioneerBadRootError if
    neither candidate looks like a project root.
    """
    def _looks_like_root(candidate):
        # A project root contains setup.py and/or versioneer.py.
        return (os.path.exists(os.path.join(candidate, "setup.py"))
                or os.path.exists(os.path.join(candidate, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # Allow 'python path/to/setup.py COMMAND' by falling back to the
        # directory of the invoked script.
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        if not _looks_like_root(root):
            raise VersioneerBadRootError(
                "Versioneer was unable to run the project root directory. "
                "Versioneer requires setup.py to be executed from "
                "its immediate directory (like 'python setup.py COMMAND'), "
                "or in a way that lets it use sys.argv[0] to find the root "
                "(like 'python path/to/setup.py COMMAND').")
    versioneer_py = os.path.join(root, "versioneer.py")
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig instance.
    """
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    # Bug fix: SafeConfigParser and readfp() were deprecated aliases that
    # were removed in Python 3.12.  Use ConfigParser/read_file, keeping a
    # readfp fallback for Python 2 (where read_file does not exist).
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        read_file = getattr(parser, "read_file", None) or parser.readfp
        read_file(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Return option *name* from the [versioneer] section, or None if
        # it is absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # A quoted empty string in setup.cfg means "no tag prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Callers catch it to fall through to the next version-discovery
    strategy.
    """
# Registries of VCS-specific tools, keyed by VCS name: long-form
# _version.py templates and handler functions.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator registering a handler for *vcs*/*method*.

    The decorated function is stored in ``HANDLERS[vcs][method]`` and
    returned unchanged, so it remains directly callable.
    """
    def decorate(f):
        """Store *f* in ``HANDLERS[vcs][method]`` and return it."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Run the first launchable command from *commands* with *args*.

    Returns a ``(stdout, returncode)`` pair.  ``(None, None)`` means no
    command could be launched at all; ``(None, rc)`` means the command
    ran but exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                # Not installed under this name; try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(exc)
            return None, None
        break
    else:
        # Every candidate was missing (ENOENT).
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    output = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % output)
        return None, process.returncode
    return output, process.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* (a _version.py path) for the ``git_refnames``,
    ``git_full`` and ``git_date`` keyword assignments and returns whichever
    were found as a dict with keys "refnames", "full" and "date".  Returns
    an empty dict when the file is missing or unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if readlines() raises,
        # unlike the previous explicit open()/close() pair.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by git_get_keywords() and must contain
    "refnames" and "full" (optionally "date").  Returns the standard
    versioneer pieces dict; raises NotThisMethod when the keywords are
    missing or still unexpanded (i.e. we are not in a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword substitution never happened: not an exported archive
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date.  run_command returns an (output, returncode) pair.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then 'git add'-ing every file
    Versioneer touched (MANIFEST.in, the version file, __init__.py if given,
    versioneer.py itself, and .gitattributes when modified).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point at the .py source, not the compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # some frozen environments don't define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' closes the handle even if a read fails mid-file (the old
        # open()/close() pair leaked the descriptor on exceptions)
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            # everything after the prefix is taken as the version string
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level and retry
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
# Template for the rewritten _version.py that ships inside sdists: it simply
# embeds the computed version dict as JSON (filled in via the single %s).
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    # accept either Unix or Windows line endings after the opening quotes
    patterns = (r"version_json = '''\n(.*)''' # END VERSION_JSON",
                r"version_json = '''\r\n(.*)''' # END VERSION_JSON")
    for pattern in patterns:
        mo = re.search(pattern, contents, re.M | re.S)
        if mo:
            return json.loads(mo.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # remove the old file first so a hardlinked copy is never edited in place
    os.unlink(filename)
    serialized = json.dumps(versions, sort_keys=True,
                            indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % serialized)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Guards against pieces["closest-tag"] being present but None (the
    no-tags case): dict.get's default is not used when the key exists with
    a None value, so the original '"+" in pieces.get(...)' could raise
    TypeError.  'or ""' normalizes None to an empty string.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
    else:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            # attach the local-version segment describing the extra commits
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
    # 'dirty' always forces a marker, whether tagged or not
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to a zero-based dev version
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: synthesize from distance alone
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not pieces["distance"] and not pieces["dirty"]:
        # clean build exactly on the tag: just the tag itself
        return tag
    # either no tag at all (use "0"), or extra commits/dirt past the tag
    rendered = "%s.post%d" % (tag or "0", pieces["distance"])
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered = "%s-%d-g%s" % (tag, pieces["distance"],
                                      pieces["short"])
    else:
        # exception #1: bare hex, no 'g' prefix
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # always emit distance and hash, even when distance is zero
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # propagate the failure without attempting to render anything
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # dispatch table instead of an if/elif chain
    renderers = {"pep440": render_pep440,
                 "pep440-pre": render_pep440_pre,
                 "pep440-post": render_pep440_post,
                 "pep440-old": render_pep440_old,
                 "git-describe": render_git_describe,
                 "git-describe-long": render_git_describe_long}
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
# NOTE(review): presumably raised by get_root() (defined outside this view)
# when setup.py/versioneer.py cannot be located — confirm against caller.
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries, in order: expanded git-archive keywords, a rewritten
    _version.py, the VCS itself ('git describe'), and finally the parent
    directory name.  Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            # fall through to the next strategy
            pass
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # every strategy failed: report a placeholder version with an error
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Get the short version string for this project."""
    versions = get_versions()
    return versions["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict mapping command names ("version", "build_py", "sdist",
    and optionally "build_exe"/"py2exe") to command classes suitable for
    passing as setup(cmdclass=...).
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        # 'setup.py version': print the computed version and its components
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]
    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2
        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
# Error text printed by do_setup() when setup.cfg lacks the [versioneer]
# section (or is missing required options).
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out sample appended to setup.cfg when no config exists at all.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Snippet appended to the package __init__.py so that
# mypackage.__version__ reflects the versioneer-computed version.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Creates the project's _version.py from the LONG_VERSION_PY template,
    appends the version snippet to the package __init__.py, ensures
    MANIFEST.in includes versioneer.py and the version file, and applies
    VCS-specific changes.  Returns 0 on success, 1 on configuration error.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            # no config at all: seed setup.cfg with the commented sample
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    # Markers whose presence indicates a properly integrated setup.py,
    # mapped to the label recorded when each one is seen.
    required = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    # Old-style in-setup.py configuration that should now live in setup.cfg.
    legacy_setters = ("versioneer.VCS", "versioneer.versionfile_source")
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f:
            for marker, label in required.items():
                if marker in line:
                    found.add(label)
            if any(marker in line for marker in legacy_setters):
                setters = True
    if len(found) != len(required):
        # At least one of the required integration points is missing.
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # Command-line entry point.  Only the "setup" subcommand is handled in
    # this view: it installs Versioneer into the current project and then
    # sanity-checks setup.py.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            # Non-zero exit status signals the installation is incomplete.
            sys.exit(1)
Here we provide a quick example of running **zCluster** on part of
the `Burenin et al. (2007) <https://ui.adsabs.harvard.edu/abs/2007ApJS..172..561B/abstract>`_ 400SD cluster catalog.
.. note:: The catalog files needed for this tutorial can be
found in the `examples/ <https://github.com/ACTCollaboration/zCluster/tree/master/examples/>`_
directory of the **zCluster** source distribution.
There are two cluster catalog files provided, ``400SDAll.csv`` and ``400SDSmall.csv``. The latter contains just the
first 20 rows of the former, and is enough to check that the code is working.
To run :ref:`zClusterCommand` using galaxy photometry from SDSS DR12, use:
.. code-block::
zCluster 400SDSmall.csv SDSSDR12
To plot a comparison of the results with the spectroscopic redshifts in the 400SD catalog, run:
.. code-block::
zClusterComparisonPlot 400SDSmall.csv zCluster_400SDSmall_SDSSDR12.fits
These examples use the default options - to see others, run each command with the ``-h`` flag.
| zCluster | /zCluster-0.3.0.tar.gz/zCluster-0.3.0/examples/README_EXAMPLE.rst | README_EXAMPLE.rst |
# zCurve
[](https://zenodo.org/badge/latestdoi/367796024)
zCurve is a Python module with methods to efficiently map multidimensional data to a single dimension while preserving locality of the data points.
This mapping is commonly known as Z-order, Lebesgue curve, Morton space filling curve, Morton order or Morton code.

*Image by David Eppstein, 2008*
The Morton code of a multi-dimensional data point is calculated by bitwise interlacing the binary representations of its coordinate values.
zCurve provides two functions for handling the encoding and decoding of data points with _arbitrary_ dimensionality and _arbitrary_ coordinate size:
```python
interlace(*data_point: int, dims: int = None, bits_per_dim: int = None) -> int
```
```python
deinterlace(code_point: int, dims: int = 3) -> List[int]
```
When handling large multi-dimensional datasets (n > 10,000), zCurve offers some simple but convenient means of parallelizing the Morton encoding and decoding:
```python
par_interlace(data_points: List[List[int]], dims: int = None, bits_per_dim: int = None) -> List[int]
```
```python
par_deinterlace(code_points: List[int], dims: int = 3) -> List[List[int]]
```
Given the Morton codes of a multi-dimensional dataset, we can perform multi-dimensional range search using only a one-dimensional data structure.
For range searching, zCurve offers two functions for calculating the necessary `LITMAX` and `BIGMIN` values:
```python
prev_morton(code_point: int, rmin_code: int, rmax_code: int, dims: int = 3) -> int
```
```python
next_morton(code_point: int, rmin_code: int, rmax_code: int, dims: int = 3) -> int
```
This implementation is based on the following paper
> Tropf, Herbert, and Helmut Herzog. "Multidimensional Range Search in Dynamically Balanced Trees." ANGEWANDTE INFO. 2 (1981): 71-77.
and it makes heavy use of the excellent [gmpy2 module](https://gmpy2.readthedocs.io/en/latest/).
## Installation
```bash
pip install zCurve
```
## Usage
### Basics
````python
import zCurve as z
````
imports the module.
```python
code = z.interlace(2,16,8)
```
interlaces the 3D data point `(2,16,8)` into Morton code point `10248`.
When you explicitly specify the dimensionality and bits per dimension of your data point
```python
code = z.interlace(2,16,8, dims=3, bits_per_dim=5)
```
performance will benefit substantially.
```python
z.deinterlace(4711)
```
deinterlaces the Morton code point `4711` into the 3D data point `(29,1,3)`.
### Parallel interlacing/deinterlacing
Given a potentially large list of n-dimensional `data_points`
````python
from random import randrange
bit_size = 16
max_val = 2**bit_size - 1
no_samples = 10**6
data_points = [(randrange(0, max_val), randrange(0, max_val), randrange(0, max_val)) for i in range(no_samples)]
````
we can speed up things by using `par_interlace` and `par_deinterlace`
```python
morton_codes = z.par_interlace(data_points, dims=3, bits_per_dim=16)
data_points == z.par_deinterlace(morton_codes, dims=3)
```
### Range searching

*Image by Tropf and Herzog, 1981*
When range searching, we can prune the search space by calculating `BIGMIN` (aka "GetNextZ-address") and `LITMAX` (aka "GetPrevZ-address") values.
```python
point = z.interlace(6, 3, dims=2) # => 30
rmin = z.interlace(5, 3, dims=2) # => 27
rmax = z.interlace(10, 5, dims=2) # => 102
BIGMIN = z.next_morton(point, rmin, rmax, dims=2) # => 31
LITMAX = z.prev_morton(point, rmin, rmax, dims=2) # => 27
```
In addition, we can easily check if a given Morton code point is within a specified range
```python
z.in_range(58,27,102, dims=2) # => False
z.in_range(49,27,102, dims=2) # => True
```
## Citation
```bibtex
@misc{rmrschub_2021_zCurve,
author = {René Schubotz},
title = {{zCurve: Multi-dimensional indexing using Morton space filling curves.}},
month = may,
year = 2021,
doi = {10.5281/zenodo.4777584},
version = {0.0.4},
publisher = {Zenodo},
url = {https://github.com/rmrschub/zCurve}
}
```
## License
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
| zCurve | /zCurve-0.0.4.tar.gz/zCurve-0.0.4/README.md | README.md |
Changelog
=========
4.3 (2022-12-18)
----------------
- Sort imports with isort.
- Add support for Python 3.11.
4.2 (2021-10-22)
----------------
- Add support for Python 3.8, 3.9 and 3.10.
- Drop support for Python 3.4.
- Add an exception for the HTTP status code 418.
- Don't override the `content-type` header if already set.
(`#12 <https://github.com/zopefoundation/zExceptions/pull/12>`_)
4.1 (2018-10-05)
----------------
- Add support for Python 3.7.
4.0 (2018-01-27)
----------------
- Drop support for string exceptions.
3.6.1 (2017-05-17)
------------------
- Increase Python 3 compatibility
3.6 (2017-02-05)
----------------
- Add realm as an argument to unauthorized exceptions, its presence
causing a `WWW-Authenticate` header to be emitted.
- Set `location` header during `__init__` of redirect exceptions.
3.5 (2017-02-05)
----------------
- Drop support for Python 3.3, add support for Python 3.6.
- Use `str(self)` as detail if it is not set.
- Add a `setHeader` method to add a response header to an HTTPException.
- `upgradeException` now also supports finding an HTTPException class
with the same name as a non-HTTPException class.
3.4 (2016-09-08)
----------------
- Use `HTTPException.body_template` when title and detail are set.
- Add new title and detail attributes to HTTPException.
3.3 (2016-08-06)
----------------
- Add exception classes for all HTTP status codes.
3.2 (2016-07-22)
----------------
- Implement basic subset of Response features in HTTPException class.
3.1 (2016-07-22)
----------------
- Mark exceptions with appropriate zope.publisher interfaces.
- Add a new common base class `zExceptions.HTTPException` to all exceptions.
3.0 (2016-04-03)
----------------
- Add compatibility with PyPy and Python 3.
- Arguments to the Unauthorized exception are assumed to be utf8-encoded
if they are bytes.
2.13.0 (2010-06-05)
-------------------
- Released as separate package.
| zExceptions | /zExceptions-4.3.tar.gz/zExceptions-4.3/CHANGES.rst | CHANGES.rst |
Changelog
=========
3.1 (2020-11-18)
----------------
- Add support for Python 3.6 up to 3.9.
- Drop support for Python 3.3 and 3.4.
- Flake8 the code.
3.0 (2016-04-03)
----------------
- Add coverage testing.
- Add Python 3.3 - 3.5 compatibility.
- Declare currently-supported Python versions, and test them.
- Normalize package structure (``README.rst``, ``CHANGES.rst``). Synthesize
package description from README.rst and CHANGES.rst.
- Use nose for testing instead of zope.testrunner and test
for 100% test coverage.
2.12.0 (2012-08-30)
-------------------
- Rely on refactored base class from ``ZConfig`` for testing logging.
| zLOG | /zLOG-3.1.tar.gz/zLOG-3.1/CHANGES.rst | CHANGES.rst |
.. image:: https://github.com/zopefoundation/zLOG/workflows/tests/badge.svg
:target: https://github.com/zopefoundation/zLOG/actions?query=workflow%3Atests
.. image:: https://coveralls.io/repos/github/zopefoundation/zLOG/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zLOG?branch=master
.. image:: https://img.shields.io/pypi/v/zLOG.svg
:target: https://pypi.org/project/zLOG/
:alt: Current version on PyPI
.. image:: https://img.shields.io/pypi/pyversions/zLOG.svg
:target: https://pypi.org/project/zLOG/
:alt: Supported Python versions
``zLOG``
========
This package provides a general logging facility that, at this point,
is just a small shim over Python's logging module. Therefore, unless
you need to support a legacy package from the Zope 2 world, you're
probably better off using Python's logging module.
| zLOG | /zLOG-3.1.tar.gz/zLOG-3.1/README.rst | README.rst |
<!-- SPDX-License-Identifier: CC-BY-4.0 -->


# Feilong
## Description
Feilong is a development sdk for managing z/VM. It provides a set of APIs to operate z/VM including guest, image, network, volume etc.
Just like os-win for nova hyperv driver and oslo.vmware for nova vmware driver, Feilong is for nova z/VM driver and other z/VM related openstack driver such as neutron, ceilometer.
## Quickstart
Please refer to [Quick Start Guide](https://cloudlib4zvm.readthedocs.io/en/latest/quickstart.html).
## Documentation
Please refer to [Documentation of Feilong](https://cloudlib4zvm.readthedocs.io/en/latest/index.html).
## License
This package is licensed under the [Apache 2.0 License](LICENSE)
## Bug reporting
If you encounter any problem with this package, please open a bug against
[cloud connector issue tracker](https://bugs.launchpad.net/python-zvm-sdk/+bug)
## Governance
Feilong is a hosted project at the [Open Mainframe Project](https://openmainframeproject.com), and is openly governed as defined in [GOVERNANCE.md](GOVERNANCE.md).
----
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Documentation license: <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.
| zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/README.md | README.md |
### Welcome
We welcome contributions to python-zvm-sdk!
### Repository
The repository for python-zvm-sdk on GitHub:
https://github.com/openmainframeproject/python-zvm-sdk
### Reporting bugs
If you are a user and you find a bug, please submit a [bug](https://bugs.launchpad.net/python-zvm-sdk). Please try to provide sufficient information for someone else to reproduce the issue. One of the project's maintainers should respond to your issue within 24 hours. If not, please bump the issue and request that it be reviewed.
### Fixing bugs
Review the [bug list](https://bugs.launchpad.net/python-zvm-sdk) and find something that interests you.
We are using the [GerritHub](https://review.gerrithub.io/) process to manage code contributions.
To work on something, whether a new feature or a bugfix:
## 1. Clone python-zvm-sdk locally
```
git clone https://github.com/openmainframeproject/python-zvm-sdk.git
```
## 2. Add the GerritHub repository as a remote as gerrit
```
git remote add gerrit ssh://<username>@review.gerrithub.io:29418/openmainframeproject/python-zvm-sdk
```
Where ```<username>``` is your GerritHub username.
And, you should add the public key of your workstation into your GerritHub SSH public keys.
## 3. Create a branch
Create a descriptively-named branch off of your cloned repository
```
cd python-zvm-sdk
git checkout -b fix-bug-xxxx
```
## 4. Commit your code
Commit to that branch locally
## 5. Commit messages
Commit messages must have a short description no longer than 50 characters followed by a blank line and a longer, more descriptive message that includes reference to issue(s) being addressed so that they will be automatically closed on a merge e.g. ```Closes #1234``` or ```Fixes #1234```.
## 6. Run checks
Run checks via issue:
```
tox -v
```
## 7. Once all checks passed, you can submit your change for review:
```
git review <branch-name>
```
## 8. Any code changes that affect documentation should be accompanied by corresponding changes (or additions) to the documentation and tests. This will ensure that if the merged PR is reversed, all traces of the change will be reversed as well.
| zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/CONTRIBUTING.md | CONTRIBUTING.md |
import abc
import re
import shutil
import uuid
import six
import threading
import os
from zvmsdk import config
from zvmsdk import constants
from zvmsdk import database
from zvmsdk import dist
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import utils as zvmutils
from zvmsdk import vmops
from zvmsdk import utils
# Module-level singleton returned by get_volumeop(); created on first use.
_VolumeOP = None

CONF = config.CONF
LOG = log.LOG

# Keys expected in the instance parameters dict:
NAME = 'name'
OS_TYPE = 'os_type'
# Keys expected in the volume parameters dict:
SIZE = 'size'
TYPE = 'type'
LUN = 'lun'
# Keys expected in the connection_info dict:
ALIAS = 'alias'
PROTOCOL = 'protocol'
FCPS = 'fcps'
WWPNS = 'wwpns'
DEDICATE = 'dedicate'

# Re-entrant lock serializing FCP device reservation across threads.
_LOCK_RESERVE_FCP = threading.RLock()
def get_volumeop():
    """Return the process-wide VolumeOperatorAPI, creating it on first call."""
    global _VolumeOP
    if _VolumeOP is None:
        _VolumeOP = VolumeOperatorAPI()
    return _VolumeOP
@six.add_metaclass(abc.ABCMeta)
class VolumeOperatorAPI(object):
    """Volume operation APIs oriented towards SDK driver.

    The reason to design these APIs is to facilitate the SDK driver
    issuing a volume related request without knowing details. The
    details among different distributions, different instance status,
    different volume types and so on are all hidden behind these APIs.
    The only thing the issuer need to know is what it want to do on
    which targets.

    In fact, that's an ideal case. In the real world, something like
    connection_info still depends on different complex and the issuer
    needs to know how to deal with its case. Even so, these APIs can
    still make things much easier.
    """

    # Shared FCPVolumeManager; created once and reused by every instance.
    _fcp_manager_obj = None

    def __init__(self):
        if not VolumeOperatorAPI._fcp_manager_obj:
            VolumeOperatorAPI._fcp_manager_obj = FCPVolumeManager()
        self._volume_manager = VolumeOperatorAPI._fcp_manager_obj

    def attach_volume_to_instance(self, connection_info):
        """Attach the volume described by connection_info to its instance."""
        self._volume_manager.attach(connection_info)

    def detach_volume_from_instance(self, connection_info):
        """Detach the volume described by connection_info from its instance."""
        self._volume_manager.detach(connection_info)

    def volume_refresh_bootmap(self, fcpchannel, wwpn, lun,
                               wwid='',
                               transportfiles='', guest_networks=None,
                               fcp_template_id=None):
        """Refresh the bootmap of the volume identified by (fcpchannel,
        wwpn, lun); all arguments are passed through to the manager."""
        return self._volume_manager.volume_refresh_bootmap(
            fcpchannel, wwpn, lun, wwid=wwid,
            transportfiles=transportfiles,
            guest_networks=guest_networks,
            fcp_template_id=fcp_template_id)

    def get_volume_connector(self, assigner_id, reserve,
                             fcp_template_id=None, sp_name=None):
        """Return the volume connector info for the given assigner."""
        return self._volume_manager.get_volume_connector(
            assigner_id, reserve, fcp_template_id=fcp_template_id,
            sp_name=sp_name)

    def check_fcp_exist_in_db(self, fcp, raise_exec=True):
        """Check whether the FCP device exists in the database.

        :param raise_exec: when True, a missing FCP raises instead of
            returning a result (behavior delegated to the manager).
        """
        return self._volume_manager.check_fcp_exist_in_db(fcp, raise_exec)

    def get_fcp_usage(self, fcp):
        """Return the usage record of the given FCP device."""
        return self._volume_manager.get_fcp_usage(fcp)

    def set_fcp_usage(self, assigner_id, fcp, reserved, connections,
                      fcp_template_id):
        """Set the usage record of the given FCP device.

        NOTE: the manager's parameter order is (fcp, assigner_id, ...),
        intentionally different from this method's signature.
        """
        return self._volume_manager.set_fcp_usage(fcp, assigner_id,
                                                  reserved, connections,
                                                  fcp_template_id)

    def create_fcp_template(self, name, description: str = '',
                            fcp_devices: str = '',
                            host_default: bool = False,
                            default_sp_list: list = None,
                            min_fcp_paths_count: int = None):
        """Create a new FCP multipath template.

        :param default_sp_list: storage providers for which this template is
            the default; None (the default) is treated as an empty list.
            A None sentinel is used instead of a mutable ``[]`` default to
            avoid sharing one list object across calls.
        """
        if default_sp_list is None:
            default_sp_list = []
        return self._volume_manager.fcp_mgr.create_fcp_template(
            name, description, fcp_devices, host_default, default_sp_list,
            min_fcp_paths_count)

    def edit_fcp_template(self, fcp_template_id, name=None,
                          description=None, fcp_devices=None,
                          host_default=None, default_sp_list=None,
                          min_fcp_paths_count=None):
        """Edit an existing FCP template; None arguments mean 'unchanged'."""
        return self._volume_manager.fcp_mgr.edit_fcp_template(
            fcp_template_id, name=name, description=description,
            fcp_devices=fcp_devices, host_default=host_default,
            default_sp_list=default_sp_list,
            min_fcp_paths_count=min_fcp_paths_count)

    def get_fcp_templates(self, template_id_list=None, assigner_id=None,
                          default_sp_list=None, host_default=None):
        """List FCP templates, optionally filtered by the given criteria."""
        return self._volume_manager.fcp_mgr.get_fcp_templates(
            template_id_list, assigner_id, default_sp_list, host_default)

    def get_fcp_templates_details(self, template_id_list=None, raw=False,
                                  statistics=True, sync_with_zvm=False):
        """Return detailed (optionally raw/statistical) template info."""
        return self._volume_manager.fcp_mgr.get_fcp_templates_details(
            template_id_list, raw=raw, statistics=statistics,
            sync_with_zvm=sync_with_zvm)

    def delete_fcp_template(self, template_id):
        """Delete the FCP template identified by template_id."""
        return self._volume_manager.fcp_mgr.delete_fcp_template(template_id)
@six.add_metaclass(abc.ABCMeta)
class VolumeConfiguratorAPI(object):
    """Volume configure APIs to implement volume config jobs on the
    target instance, like: attach, detach, and so on.

    The reason to design these APIs is to hide the details among
    different Linux distributions and releases.
    """

    def __init__(self):
        # Helpers for VM operations, distro-specific script generation,
        # and SMT (Systems Management Tool) communication with z/VM.
        self._vmop = vmops.get_vmops()
        self._dist_manager = dist.LinuxDistManager()
        self._smtclient = smtclient.get_smtclient()

    def check_IUCV_is_ready(self, assigner_id):
        """Return True if the IUCV channel to the VM is usable.

        A trivial command ('pwd') is executed in the guest to probe the
        channel.  An UNAUTHORIZED error is fatal and re-raised as
        SDKVolumeOperationError; any other failure is interpreted as the
        guest being powered off and False is returned.

        :param assigner_id: (str) userid of the target virtual machine
        :raises SDKVolumeOperationError: if IUCV authorization fails
        """
        # Make sure the iucv channel is ready for communication with VM
        ready = True
        try:
            self._smtclient.execute_cmd(assigner_id, 'pwd')
        except exception.SDKSMTRequestFailed as err:
            if 'UNAUTHORIZED_ERROR' in err.format_message():
                # If unauthorized, we must raise exception
                errmsg = err.results['response'][0]
                msg = ('IUCV failed to get authorization from VM %(vm)s with '
                       'error %(err)s' % {'vm': assigner_id,
                                          'err': errmsg})
                LOG.error(msg)
                raise exception.SDKVolumeOperationError(rs=6,
                                                        userid=assigner_id,
                                                        msg=errmsg)
            else:
                # In such case, we can continue without raising exception
                ready = False
                msg = ('Failed to connect VM %(vm)s with error '
                       '%(err)s, assume it is OFF status '
                       'and continue' % {'vm': assigner_id,
                                         'err': err.results['response'][0]})
                LOG.debug(msg)
        return ready

    def _get_status_code_from_systemctl(self, assigner_id, command):
        """Get the exit status of a service from 'systemctl status' output.

        systemctl status prints a line such as:
            Main PID: 28406 (code=exited, status=0/SUCCESS)
        and this method returns the integer after 'status=' (0 in the
        example above).  If no 'Main PID' line is present, 0 is returned.

        :param assigner_id: (str) userid of the target virtual machine
        :param command: (str) the 'systemctl status <unit>' command to run
        """
        output = self._smtclient.execute_cmd_direct(assigner_id, command)
        exit_code = 0
        for line in output['response']:
            if 'Main PID' in line:
                # the status code start with = and before /FAILURE
                pattern = '(?<=status=)([0-9]+)'
                ret = re.search(pattern, line)
                exit_code = int(ret.group(1))
                break
        return exit_code

    def config_attach(self, fcp_list, assigner_id, target_wwpns, target_lun,
                      multipath, os_version, mount_point):
        """Configure a volume attachment inside the target virtual machine.

        Punches a distro-specific attach script into the guest and, when
        the guest is reachable over IUCV (i.e. powered on), runs it via the
        zvmguestconfigure service and verifies its exit code.

        :param fcp_list: (list) FCP device numbers used for the attachment
        :param assigner_id: (str) userid of the target virtual machine
        :param target_wwpns: (list) target WWPNs of the volume
        :param target_lun: LUN of the volume
        :param multipath: multipath setting passed through to the distro
            script generator
        :param os_version: guest OS version, used to pick the distro handler
        :param mount_point: mount point of the volume inside the guest
        :raises SDKVolumeOperationError: if the in-guest configuration
            fails or times out
        """
        LOG.info("Begin to configure volume (WWPN:%s, LUN:%s) on the "
                 "virtual machine %s with FCP devices "
                 "%s." % (target_wwpns, target_lun, assigner_id, fcp_list))
        # Instantiate the distro handler matching the guest's OS version.
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        self.configure_volume_attach(fcp_list, assigner_id, target_wwpns,
                                     target_lun, multipath, os_version,
                                     mount_point, linuxdist)
        iucv_is_ready = self.check_IUCV_is_ready(assigner_id)
        if iucv_is_ready:
            # If VM is active rather than shutdown, then restart zvmguestconfigure
            # to run punch script(i.e. reader file) in the VM operating system
            active_cmds = linuxdist.create_active_net_interf_cmd()
            ret = self._smtclient.execute_cmd_direct(
                assigner_id, active_cmds,
                timeout=CONF.volume.punch_script_execution_timeout)
            LOG.info('attach scripts return values: %s' % ret)
            if ret['rc'] != 0:
                # if return code is 64 means timeout
                # no need to check the exist code of systemctl and return
                if ret['rc'] == 64:
                    errmsg = ('Failed to configure volume in the virtual machine '
                              '%s for volume (WWPN:%s, LUN:%s) '
                              'because exceed the timeout %s.'
                              % (assigner_id, target_wwpns, target_lun,
                                 CONF.volume.punch_script_execution_timeout))
                    LOG.error(errmsg)
                    raise exception.SDKVolumeOperationError(
                        rs=8, userid=assigner_id, msg=errmsg)
                # get exit code of zvmguestconfigure.service from VM OS,
                # the exit code reflects the result of running punch script
                get_status_cmd = 'systemctl status zvmguestconfigure.service'
                exit_code = self._get_status_code_from_systemctl(
                    assigner_id, get_status_cmd)
                # Attach script exit code explanation:
                # 1: failed because multipathd service is not active
                # 2: failed because input parameters may have problems
                # 3: failed because can not found intersection between input WWPNs and lszfcp output
                # 4: failed because no disk file found in the target VM, means no volume shown in the target VM
                if exit_code == 1:
                    errmsg = ('Failed to configure volume because the '
                              'multipathd service is not active '
                              'in the target virtual machine'
                              '(userid:%s).' % assigner_id)
                elif exit_code == 2:
                    errmsg = ('Failed to configure volume because the '
                              'configuration process terminate early with '
                              'exit code %s, refer to the /var/log/messages in '
                              'target virtual machine(userid:%s) for more '
                              'details.' % (exit_code, assigner_id))
                elif exit_code == 3:
                    errmsg = ('Failed to configure volume because can not '
                              'find valid target WWPNs for FCP devices %s, '
                              'refer to the /var/log/messages in the target '
                              'virtual machine(userid:%s) for more '
                              'details.' % (fcp_list, assigner_id))
                elif exit_code == 4:
                    errmsg = ('Failed to configure volume because the '
                              'volume(target WWPN:%s, LUN:%s) did not show up in '
                              'the target virtual machine(userid:%s), please '
                              'check Fibre Channel connectivity between '
                              'the FCP devices(%s) and target WWPN.'
                              % (target_wwpns, target_lun, assigner_id, fcp_list))
                else:
                    # Any other non-zero exit code: report it generically.
                    errmsg = ('Failed to configure volume in the target '
                              'virtual machine(userid:%s) for volume'
                              '(target WWPN:%s, LUN:%s) on FCP devices %s with '
                              'exit code: %s, refer to the /var/log/messages '
                              'in the target virtual machine for more details.'
                              % (assigner_id, target_wwpns, target_lun,
                                 fcp_list, exit_code))
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(rs=8,
                                                        userid=assigner_id,
                                                        msg=errmsg)
        LOG.info("Configuration of volume (WWPN:%s, LUN:%s) on the "
                 "target virtual machine %s with FCP devices "
                 "%s is done." % (target_wwpns, target_lun, assigner_id,
                                  fcp_list))

    def config_detach(self, fcp_list, assigner_id, target_wwpns, target_lun,
                      multipath, os_version, mount_point, connections):
        """Deconfigure a volume attachment inside the target virtual machine.

        Mirror image of config_attach: punches a distro-specific detach
        script into the guest and, when the guest is reachable over IUCV,
        runs it and verifies the zvmguestconfigure service exit code.

        :param connections: remaining connection count, forwarded to the
            distro detach script generator
        :raises SDKVolumeOperationError: if the in-guest deconfiguration
            fails or times out
        """
        LOG.info("Begin to deconfigure volume (WWPN:%s, LUN:%s) on the "
                 "virtual machine %s with FCP devices "
                 "%s." % (target_wwpns, target_lun, assigner_id, fcp_list))
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        self.configure_volume_detach(fcp_list, assigner_id, target_wwpns,
                                     target_lun, multipath, os_version,
                                     mount_point, linuxdist, connections)
        iucv_is_ready = self.check_IUCV_is_ready(assigner_id)
        if iucv_is_ready:
            # If VM is active rather than shutdown, then restart zvmguestconfigure
            # to run punch script(i.e. reader file) in the VM operating system
            active_cmds = linuxdist.create_active_net_interf_cmd()
            ret = self._smtclient.execute_cmd_direct(
                assigner_id, active_cmds,
                timeout=CONF.volume.punch_script_execution_timeout)
            LOG.info('detach scripts return values: %s' % ret)
            if ret['rc'] != 0:
                # if return code is 64 means timeout
                # no need to check the exist code of systemctl and return
                if ret['rc'] == 64:
                    errmsg = ('detach script execution in the virtual machine '
                              '%s for volume (WWPN:%s, LUN:%s) '
                              'exceed the timeout %s.'
                              % (assigner_id, target_wwpns, target_lun,
                                 CONF.volume.punch_script_execution_timeout))
                    LOG.error(errmsg)
                    raise exception.SDKVolumeOperationError(
                        rs=9, userid=assigner_id, msg=errmsg)
                get_status_cmd = 'systemctl status zvmguestconfigure.service'
                exit_code = self._get_status_code_from_systemctl(
                    assigner_id, get_status_cmd)
                # Detach script exit code explanation:
                # 1: failed because multipathd service is not active
                # 3: failed because can not found intersection between input WWPNs and lszfcp output
                # 4: failed because no disk file found in the target VM, means no volume shown in the target VM
                # 5: failed to flush a multipath device map
                if exit_code == 1:
                    errmsg = ('Failed to deconfigure volume because the '
                              'multipathd service is not active '
                              'in the target virtual machine'
                              '(userid:%s).' % assigner_id)
                elif exit_code == 3:
                    errmsg = ('Failed to deconfigure volume because can not '
                              'find valid target WWPNs for FCP devices %s, '
                              'refer to the /var/log/messages in the target '
                              'virtual machine(userid:%s) for more '
                              'details.' % (fcp_list, assigner_id))
                elif exit_code == 4:
                    errmsg = ('Failed to deconfigure volume because the '
                              'volume(target WWPN:%s, LUN:%s) did not show up in '
                              'the target virtual machine(userid:%s), please '
                              'check Fibre Channel connectivity between '
                              'the FCP devices(%s) and target WWPN.'
                              % (target_wwpns, target_lun, assigner_id, fcp_list))
                elif exit_code == 5:
                    errmsg = ('Failed to deconfigure volume because '
                              'getting error when flushing the multipath '
                              'device maps, refer to the /var/log/messages in '
                              'the target virtual machine(userid:%s) for '
                              'more details.' % assigner_id)
                else:
                    # Any other non-zero exit code: report it generically.
                    errmsg = ('Failed to deconfigure volume in the target '
                              'virtual machine(userid:%s) for volume'
                              '(target WWPN:%s, LUN:%s) on FCP devices %s with '
                              'exit code: %s, refer to the /var/log/messages '
                              'in the target virtual machine for more details.'
                              % (assigner_id, target_wwpns, target_lun,
                                 fcp_list, exit_code))
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(rs=9,
                                                        userid=assigner_id,
                                                        msg=errmsg)
        LOG.info("Deconfiguration of volume (WWPN:%s, LUN:%s) on the "
                 "target virtual machine %s with FCP devices "
                 "%s is done." % (target_wwpns, target_lun, assigner_id,
                                  fcp_list))

    def _create_file(self, assigner_id, file_name, data):
        """Write data into file_name under a fresh guest temp folder.

        :return: (file_path, temp_folder) tuple; cleaning up temp_folder is
            the caller's responsibility.
        """
        temp_folder = self._smtclient.get_guest_temp_path(assigner_id)
        file_path = os.path.join(temp_folder, file_name)
        with open(file_path, "w") as f:
            f.write(data)
        return file_path, temp_folder

    def configure_volume_attach(self, fcp_list, assigner_id, target_wwpns,
                                target_lun, multipath, os_version,
                                mount_point, linuxdist):
        """Generate the distro-specific attach script and punch it into the
        guest's reader (as 'atvol.sh') so it can be executed by the
        zvmguestconfigure service.

        NOTE(review): os_version appears unused here -- the distro handler
        is already supplied via linuxdist; confirm before removing.
        """
        # get configuration commands
        fcp_list_str = ' '.join(fcp_list)
        target_wwpns_str = ' '.join(target_wwpns)
        config_cmds = linuxdist.get_volume_attach_configuration_cmds(
            fcp_list_str, target_wwpns_str, target_lun, multipath,
            mount_point)
        LOG.debug('Got volume attachment configuation cmds for %s,'
                  'the content is:%s'
                  % (assigner_id, config_cmds))
        # write commands into script file
        # (second element returned by _create_file is the temp folder)
        config_file, config_file_path = self._create_file(assigner_id,
                                                          'atvol.sh',
                                                          config_cmds)
        LOG.debug('Creating file %s to contain volume attach '
                  'configuration file' % config_file)
        # punch file into guest
        fileClass = "X"
        try:
            self._smtclient.punch_file(assigner_id, config_file, fileClass)
        finally:
            # Always remove the local temp folder, even if the punch failed.
            LOG.debug('Removing the folder %s ', config_file_path)
            shutil.rmtree(config_file_path)

    def configure_volume_detach(self, fcp_list, assigner_id, target_wwpns,
                                target_lun, multipath, os_version,
                                mount_point, linuxdist, connections):
        """Generate the distro-specific detach script and punch it into the
        guest's reader (as 'devol.sh') so it can be executed by the
        zvmguestconfigure service.
        """
        # get configuration commands
        fcp_list_str = ' '.join(fcp_list)
        target_wwpns_str = ' '.join(target_wwpns)
        config_cmds = linuxdist.get_volume_detach_configuration_cmds(
            fcp_list_str, target_wwpns_str, target_lun, multipath,
            mount_point, connections)
        LOG.debug('Got volume detachment configuation cmds for %s,'
                  'the content is:%s'
                  % (assigner_id, config_cmds))
        # write commands into script file
        # (second element returned by _create_file is the temp folder)
        config_file, config_file_path = self._create_file(assigner_id,
                                                          'devol.sh',
                                                          config_cmds)
        LOG.debug('Creating file %s to contain volume detach '
                  'configuration file' % config_file)
        # punch file into guest
        fileClass = "X"
        try:
            self._smtclient.punch_file(assigner_id, config_file, fileClass)
        finally:
            # Always remove the local temp folder, even if the punch failed.
            LOG.debug('Removing the folder %s ', config_file_path)
            shutil.rmtree(config_file_path)
class FCP(object):
    """In-memory representation of one FCP device parsed from query output."""

    def __init__(self, init_info):
        # All properties default to None until _parse() fills them in.
        self._dev_no = None
        self._dev_status = None
        self._npiv_port = None
        self._chpid = None
        self._physical_port = None
        self._assigned_id = None
        self._owner = None
        self._parse(init_info)

    @staticmethod
    def _get_value_from_line(info_line: str):
        """Return the lower-cased value behind the last colon of a line,
        or None when that value is empty.

        For example, for input 'xxxxxx: VAlval' the result is 'valval'.
        """
        value = info_line.rpartition(':')[2].strip().lower()
        return value or None

    def _parse(self, init_info):
        """Populate this object from the lines describing one FCP device.

        Expected input sample:
               FCP device number: 1D1E
               Status: Free
               NPIV world wide port number: C05076DE330003C2
               Channel path ID: 27
               Physical world wide port number: C05076DE33002E41
               Owner: NONE
        The format comes from the response of xCAT; arbitrary formats are
        not supported.
        """
        expected_lines = constants.FCP_INFO_LINES_PER_ITEM
        # Guard clause: reject anything that is not a list of the expected
        # number of lines.
        if not (isinstance(init_info, list) and
                len(init_info) == expected_lines):
            LOG.warning('When parsing FCP information, got an invalid '
                        'instance %s', init_info)
            return
        for line in init_info:
            if 'FCP device number' in line:
                self._dev_no = self._get_value_from_line(line)
            elif 'Status' in line:
                self._dev_status = self._get_value_from_line(line)
            elif 'NPIV world wide port number' in line:
                self._npiv_port = self._get_value_from_line(line)
            elif 'Channel path ID' in line:
                self._chpid = self._get_value_from_line(line)
                # A CHPID is expected to be exactly two hex characters.
                if len(self._chpid) != 2:
                    LOG.warn("CHPID value %s of FCP device %s is "
                             "invalid!" % (self._chpid, self._dev_no))
            elif 'Physical world wide port numbe' in line:
                self._physical_port = self._get_value_from_line(line)
            elif 'Owner' in line:
                self._owner = self._get_value_from_line(line)
            else:
                LOG.info('Unknown line found in FCP information:%s', line)

    def get_dev_no(self):
        """Return the FCP device number."""
        return self._dev_no

    def get_dev_status(self):
        """Return the FCP device status."""
        return self._dev_status

    def get_npiv_port(self):
        """Return the NPIV WWPN."""
        return self._npiv_port

    def set_npiv_port(self, new_npiv_port: str):
        """Overwrite the NPIV WWPN."""
        self._npiv_port = new_npiv_port

    def set_physical_port(self, new_phy_port: str):
        """Overwrite the physical WWPN."""
        self._physical_port = new_phy_port

    def get_physical_port(self):
        """Return the physical WWPN."""
        return self._physical_port

    def get_chpid(self):
        """Return the channel path ID."""
        return self._chpid

    def get_owner(self):
        """Return the owner of the FCP device."""
        return self._owner

    def is_valid(self):
        # FIXME: add validation later
        return True

    def to_tuple(self):
        """Return this device as a tuple of
        (fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner), e.g.
        ('1a06', 'c05076de33000355', 'c05076de33002641', '27', 'active',
        'user1').
        """
        return (self.get_dev_no(), self.get_npiv_port(),
                self.get_physical_port(), self.get_chpid(),
                self.get_dev_status(), self.get_owner())
class FCPManager(object):
    def __init__(self):
        """Set up DB/SMT helpers and synchronize the FCP DB with z/VM."""
        # _fcp_path_mapping stores the FCP path mapping, indexed by path no.
        self._fcp_path_mapping = {}
        # Persistence layer for FCP device records.
        self.db = database.FCPDbOperator()
        self._smtclient = smtclient.get_smtclient()
        # Bring the FCP DB in line with the live z/VM configuration.
        self.sync_db()
    def sync_db(self):
        """Sync FCP DB with the FCP info queried from zVM.

        Best effort: errors raised during the sync are ignored so that a
        failed sync does not prevent the manager from being constructed.
        """
        with zvmutils.ignore_errors():
            self._sync_db_with_zvm()
def _get_all_fcp_info(self, assigner_id, status=None):
fcp_info = self._smtclient.get_fcp_info_by_status(assigner_id, status)
return fcp_info
def increase_fcp_connections(self, fcp_list, assigner_id=None):
"""Increase connections of the given FCP devices
:param fcp_list: (list) a list of FCP devices
:param assigner_id: (str) the userid of the virtual machine
:return fcp_connections: (dict)
fcp_connections example {'1a10': 1, '1b10', 0}
the values are the connections of the FCP device
"""
with database.get_fcp_conn():
fcp_connections = {}
for fcp in fcp_list:
# increase connections by 1
fcp_connections[fcp] = self.db.increase_connections_by_assigner(fcp, assigner_id)
return fcp_connections
def decrease_fcp_connections(self, fcp_list):
"""Decrease connections of FCP devices by 1
:param fcp_list: (list) a list of FCP devices
:return fcp_connections: (dict)
fcp_connections example { '1a10': 1, '1b10', 0 }
the values are the connections of the FCP device
"""
with database.get_fcp_conn():
fcp_connections = {}
for fcp in fcp_list:
try:
LOG.info('Decreasing the connections of FCP device {}'.format(fcp))
# Decrease connections of FCP device by 1
fcp_connections[fcp] = self.db.decrease_connections(fcp)
except exception.SDKObjectNotExistError:
fcp_connections[fcp] = 0
pass
return fcp_connections
def _valid_fcp_devcie_wwpn(self, fcp_list, assigner_id):
"""This method is to
check if the FCP wwpn_npiv or wwpn_phy is empty string,
if yes, raise error"""
for fcp in fcp_list:
fcp_id, wwpn_npiv, wwpn_phy, *_ = fcp
if not wwpn_npiv:
# wwpn_npiv not found in FCP DB
errmsg = ("NPIV WWPN of FCP device %s not found in "
"database." % fcp_id)
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
# We use initiator to build up zones on fabric, for NPIV, the
# virtual ports are not yet logged in when we creating zones.
# so we will generate the physical virtual initiator mapping
# to determine the proper zoning on the fabric.
# Refer to #7039 for details about avoid creating zones on
# the fabric to which there is no fcp connected.
if not wwpn_phy:
errmsg = ("Physical WWPN of FCP device %s not found in "
"database." % fcp[0])
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
def reserve_fcp_devices(self, assigner_id, fcp_template_id, sp_name):
"""
Reserve FCP devices by assigner_id and fcp_template_id. In this method:
1. If fcp_template_id is specified, then use it. If not, get the sp
default FCP Multipath Template, if no sp default template, use host default
FCP Multipath Template.
If host default template is not found, then raise error.
2. Get FCP list from db by assigner and fcp_template whose reserve=1
3. If fcp_list is not empty, just to use them.
4. If fcp_list is empty, get one from each path,
then update 'reserved' and 'tmpl_id' in fcp table.
Returns: fcp_list and fcp_template_id.
The fcp list data structure: [(fcp_id, wwpn_npiv, wwpn_phy)].
An example of fcp_list:
[('1c10', 'c12345abcdefg1', 'c1234abcd33002641'),
('1d10', 'c12345abcdefg2', 'c1234abcd33002641')]
"""
with database.get_fcp_conn():
fcp_tmpl_id = fcp_template_id
if not fcp_tmpl_id:
LOG.info("FCP Multipath Template id is not specified when reserving FCP "
"devices for assigner %s." % assigner_id)
if sp_name:
LOG.info("Get the default FCP Multipath Template id for Storage "
"Provider %s " % sp_name)
default_tmpl = self.db.get_sp_default_fcp_template([sp_name])
if not sp_name or not default_tmpl:
LOG.info("Can not find the default FCP Multipath Template id for "
"storage provider %s. Get the host default FCP "
"template id for assigner %s" % (sp_name,
assigner_id))
default_tmpl = self.db.get_host_default_fcp_template()
if default_tmpl:
fcp_tmpl_id = default_tmpl[0][0]
LOG.info("The default FCP Multipath Template id is %s." % fcp_tmpl_id)
else:
errmsg = ("No FCP Multipath Template is specified and "
"no default FCP Multipath Template is found.")
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
global _LOCK_RESERVE_FCP
_LOCK_RESERVE_FCP.acquire()
try:
# go here, means try to attach volumes
# first check whether this userid already has a FCP device
# get the FCP devices belongs to assigner_id
fcp_list = self.db.get_allocated_fcps_from_assigner(
assigner_id, fcp_tmpl_id)
LOG.info("Previously allocated records %s for "
"instance %s in FCP Multipath Template %s." %
([f['fcp_id'] for f in fcp_list],
assigner_id, fcp_tmpl_id))
if not fcp_list:
# Sync DB to update FCP state,
# so that allocating new FCPs is based on the latest FCP state
self._sync_db_with_zvm()
# allocate new ones if fcp_list is empty
LOG.info("There is no allocated FCP devices for virtual machine %s, "
"allocating new ones." % assigner_id)
if CONF.volume.get_fcp_pair_with_same_index:
'''
If use get_fcp_pair_with_same_index,
then fcp pair is randomly selected from below combinations.
[fa00,fb00],[fa01,fb01],[fa02,fb02]
'''
free_unreserved = self.db.get_fcp_devices_with_same_index(
fcp_tmpl_id)
else:
'''
If use get_fcp_pair,
then fcp pair is randomly selected from below combinations.
[fa00,fb00],[fa01,fb00],[fa02,fb00]
[fa00,fb01],[fa01,fb01],[fa02,fb01]
[fa00,fb02],[fa01,fb02],[fa02,fb02]
'''
free_unreserved = self.db.get_fcp_devices(fcp_tmpl_id)
if not free_unreserved:
return [], fcp_tmpl_id
available_list = free_unreserved
fcp_ids = [fcp[0] for fcp in free_unreserved]
# record the assigner id in the fcp DB so that
# when the vm provision with both root and data volumes
# the root and data volume would get the same FCP devices
# with the get_volume_connector call.
assigner_id = assigner_id.upper()
self.db.reserve_fcps(fcp_ids, assigner_id, fcp_tmpl_id)
LOG.info("Newly allocated %s fcp for %s assigner "
"and FCP Multipath Template %s" %
(fcp_ids, assigner_id, fcp_tmpl_id))
else:
# reuse the old ones if fcp_list is not empty
LOG.info("Found allocated fcps %s for %s in FCP Multipath Template %s, "
"will reuse them."
% ([f['fcp_id'] for f in fcp_list],
assigner_id, fcp_tmpl_id))
path_count = self.db.get_path_count(fcp_tmpl_id)
if len(fcp_list) != path_count:
LOG.warning("FCPs previously assigned to %s includes %s, "
"it is not equal to the path count: %s." %
(assigner_id, fcp_list, path_count))
self._valid_fcp_devcie_wwpn(fcp_list, assigner_id)
# we got it from db, let's reuse it
available_list = fcp_list
return available_list, fcp_tmpl_id
except Exception as err:
errmsg = ("Failed to reserve FCP devices "
"for assigner %s by FCP Multipath Template %s error: %s"
% (assigner_id, fcp_template_id, err.message))
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
finally:
_LOCK_RESERVE_FCP.release()
def unreserve_fcp_devices(self, assigner_id, fcp_template_id):
"""
Unreserve FCP devices by assigner_id and fcp_template_id.
In this method:
1. Get FCP list from db by assigner and
fcp_template whose reserved=1
2. If fcp_list is not empty,
choose the ones with connections=0,
and then set reserved=0 in fcp table in db
3. If fcp_list is empty, return empty list
Returns: The fcp list data structure:
[(fcp_id, wwpn_npiv, wwpn_phy, connections)].
An example of fcp_list:
[('1c10', 'c12345abcdefg1', 'c1234abcd33002641', 1),
('1d10', 'c12345abcdefg2', 'c1234abcd33002641', 0)]
If no fcp can be gotten from db, return empty list.
"""
with database.get_fcp_conn():
try:
if fcp_template_id is None:
errmsg = ("fcp_template_id is not specified "
"while releasing FCP devices.")
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
fcp_list = self.db.get_reserved_fcps_from_assigner(
assigner_id, fcp_template_id)
if fcp_list:
self._valid_fcp_devcie_wwpn(fcp_list, assigner_id)
# the data structure of fcp_list is
# [(fcp_id, wwpn_npiv, wwpn_phy, connections)]
# only unreserve the fcp with connections=0
fcp_ids = [fcp['fcp_id'] for fcp in fcp_list
if fcp['connections'] == 0]
if fcp_ids:
self.db.unreserve_fcps(fcp_ids)
LOG.info("Unreserve fcp device %s from "
"instance %s and FCP Multipath Template %s."
% (fcp_ids, assigner_id, fcp_template_id))
return fcp_list
return []
except Exception as err:
errmsg = ("Failed to unreserve FCP devices for "
"assigner %s by FCP Multipath Template %s. Error: %s"
% (assigner_id, fcp_template_id, err.message))
LOG.error(errmsg)
raise exception.SDKVolumeOperationError(rs=11,
userid=assigner_id,
msg=errmsg)
def get_all_fcp_pool(self, assigner_id):
"""Return a dict of all FCPs in ZVM
fcp_dict_in_zvm example (key=FCP):
{
'1a06': <zvmsdk.volumeop.FCP object at 0x3ff94f74128>,
'1a07': <zvmsdk.volumeop.FCP object at 0x3ff94f74160>,
'1b06': <zvmsdk.volumeop.FCP object at 0x3ff94f74588>,
'1b07': <zvmsdk.volumeop.FCP object at 0x3ff94f74710>
}
"""
all_fcp_info = self._get_all_fcp_info(assigner_id)
lines_per_item = constants.FCP_INFO_LINES_PER_ITEM
all_fcp_pool = {}
num_fcps = len(all_fcp_info) // lines_per_item
for n in range(0, num_fcps):
start_line = lines_per_item * n
end_line = lines_per_item * (n + 1)
fcp_init_info = all_fcp_info[start_line:end_line]
fcp = FCP(fcp_init_info)
dev_no = fcp.get_dev_no()
all_fcp_pool[dev_no] = fcp
return all_fcp_pool
def get_fcp_dict_in_db(self):
"""Return a dict of all FCPs in FCP_DB
Note: the key of the returned dict is in lowercase.
example (key=FCP)
{
'fcp_id': (fcp_id, userid, connections, reserved, wwpn_npiv,
wwpn_phy, chpid, state, owner, tmpl_id),
'1a06': ('1a06', 'C2WDL003', 2, 1, 'c05076ddf7000002',
'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''),
'1b08': ('1b08', 'C2WDL003', 2, 1, 'c05076ddf7000002',
'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''),
'1c08': ('1c08', 'C2WDL003', 2, 1, 'c05076ddf7000002',
'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''),
}
"""
try:
# Get all FCPs found in DB.
fcp_in_db = self.db.get_all_fcps_of_assigner()
except exception.SDKObjectNotExistError:
fcp_in_db = list()
# this method is called by _sync_db_with_zvm,
# change this msg to warning
# level since no record in db is normal during sync
# such as when there is no fcp_list configured
msg = ("No fcp records found in database and ignore "
"the exception.")
LOG.warning(msg)
fcp_dict_in_db = {fcp[0].lower(): fcp for fcp in fcp_in_db}
return fcp_dict_in_db
def get_fcp_dict_in_zvm(self):
"""Return a dict of all FCPs in ZVM
Note: the key of the returned dict is in lowercase.
fcp_dict_in_zvm example (key=FCP):
{
'1a06': <zvmsdk.volumeop.FCP object at 0x3ff94f74128>,
'1a07': <zvmsdk.volumeop.FCP object at 0x3ff94f74160>,
'1b06': <zvmsdk.volumeop.FCP object at 0x3ff94f74588>,
'1b07': <zvmsdk.volumeop.FCP object at 0x3ff94f74710>
}
"""
# Get the userid of smt server
smt_userid = zvmutils.get_smt_userid()
# Return a dict of all FCPs in ZVM
fcp_dict_in_zvm = self.get_all_fcp_pool(smt_userid)
fcp_id_to_object = {fcp.lower(): fcp_dict_in_zvm[fcp]
for fcp in fcp_dict_in_zvm}
return fcp_id_to_object
    def sync_fcp_table_with_zvm(self, fcp_dict_in_zvm):
        """Update FCP records queried from zVM into FCP table.

        :param fcp_dict_in_zvm: (dict) FCP id (lowercase) -> FCP object,
            as returned by get_fcp_dict_in_zvm()

        Reconciles the fcp table in three steps:
        - insert FCPs newly found on z/VM
        - delete FCPs gone from z/VM, but only those with connections=0
          and reserved=0; in-use ones are kept and marked 'notfound'
        - update existing FCPs whose CHPID/state/owner (or, when safe,
          WWPNs) changed on z/VM
        """
        with database.get_fcp_conn():
            # Get a dict of all FCPs already existed in FCP table
            fcp_dict_in_db = self.get_fcp_dict_in_db()
            # Divide FCPs into three sets
            inter_set = set(fcp_dict_in_zvm) & set(fcp_dict_in_db)
            del_fcp_set = set(fcp_dict_in_db) - inter_set
            add_fcp_set = set(fcp_dict_in_zvm) - inter_set
            # Add new records into FCP table
            fcp_info_need_insert = [fcp_dict_in_zvm[fcp].to_tuple()
                                    for fcp in add_fcp_set]
            LOG.info("New FCP devices found on z/VM: {}".format(add_fcp_set))
            self.db.bulk_insert_zvm_fcp_info_into_fcp_table(
                fcp_info_need_insert)
            # Delete FCP records from FCP table
            # if it is connections=0 and reserve=0
            LOG.info("FCP devices exist in FCP table but not in "
                     "z/VM any more: {}".format(del_fcp_set))
            fcp_ids_secure_to_delete = set()
            fcp_ids_not_found = set()
            for fcp in del_fcp_set:
                # example of a FCP record in fcp_dict_in_db
                # (fcp_id, userid, connections, reserved, wwpn_npiv,
                #  wwpn_phy, chpid, state, owner, tmpl_id)
                (fcp_id, userid, connections, reserved, wwpn_npiv_db,
                 wwpn_phy_db, chpid_db, fcp_state_db,
                 fcp_owner_db, tmpl_id) = fcp_dict_in_db[fcp]
                if connections == 0 and reserved == 0:
                    fcp_ids_secure_to_delete.add(fcp)
                else:
                    # these records not found in z/VM
                    # but still in-use in FCP table
                    fcp_ids_not_found.add(fcp)
            self.db.bulk_delete_from_fcp_table(
                fcp_ids_secure_to_delete)
            LOG.info("FCP devices removed from FCP table: {}".format(
                fcp_ids_secure_to_delete))
            # For records not found in ZVM, but still in-use in DB
            # mark them as not found
            if fcp_ids_not_found:
                self.db.bulk_update_state_in_fcp_table(fcp_ids_not_found,
                                                       'notfound')
                LOG.info("Ignore the request of deleting in-use "
                         "FCPs: {}.".format(fcp_ids_not_found))
            # Update status for FCP records already existed in DB
            LOG.info("FCP devices exist in both FCP table and "
                     "z/VM: {}".format(inter_set))
            fcp_ids_need_update = set()
            for fcp in inter_set:
                # example of a FCP record in fcp_dict_in_db
                # (fcp_id, userid, connections, reserved, wwpn_npiv,
                #  wwpn_phy, chpid, state, owner, tmpl_id)
                (fcp_id, userid, connections, reserved, wwpn_npiv_db,
                 wwpn_phy_db, chpid_db, fcp_state_db,
                 fcp_owner_db, tmpl_id) = fcp_dict_in_db[fcp]
                # Get physical WWPN and NPIV WWPN queried from z/VM
                wwpn_phy_zvm = fcp_dict_in_zvm[fcp].get_physical_port()
                wwpn_npiv_zvm = fcp_dict_in_zvm[fcp].get_npiv_port()
                # Get CHPID queried from z/VM
                chpid_zvm = fcp_dict_in_zvm[fcp].get_chpid()
                # Get FCP device state queried from z/VM
                # Possible state returned by ZVM:
                # 'active', 'free' or 'offline'
                fcp_state_zvm = fcp_dict_in_zvm[fcp].get_dev_status()
                # Get owner of FCP device queried from z/VM
                # Possible FCP owner returned by ZVM:
                # VM userid: if the FCP is attached to a VM
                # A String "NONE": if the FCP is not attached
                fcp_owner_zvm = fcp_dict_in_zvm[fcp].get_owner()
                # Check WWPNs need update or not
                if wwpn_npiv_db == '' or (connections == 0 and reserved == 0):
                    # The WWPNs are secure to be updated when:
                    # case1(wwpn_npiv_db == ''): the wwpn_npiv_db is empty, for example, upgraded from 114.
                    # case2(connections == 0 and reserved == 0): the FCP device is not in use.
                    if wwpn_npiv_db != wwpn_npiv_zvm or wwpn_phy_db != wwpn_phy_zvm:
                        # only need to update wwpns when they are different
                        fcp_ids_need_update.add(fcp)
                else:
                    # For an in-used FCP device, even its WWPNs(wwpn_npiv_zvm, wwpn_phy_zvm) are changed in z/VM,
                    # we can NOT update the wwpn_npiv, wwpn_phy columns in FCP DB because the host mapping from
                    # storage provider backend is still using the old WWPNs recorded in FCP DB.
                    # To detach the volume and delete the host mapping successfully, we need make sure the WWPNs records
                    # in FCP DB unchanged in this case.
                    # Because we will copy all properties in fcp_dict_in_zvm[fcp] to DB when update a FCP property
                    # (for example, state, owner, etc),
                    # we overwrite the (wwpn_npiv_zvm, wwpn_phy_zvm) in fcp_dict_in_zvm[fcp]
                    # to old (wwpn_npiv_db, wwpn_phy_db), so that their values will not be changed when update other
                    # properties.
                    fcp_dict_in_zvm[fcp].set_npiv_port(wwpn_npiv_db)
                    fcp_dict_in_zvm[fcp].set_physical_port(wwpn_phy_db)
                # Other cases need to update FCP record in DB
                if chpid_db != chpid_zvm:
                    # Check chpid changed or not
                    fcp_ids_need_update.add(fcp)
                elif fcp_state_db != fcp_state_zvm:
                    # Check state changed or not
                    fcp_ids_need_update.add(fcp)
                elif fcp_owner_db != fcp_owner_zvm:
                    # Check owner changed or not
                    fcp_ids_need_update.add(fcp)
                else:
                    LOG.debug("No need to update record of FCP "
                              "device {}".format(fcp))
            fcp_info_need_update = [fcp_dict_in_zvm[fcp].to_tuple()
                                    for fcp in fcp_ids_need_update]
            self.db.bulk_update_zvm_fcp_info_in_fcp_table(fcp_info_need_update)
            LOG.info("FCP devices need to update records in "
                     "fcp table: {}".format(fcp_info_need_update))
    def _sync_db_with_zvm(self):
        """Sync FCP DB with the FCP info queried from zVM

        Queries the live FCP device state from z/VM and merges it into
        the fcp table via sync_fcp_table_with_zvm().
        """
        LOG.info("Enter: Sync FCP DB with FCP info queried from z/VM.")
        LOG.info("Querying FCP status on z/VM.")
        # Get a dict of all FCPs in ZVM
        fcp_dict_in_zvm = self.get_fcp_dict_in_zvm()
        # Update the dict of all FCPs into FCP table in database
        self.sync_fcp_table_with_zvm(fcp_dict_in_zvm)
        LOG.info("Exit: Sync FCP DB with FCP info queried from z/VM.")
def create_fcp_template(self, name, description: str = '',
fcp_devices: str = '',
host_default: bool = False,
default_sp_list: list = None,
min_fcp_paths_count: int = None):
"""Create a FCP Multipath Template and return the basic information of
the created template, for example:
{
'fcp_template': {
'name': 'bjcb-test-template',
'id': '36439338-db14-11ec-bb41-0201018b1dd2',
'description': 'This is Default template',
'host_default': True,
'storage_providers': ['sp4', 'v7k60'],
'min_fcp_paths_count': 2
}
}
"""
LOG.info("Try to create a"
" FCP Multipath Template with name:%s,"
"description:%s, fcp devices: %s, host_default: %s,"
"storage_providers: %s, min_fcp_paths_count: %s."
% (name, description, fcp_devices, host_default,
default_sp_list, min_fcp_paths_count))
# Generate a template id for this new template
tmpl_id = str(uuid.uuid1())
# Get fcp devices info index by path
fcp_devices_by_path = utils.expand_fcp_list(fcp_devices)
# If min_fcp_paths_count is not None,need validate the value
if min_fcp_paths_count and min_fcp_paths_count > len(fcp_devices_by_path):
msg = ("min_fcp_paths_count %s is larger than fcp device path count %s, "
"adjust fcp_devices or min_fcp_paths_count."
% (min_fcp_paths_count, len(fcp_devices_by_path)))
LOG.error(msg)
raise exception.SDKConflictError(modID='volume', rs=23, msg=msg)
# Insert related records in FCP database
self.db.create_fcp_template(tmpl_id, name, description,
fcp_devices_by_path, host_default,
default_sp_list, min_fcp_paths_count)
min_fcp_paths_count_db = self.db.get_min_fcp_paths_count(tmpl_id)
# Return template basic info
LOG.info("A FCP Multipath Template was created with ID %s." % tmpl_id)
return {'fcp_template': {'name': name,
'id': tmpl_id,
'description': description,
'host_default': host_default,
'storage_providers': default_sp_list if default_sp_list else [],
'min_fcp_paths_count': min_fcp_paths_count_db}}
def edit_fcp_template(self, fcp_template_id, name=None,
description=None, fcp_devices=None,
host_default=None, default_sp_list=None,
min_fcp_paths_count=None):
""" Edit a FCP Multipath Template
The kwargs values are pre-validated in two places:
validate kwargs types
in zvmsdk/sdkwsgi/schemas/volume.py
set a kwarg as None if not passed by user
in zvmsdk/sdkwsgi/handlers/volume.py
If any kwarg is None, the kwarg will not be updated.
:param fcp_template_id: template id
:param name: template name
:param description: template desc
:param fcp_devices: FCP devices divided into
different paths by semicolon
Format:
"fcp-devices-from-path0;fcp-devices-from-path1;..."
Example:
"0011-0013;0015;0017-0018",
:param host_default: (bool)
:param default_sp_list: (list)
Example:
["SP1", "SP2"]
:param min_fcp_paths_count the min fcp paths count, if it is None,
will not update this field in db.
:return:
Example
{
'fcp_template': {
'name': 'bjcb-test-template',
'id': '36439338-db14-11ec-bb41-0201018b1dd2',
'description': 'This is Default template',
'host_default': True,
'storage_providers': ['sp4', 'v7k60'],
'min_fcp_paths_count': 2
}
}
"""
LOG.info("Enter: edit_fcp_template with args {}".format(
(fcp_template_id, name, description, fcp_devices,
host_default, default_sp_list, min_fcp_paths_count)))
# DML in FCP database
result = self.db.edit_fcp_template(fcp_template_id, name=name,
description=description,
fcp_devices=fcp_devices,
host_default=host_default,
default_sp_list=default_sp_list,
min_fcp_paths_count=min_fcp_paths_count)
LOG.info("Exit: edit_fcp_template")
return result
def _update_template_fcp_raw_usage(self, raw_usage, raw_item):
"""group raw_item with template_id and path
raw_item format:
[(fcp_id|tmpl_id|path|assigner_id|connections|reserved|
wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)]
return format:
{
template_id: {
path: [(fcp_id, template_id, assigner_id,
connections,
reserved, wwpn_npiv, wwpn_phy,
chpid, state, owner,
tmpl_id),()]
}
}
"""
(fcp_id, template_id, path_id, assigner_id, connections,
reserved, wwpn_npiv, wwpn_phy, chpid, state, owner,
tmpl_id) = raw_item
if not raw_usage.get(template_id, None):
raw_usage[template_id] = {}
if not raw_usage[template_id].get(path_id, None):
raw_usage[template_id][path_id] = []
# remove path_id from raw data, keep the last templ_id to
# represent from which template this FCP has been allocated out.
return_raw = (fcp_id, template_id, assigner_id, connections,
reserved, wwpn_npiv, wwpn_phy, chpid, state,
owner, tmpl_id)
raw_usage[template_id][path_id].append(return_raw)
return raw_usage
def extract_template_info_from_raw_data(self, raw_data):
"""
raw_data format:
[(id|name|description|is_default|sp_name)]
return format:
{
temlate_id: {
"id": id,
"name": name,
"description": description,
"host_default": is_default,
"storage_providers": [sp_name]
}
}
"""
template_dict = {}
for item in raw_data:
id, name, description, is_default, min_fcp_paths_count, sp_name = item
if min_fcp_paths_count < 0:
min_fcp_paths_count = self.db.get_path_count(id)
if not template_dict.get(id, None):
template_dict[id] = {"id": id,
"name": name,
"description": description,
"host_default": bool(is_default),
"storage_providers": [],
"min_fcp_paths_count": min_fcp_paths_count}
# one FCP Multipath Template can be multiple sp's default template
if sp_name and sp_name not in template_dict[id]["storage_providers"]:
template_dict[id]["storage_providers"].append(sp_name)
return template_dict
    def _update_template_fcp_statistics_usage(self, statistics_usage,
                                              raw_item):
        """Transform raw usage in FCP database into statistic data.

        :param statistics_usage: (dict) to store statistics info; updated
            in place and also returned
        :param raw_item: [list] to represent db query result

        raw_item format:
        (fcp_id|tmpl_id|path|assigner_id|connections|reserved|
        wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)
        the first three properties are from template_fcp_mapping table,
        and the others are from fcp table. These three properties will
        always have values.

        when the device is not in fcp table, all the properties in fcp
        table will be None. For example: template '12345678' has a fcp
        "1aaa" on path 0, but this device is not in fcp table, the
        query result will be as below.
        1aaa|12345678|0||||||||||

        Note: the FCP id in the returned dict is in uppercase.
        statistics_usage return result format:
        {
            template_id: {
                path1: {},
                path2: {}}
        }
        """
        # get statistic data about:
        # available, allocated, notfound,
        # unallocated_but_active, allocated_but_free
        # CHPIDs
        (fcp_id, template_id, path_id, assigner_id, connections,
         reserved, _, _, chpid, state, owner, _) = raw_item
        # The raw_item is for each fcp device, so there are multiple
        # items for each single FCP Multipath Template.
        # But the return result needs to group all the items by FCP Multipath Template,
        # so construct a dict statistics_usage[template_id]
        # with template_id as key to group the info.
        # template_id key also will be used to join with template base info
        if not statistics_usage.get(template_id, None):
            statistics_usage[template_id] = {}
        if not statistics_usage[template_id].get(path_id, None):
            # per-path statistics skeleton; see the case comments below
            # for the meaning of each section
            statistics_usage[template_id][path_id] = {
                "total": [],
                "total_count": 0,
                "single_fcp": [],
                "range_fcp": [],
                "available": [],
                "available_count": 0,
                "allocated": [],
                "reserve_only": [],
                "connection_only": [],
                "unallocated_but_active": {},
                "allocated_but_free": [],
                "notfound": [],
                "offline": [],
                "CHPIDs": {}}
        # when this fcp_id is not None, means the fcp exists in zvm, i.e in
        # fcp table, then it will have detail info from fcp table
        # when this fcp_id is None, means the fcp does not exist in zvm, no
        # detail info, just add into 'not_found' with the tmpl_fcp_id returns
        # from template_fcp_mapping table
        # Show upper case for FCP id
        fcp_id = fcp_id.upper()
        # If a fcp not found in z/VM, will not insert into fcp table, then the
        # db query result will be None. So connections not None represents
        # the fcp is found in z/VM
        if connections is not None:
            # Store each FCP in section "total"
            statistics_usage[template_id][path_id]["total"].append(fcp_id)
            # case G: (state = notfound)
            # this FCP in database but not found in z/VM
            if state == "notfound":
                statistics_usage[
                    template_id][path_id]["notfound"].append(fcp_id)
                LOG.warning("Found a FCP device "
                            "%s in FCP Multipath Template %s, but not found in "
                            "z/VM." % (str(fcp_id), str(template_id)))
            # case H: (state = offline)
            # this FCP in database but offline in z/VM
            if state == "offline":
                statistics_usage[template_id][path_id]["offline"].append(
                    fcp_id)
                LOG.warning("Found state of a FCP "
                            "device %s is offline in database." % str(fcp_id))
            # found this FCP in z/VM
            if connections == 0:
                if reserved == 0:
                    # case A: (reserve=0 and conn=0 and state=free)
                    # this FCP is available for use
                    if state == "free":
                        statistics_usage[
                            template_id][path_id]["available"].append(fcp_id)
                        LOG.debug("Found "
                                  "an available FCP device %s in "
                                  "database." % str(fcp_id))
                    # case E: (conn=0 and reserve=0 and state=active)
                    # this FCP is available in database but its state
                    # is active in smcli output
                    if state == "active":
                        statistics_usage[
                            template_id][path_id]["unallocated_but_active"].\
                            update({fcp_id: owner})
                        LOG.warning("Found a FCP "
                                    "device %s available in database but its "
                                    "state is active, it may be occupied by "
                                    "a userid outside of this ZCC." % str(
                                        fcp_id))
                else:
                    # case C: (reserve=1 and conn=0)
                    # the fcp should be in task or a bug happen
                    statistics_usage[
                        template_id][path_id]["reserve_only"].append(fcp_id)
                    LOG.warning("Found a FCP "
                                "device %s reserve_only." % str(fcp_id))
            else:
                # connections != 0
                if reserved == 0:
                    # case D: (reserve = 0 and conn != 0)
                    # must have a bug result in this
                    statistics_usage[template_id][
                        path_id]["connection_only"].append(fcp_id)
                    LOG.warning("Found a FCP "
                                "device %s unreserved in database but "
                                "its connections is not 0." % str(fcp_id))
                else:
                    # case B: (reserve=1 and conn!=0)
                    # ZCC allocated this to a userid
                    statistics_usage[
                        template_id][path_id]["allocated"].append(fcp_id)
                    LOG.debug("Found an allocated "
                              "FCP device: %s." % str(fcp_id))
                # case F: (conn!=0 and state=free)
                if state == "free":
                    statistics_usage[template_id][
                        path_id]["allocated_but_free"].append(fcp_id)
                    LOG.warning("Found a FCP "
                                "device %s allocated by ZCC but its state is "
                                "free." % str(fcp_id))
                # case I: ((conn != 0) & assigner_id != owner)
                elif assigner_id.lower() != owner.lower() and state != "notfound":
                    LOG.warning("Found a FCP "
                                "device %s allocated by ZCC but its assigner "
                                "differs from owner." % str(fcp_id))
            if chpid:
                # group FCPs by their channel path ID as well
                if not statistics_usage[template_id][path_id]["CHPIDs"].get(chpid, None):
                    statistics_usage[
                        template_id][path_id]["CHPIDs"].update({chpid: []})
                statistics_usage[
                    template_id][path_id]["CHPIDs"][chpid].append(fcp_id)
        # this FCP in template_fcp_mapping table but not found in z/VM
        else:
            # add into 'total' and 'not_found'
            statistics_usage[template_id][path_id]["total"].append(fcp_id)
            statistics_usage[template_id][path_id]["notfound"].append(fcp_id)
            LOG.warning("Found a FCP device "
                        "%s in FCP Multipath Template %s, but not found in "
                        "z/VM." % (str(fcp_id), str(template_id)))
        return statistics_usage
def _shrink_fcp_list_in_statistics_usage(self, statistics_usage):
"""shrink fcp list in statistics sections to range fcp
for example, before shrink:
template_statistics[path]["total"] = "1A0A, 1A0B, 1A0C, 1A0E"
after shink:
template_statistics[path]["total"] = "1A0A - 1A0C, 1A0E"
"""
for template_statistics in statistics_usage.values():
for path in template_statistics:
# count total and available fcp before shrink
if template_statistics[path]["total"]:
template_statistics[path][
"total_count"] = len(template_statistics[path][
"total"])
if template_statistics[path]["available"]:
template_statistics[path][
"available_count"] = len(template_statistics[path][
"available"])
# only below sections in statistics need to shrink
need_shrink_sections = ["total",
"available",
"allocated",
"reserve_only",
"connection_only",
"allocated_but_free",
"notfound",
"offline"]
# Do NOT transform unallocated_but_active,
# because its value also contains VM userid.
# e.g. [('1b04','owner1'), ('1b05','owner2')]
# Do NOT transform CHPIDs, total_count, single_fcp,
# range_fcp and available_count
for section in need_shrink_sections:
fcp_list = template_statistics[path][section]
template_statistics[path][section] = (
utils.shrink_fcp_list(fcp_list))
# shrink for each CHIPID
for chpid, fcps in template_statistics[
path]['CHPIDs'].items():
fcp_list = fcps
template_statistics[path]['CHPIDs'][chpid] = (
utils.shrink_fcp_list(fcp_list))
def _split_singe_range_fcp_list(self, statistics_usage):
# after shrink, total fcps can have both range and singe fcps,
# for example: template_statistics[path]['total'] = "1A0A - 1A0C, 1A0E"
# UI needs 'range_fcp' and 'singe_fcp' to input in different areas
# so split the total fcps to 'range_fcp' and 'singe_fcp' as below:
# template_statistics[path]['range_fcp'] = "1A0A - 1A0C"
# template_statistics[path]['single_fcp'] = "1A0E"
for template_statistics in statistics_usage.values():
for path in template_statistics:
range_fcp = []
single_fcp = []
total_fcp = template_statistics[path]['total'].split(',')
for fcp in total_fcp:
if '-' in fcp:
range_fcp.append(fcp.strip())
else:
single_fcp.append(fcp.strip())
template_statistics[path]['range_fcp'] = ', '.join(range_fcp)
template_statistics[path]['single_fcp'] = ', '.join(single_fcp)
def get_fcp_templates(self, template_id_list=None, assigner_id=None,
default_sp_list=None, host_default=None):
"""Get template base info by template_id_list or filters
:param template_id_list: (list) a list of template id,
if it is None, get FCP Multipath Templates with other parameter
:param assigner_id: (str) a string of VM userid
:param default_sp_list: (list) a list of storage provider or 'all',
to get storage provider's default FCP Multipath Templates
when sp_host_list = ['all'], will get all storage providers' default
FCP Multipath Templates. For example, there are 3 FCP Multipath Templates are set as
storage providers' default template, then all these 3 FCP Multipath Templates
will return as below:
{
"fcp_templates": [
{
"id": "36439338-db14-11ec-bb41-0201018b1dd2",
"name": "default_template",
"description": "This is Default template",
"host_default": True,
"storage_providers": [
"v7k60",
"sp4"
]
},
{
"id": "36439338-db14-11ec-bb41-0201018b1dd3",
"name": "test_template",
"description": "just for test",
"host_default": False,
"storage_providers": [
"ds8k60c1"
]
},
{
"id": "12345678",
"name": "templatet1",
"description": "test1",
"host_default": False,
"storage_providers": [
"sp3"
]
}
]
}
when sp_host_list is a storage provider name list, will return these
providers' default FCP Multipath Templates.
Example:
sp_host_list = ['v7k60', 'ds8k60c1']
return:
{
"fcp_templates": [
{
"id": "36439338-db14-11ec-bb41-0201018b1dd2",
"name": "default_template",
"description": "This is Default template",
"host_default": True,
"storage_providers": [
"v7k60",
"sp4"
]
},
{
"id": "36439338-db14-11ec-bb41-0201018b1dd3",
"name": "test_template",
"description": "just for test",
"host_default": False,
"storage_providers": [
"ds8k60c1"
]
}
]
}
:param host_default: (boolean) whether or not get host default fcp
template
:return: (dict) the base info of template
"""
ret = []
if template_id_list:
not_exist = []
for template_id in template_id_list:
if not self.db.fcp_template_exist_in_db(template_id):
not_exist.append(template_id)
if not_exist:
obj_desc = ("FCP Multipath Templates {} ".format(not_exist))
raise exception.SDKObjectNotExistError(obj_desc=obj_desc)
raw = self.db.get_fcp_templates(template_id_list)
elif assigner_id:
raw = self.db.get_fcp_template_by_assigner_id(assigner_id)
elif default_sp_list:
raw = self.db.get_sp_default_fcp_template(default_sp_list)
elif host_default is not None:
raw = self.db.get_host_default_fcp_template(host_default)
else:
# if no parameter, will get all FCP Multipath Templates
raw = self.db.get_fcp_templates(template_id_list)
template_list = self.extract_template_info_from_raw_data(raw)
for value in template_list.values():
ret.append(value)
return {"fcp_templates": ret}
    def get_fcp_templates_details(self, template_id_list=None, raw=False,
                                  statistics=True, sync_with_zvm=False):
        """Get FCP Multipath Templates detail info.

        :param template_id_list: (list) template ids to query; if None,
            will get all the templates on the host
        :param raw: (bool) whether to include the raw per-FCP-device
            records of each template in the result
        :param statistics: (bool) whether to include the per-path usage
            statistics of each template in the result
        :param sync_with_zvm: (bool) if True, first synchronize the FCP DB
            with the FCP device state on z/VM (self._sync_db_with_zvm())
            before querying
        :return: (dict) the raw and/or statistic data of the FCP devices
            of the queried templates

        Example of the returned value
        when FCP DB is NOT empty and raw=True and statistics=True:
        {
          "fcp_templates":[
            {
              "id":"36439338-db14-11ec-bb41-0201018b1dd2",
              "name":"default_template",
              "description":"This is Default template",
              "host_default":True,
              "storage_providers":[
                "sp4",
                "v7k60"
              ],
              "raw":{
                # (fcp_id, template_id, assigner_id, connections,
                #  reserved, wwpn_npiv, wwpn_phy, chpid, state, owner,
                #  tmpl_id)
                "0":[
                  [
                    "1a0f",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "HLP0000B",
                    0,
                    0,
                    "c05076de3300038b",
                    "c05076de33002e41",
                    "27",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                  ],
                  [
                    "1a0e",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "",
                    0,
                    0,
                    "c05076de330003a2",
                    "c05076de33002e41",
                    "27",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                  ]
                ],
                "1":[
                  [
                    "1c0d",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "",
                    0,
                    0,
                    "c05076de33000353",
                    "c05076de33002641",
                    "32",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                  ]
                ]
              },
              "statistics":{
                # case A: (reserve = 0 and conn = 0 and state = free)
                #  FCP is available and in free status
                "available": ('1A00','1A05',...)
                # case B: (reserve = 1 and conn != 0)
                #  normal in-use FCP
                "allocated": ('1B00','1B05',...)
                # case C: (reserve = 1, conn = 0)
                #  the fcp should be in task or a bug cause this
                #  situation
                "reserve_only": ('1C00', '1C05', ...)
                # case D: (reserve = 0 and conn != 0)
                #  should be a bug result in this situation
                "connection_only": ('1C00', '1C05', ...)
                # case E: (reserve = 0, conn = 0, state = active)
                #  FCP occupied out-of-band
                'unallocated_but_active': {'1B04': 'owner1',
                                           '1B05': 'owner2'}
                # case F: (conn != 0, state = free)
                #  we allocated it in db but the FCP status is free
                #  this is a situation not expected
                "allocated_but_free": ('1D00','1D05',...)
                # case G: (state = notfound)
                #  not found in smcli
                "notfound": ('1E00','1E05',...)
                # case H: (state = offline)
                #  offline in smcli
                "offline": ('1F00','1F05',...)
                # case I: ((conn != 0) & assigner_id != owner)
                #  assigner_id-in-DB differ from smcli-returned-owner
                #  only log about this
                # case J: fcp by chpid
                "0":{
                  "total":"1A0E - 1A0F",
                  "available":"1A0E - 1A0F",
                  "allocated":"",
                  "reserve_only":"",
                  "connection_only":"",
                  "unallocated_but_active":{},
                  "allocated_but_free":"",
                  "notfound":"",
                  "offline":"",
                  "CHPIDs":{
                    "27":"1A0E - 1A0F"
                  }
                },
                "1":{
                  "total":"1C0D",
                  "available":"1C0D",
                  "allocated":"",
                  "reserve_only":"",
                  "connection_only":"",
                  "unallocated_but_active":{},
                  "allocated_but_free":"",
                  "notfound":"",
                  "offline":"",
                  "CHPIDs":{
                    "32":"1C0D"
                  }
                }
              }
            }
          ]
        }
        """
        # Validate the requested template ids before doing any work
        not_exist = []
        if template_id_list:
            for template_id in template_id_list:
                if not self.db.fcp_template_exist_in_db(template_id):
                    not_exist.append(template_id)
            if not_exist:
                obj_desc = ("FCP Multipath Templates {} ".format(not_exist))
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc)
        if sync_with_zvm:
            self._sync_db_with_zvm()
        statistics_usage = {}
        raw_usage = {}
        template_info = {}
        ret = []
        # tmpl_cmd result format:
        # [(id|name|description|is_default|sp_name)]
        # devices_cmd result format:
        # [(fcp_id|tmpl_id|path|assigner_id|connections|reserved|
        #   wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)]
        tmpl_result, devices_result = self.db.get_fcp_templates_details(
            template_id_list)
        # extract template base info into template_info
        template_info = self.extract_template_info_from_raw_data(tmpl_result)
        # template_info format:
        # {
        #     template_id: {
        #         "id": id,
        #         "name": name,
        #         "description": description,
        #         "is_default": is_default,
        #         "storage_providers": [sp_name]
        #     }
        # }
        if raw:
            for item in devices_result:
                self._update_template_fcp_raw_usage(raw_usage, item)
            for template_id, base_info in template_info.items():
                if template_id in raw_usage:
                    base_info.update({"raw": raw_usage[template_id]})
                else:
                    # some template does not have fcp devices, so there is no
                    # raw_usage for such template
                    base_info.update({"raw": {}})
        # after join raw info, template_info format is like this:
        # {
        #     template_id: {
        #         "id": id,
        #         "name": name,
        #         "description": description,
        #         "is_default": is_default,
        #         "storage_providers": [sp_name],
        #         "raw": {
        #             path1: {},
        #             path2: {}}
        #     }
        # }
        # get fcp statistics usage
        if statistics:
            for item in devices_result:
                self._update_template_fcp_statistics_usage(
                    statistics_usage, item)
            LOG.info("statistic FCP usage before shrink: %s"
                     % statistics_usage)
            self._shrink_fcp_list_in_statistics_usage(statistics_usage)
            self._split_singe_range_fcp_list(statistics_usage)
            LOG.info("statistic FCP usage after shrink: %s"
                     % statistics_usage)
            # update base info with statistics_usage
            # statistics_usage format:
            # {
            #     template_id1: {
            #         path1: {},
            #         path2: {}},
            #     template_id2: {
            #         path1: {},
            #         path2: {}}
            # }
            for template_id, base_info in template_info.items():
                # only the FCP Multipath Template which has fcp in zvm has
                # statistics_usage data
                if template_id in statistics_usage:
                    base_info.update(
                        {"statistics": statistics_usage[template_id]})
                else:
                    # some templates do not have fcp devices or do not have
                    # valid fcp in zvm, so do not have statistics_usage data
                    base_info.update({"statistics": {}})
            # after join statistics info, template_info format is like this:
            # {
            #     template_id: {
            #         "id": id,
            #         "name": name,
            #         "description": description,
            #         "host_default": is_default,
            #         "storage_providers": [sp_name],
            #         "statistics": {
            #             path1: {},
            #             path2: {}}
            #     }
            # }
        for value in template_info.values():
            ret.append(value)
        return {"fcp_templates": ret}
def delete_fcp_template(self, template_id):
"""Delete FCP Multipath Template by id.
:param template_id: (str)
:return: no return result
"""
return self.db.delete_fcp_template(template_id)
# Volume manager for volumes attached over FCP (Fibre Channel Protocol)
class FCPVolumeManager(object):
    def __init__(self):
        # FCPManager owns the FCP device pool and the FCP database
        self.fcp_mgr = FCPManager()
        # API used to configure volumes inside the guest operating system
        self.config_api = VolumeConfiguratorAPI()
        self._smtclient = smtclient.get_smtclient()
        # guards volume_refresh_bootmap() against concurrent callers
        self._lock = threading.RLock()
        # previously FCPDbOperator is initialized twice, here we
        # just do following variable redirection to avoid too much
        # reference code changes
        self.db = self.fcp_mgr.db
    def _dedicate_fcp(self, fcp, assigner_id):
        """Dedicate the FCP device to the virtual machine on z/VM.

        The same device number is used as both virtual and real address.
        The trailing 0 is passed through to SMT as the mode flag —
        TODO(review): confirm its exact semantics in smtclient.
        """
        self._smtclient.dedicate_device(assigner_id, fcp, fcp, 0)
    def _add_disks(self, fcp_list, assigner_id, target_wwpns, target_lun,
                   multipath, os_version, mount_point,):
        """Online the volume inside the guest operating system.

        Delegates to VolumeConfiguratorAPI.config_attach(), which performs
        the in-guest configuration (FCP devices and volume online).
        """
        self.config_api.config_attach(fcp_list, assigner_id, target_wwpns,
                                      target_lun, multipath, os_version,
                                      mount_point)
def _rollback_do_attach(self, fcp_list, assigner_id, target_wwpns, target_lun,
multipath, os_version, mount_point):
"""
Rollback for the following completed operations:
1. oeration on VM operating system done by _add_disks()
i.e. online FCP devices and the volume from VM OS
2. operations on z/VM done by _dedicate_fcp()
i.e. dedicate FCP device from assigner_id
3. operations on FCP DB done by get_volume_connector()
i.e. reserve FCP device and set FCP Multipath Template id from FCP DB
:param fcp_list: (list) a list of FCP devices
:param assigner_id: (str) the userid of the virtual machine
:return: None
"""
# Operation on VM OS:
# offline volume and FCP devices from VM OS
with zvmutils.ignore_errors():
fcp_connections = {fcp: self.db.get_connections_from_fcp(fcp)
for fcp in fcp_list}
# _remove_disks() offline FCP devices only when total_connections is 0,
# i.e. detaching the last volume from the FCP devices
total_connections = sum(fcp_connections.values())
self._remove_disks(fcp_list, assigner_id, target_wwpns, target_lun,
multipath, os_version, mount_point, total_connections)
LOG.info("Rollback on VM OS: offline the volume from VM OS")
# Operation on z/VM:
# undedicate FCP device from assigner_id
for fcp in fcp_list:
with zvmutils.ignore_errors():
if fcp_connections[fcp] == 0:
self._undedicate_fcp(fcp, assigner_id)
LOG.info("Rollback on z/VM: undedicate FCP device: %s" % fcp)
# Operation on FCP DB:
# if connections is 0,
# then unreserve the FCP device and cleanup tmpl_id
no_connection_fcps = [fcp for fcp in fcp_connections
if fcp_connections[fcp] == 0]
if no_connection_fcps:
with zvmutils.ignore_errors():
self.db.unreserve_fcps(no_connection_fcps)
LOG.info("Rollback on FCP DB: Unreserve FCP devices %s", no_connection_fcps)
    def _do_attach(self, fcp_list, assigner_id, target_wwpns, target_lun,
                   multipath, os_version, mount_point, is_root_volume):
        """Attach a volume.

        First translate the FCP devices into local WWPNs, then dedicate
        the FCP devices to the guest if needed, and finally call the SMT
        layer to run the Linux commands inside the guest.

        :param fcp_list: (list) FCP devices to attach through
        :param assigner_id: (str) userid of the virtual machine
        :param target_wwpns: (list) target WWPNs of the storage provider
        :param target_lun: LUN of the volume
        :param multipath: (bool) whether multipath is enabled
        :param os_version: (str) OS version of the guest
        :param mount_point: (str) mount point of the volume in the guest
        :param is_root_volume: (bool) True when attaching the root volume
            (the dedicate was already done by refresh_bootmap)
        """
        LOG.info("Start to attach volume to FCP devices "
                 "%s on machine %s." % (fcp_list, assigner_id))
        # _DBLOCK_FCP is the lock used in get_fcp_conn(),
        # here it is used to ensure the operation of FCP DB is thread safe.
        # Example:
        # Before thread-1 enters _attach(),
        # 2 FCP devices are reserved (fcp1, fcp2)
        # by get_volume_connectoer() for this attach.
        # If thread-1 fails increasing connections for 2nd FCP (fcp2),
        # then, thread-2 must wait before thread-1 completes rollback
        # for the state of reserved and connections of both FCPs
        # More details refer to pull request #668
        with database._DBLOCK_FCP:
            try:
                # The three operations must be put in the same with-block:
                # - Operation on FCP DB: increase_fcp_connections()
                # - Operation on z/VM: _dedicate_fcp()
                # - Operation on VM OS: _add_disks()
                # So as to auto-rollabck connections on FCP DB,
                # if any of the three operations raises exception.
                with database.get_fcp_conn():
                    # Operation on FCP DB:
                    # increase connections by 1 and set assigner_id.
                    #
                    # Call increase_fcp_connections() within the try-block,
                    # so that _rollback_do_attach() can be called to unreserve
                    # FCP devices if increase_fcp_connections() raise
                    # exception.
                    #
                    # fcp_connections examples:
                    # {'1a10': 1, '1b10': 1} => attaching 1st volume
                    # {'1a10': 2, '1b10': 2} => attaching 2nd volume
                    # {'1a10': 2, '1b10': 1} => connections differ in
                    #                           abnormal case (due to bug)
                    # the values are the connections of the FCP device
                    fcp_connections = self.fcp_mgr.increase_fcp_connections(
                        fcp_list, assigner_id)
                    LOG.info("The connections of FCP devices before "
                             "being dedicated to virtual machine %s is: %s."
                             % (assigner_id, fcp_connections))
                    if is_root_volume:
                        LOG.info("We are attaching root volume, dedicating "
                                 "FCP devices %s "
                                 "to virtual machine %s has been done by "
                                 "refresh_bootmap; "
                                 "skip the remain steps of volume attachment."
                                 % (fcp_list, assigner_id))
                        return []
                    # Operation on z/VM:
                    # dedicate FCP devices to the assigner_id in z/VM
                    for fcp in fcp_list:
                        # only dedicate the FCP device on z/VM
                        # if connections is 1
                        # (i.e. 1st volume attached for the FCP dev)
                        # because otherwise the FCP device has been
                        # dedicated already.
                        # if _dedicate_fcp() raise exception for a FCP device,
                        # we must stop the attachment
                        # i.e. go to except-block to do _rollback_do_attach()
                        # rather than continue to do _dedicate_fcp()
                        # for the next FCP device
                        if fcp_connections[fcp] == 1:
                            LOG.info("Start to dedicate FCP %s to "
                                     "%s in z/VM." % (fcp, assigner_id))
                            # dedicate the FCP to the assigner in z/VM
                            self._dedicate_fcp(fcp, assigner_id)
                            LOG.info("Dedicating FCP %s to %s in z/VM is "
                                     "done." % (fcp, assigner_id))
                        else:
                            LOG.info("This is not the first time to "
                                     "attach volume to FCP %s, "
                                     "skip dedicating the FCP device "
                                     "in z/VM." % fcp)
                    # Operation on VM operating system
                    # online the volume in the virtual machine
                    LOG.info("Start to configure volume in the operating "
                             "system of %s." % assigner_id)
                    self._add_disks(fcp_list, assigner_id, target_wwpns,
                                    target_lun, multipath, os_version,
                                    mount_point)
                    LOG.info("Configuring volume in the operating "
                             "system of %s is done." % assigner_id)
                LOG.info("Attaching volume to FCP devices %s on virtual "
                         "machine %s is "
                         "done." % (fcp_list, assigner_id))
            except Exception as err:
                LOG.error(str(err))
                # Rollback for the following completed operations:
                # 1. Operation on VM OS done by _add_disks()
                # 2. operations on z/VM done by _dedicate_fcp()
                # 3. operations on FCP DB done by get_volume_connector()
                LOG.info("Enter rollback: _rollback_do_attach")
                self._rollback_do_attach(fcp_list, assigner_id, target_wwpns,
                                         target_lun, multipath, os_version,
                                         mount_point)
                LOG.info("Exit rollback: _rollback_do_attach")
                raise
    def _rollback_do_detach(self, fcp_connections, assigner_id, target_wwpns,
                            target_lun, multipath, os_version, mount_point):
        """
        Rollback for the following completed operations:
        1. operation on VM operating system done by _remove_disks()
           i.e. remove FCP devices and the volume from VM OS
        2. operations on z/VM done by _undedicate_fcp()
           i.e. undedicate FCP device from assigner_id

        :param fcp_connections: (dict) map of FCP device to its connections
            count as returned by decrease_fcp_connections()
        :param assigner_id: (str) the userid of the virtual machine
        :param target_wwpns: (list) target WWPNs of the storage provider
        :param target_lun: LUN of the volume
        :param multipath: (bool) whether multipath is enabled
        :param os_version: (str) OS version of the guest
        :param mount_point: (str) mount point of the volume in the guest
        :return: None
        """
        # Operation on z/VM:
        # dedicate FCP devices to the virtual machine
        for fcp in fcp_connections:
            with zvmutils.ignore_errors():
                # _undedicate_fcp() has been done in _do_detach()
                # if fcp_connections[fcp] == 0,
                # so we do _dedicate_fcp() as rollback
                # with the same if-condition
                if fcp_connections[fcp] == 0:
                    # dedicate the FCP to the assigner in z/VM
                    self._dedicate_fcp(fcp, assigner_id)
                    LOG.info("Rollback on z/VM: dedicate FCP device: %s" % fcp)
        # Operation on VM operating system:
        # online the volume in the virtual machine
        with zvmutils.ignore_errors():
            fcp_list = [f for f in fcp_connections]
            self._add_disks(fcp_list, assigner_id,
                            target_wwpns, target_lun,
                            multipath, os_version, mount_point)
            LOG.info("Rollback on VM operating system: "
                     "online volume for virtual machine %s"
                     % assigner_id)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun,
wwid='',
transportfiles=None, guest_networks=None,
fcp_template_id=None):
if not fcp_template_id:
min_fcp_paths_count = len(fcpchannels)
else:
min_fcp_paths_count = self.db.get_min_fcp_paths_count(fcp_template_id)
if min_fcp_paths_count == 0:
errmsg = ("No FCP devices were found in the FCP Multipath Template %s,"
"stop refreshing bootmap." % fcp_template_id)
LOG.error(errmsg)
raise exception.SDKBaseException(msg=errmsg)
with zvmutils.acquire_lock(self._lock):
LOG.debug('Enter lock scope of volume_refresh_bootmap.')
ret = self._smtclient.volume_refresh_bootmap(fcpchannels, wwpns,
lun, wwid=wwid,
transportfiles=transportfiles,
guest_networks=guest_networks,
min_fcp_paths_count=min_fcp_paths_count)
LOG.debug('Exit lock of volume_refresh_bootmap with ret %s.' % ret)
return ret
def attach(self, connection_info):
"""Attach a volume to a guest
connection_info contains info from host and storage side
this mostly includes
host side FCP: this can get host side wwpn
storage side wwpn
storage side lun
all the above assume the storage side info is given by caller
"""
fcps = connection_info['zvm_fcp']
wwpns = connection_info['target_wwpn']
target_lun = connection_info['target_lun']
assigner_id = connection_info['assigner_id'].upper()
multipath = connection_info['multipath'].lower()
if multipath == 'true':
multipath = True
else:
multipath = False
os_version = connection_info['os_version']
mount_point = connection_info['mount_point']
is_root_volume = connection_info.get('is_root_volume', False)
if is_root_volume is False and \
not zvmutils.check_userid_exist(assigner_id):
LOG.error("The virtual machine '%s' does not exist on z/VM." % assigner_id)
raise exception.SDKObjectNotExistError(
obj_desc=("Guest '%s'" % assigner_id), modID='volume')
else:
# transfer to lower cases
fcp_list = [x.lower() for x in fcps]
target_wwpns = [wwpn.lower() for wwpn in wwpns]
try:
self._do_attach(fcp_list, assigner_id,
target_wwpns, target_lun,
multipath, os_version,
mount_point,
is_root_volume)
except Exception:
for fcp in fcp_list:
with zvmutils.ignore_errors():
_userid, _reserved, _conns, _tmpl_id = self.get_fcp_usage(fcp)
LOG.info("After rollback, property of FCP device %s "
"is (assigner_id: %s, reserved:%s, "
"connections: %s, FCP Multipath Template id: %s)."
% (fcp, _userid, _reserved, _conns, _tmpl_id))
raise
    def _undedicate_fcp(self, fcp, assigner_id):
        """Undedicate the FCP device from the virtual machine on z/VM."""
        self._smtclient.undedicate_device(assigner_id, fcp)
    def _remove_disks(self, fcp_list, assigner_id, target_wwpns, target_lun,
                      multipath, os_version, mount_point, connections):
        """Offline the volume inside the guest operating system.

        Delegates to VolumeConfiguratorAPI.config_detach(); per the
        callers' comments, the FCP devices themselves are only offlined
        when *connections* is 0 (i.e. the last volume is being detached).
        """
        self.config_api.config_detach(fcp_list, assigner_id, target_wwpns,
                                      target_lun, multipath, os_version,
                                      mount_point, connections)
    def _do_detach(self, fcp_list, assigner_id, target_wwpns, target_lun,
                   multipath, os_version, mount_point, is_root_volume,
                   update_connections_only):
        """Detach a volume from a guest.

        :param fcp_list: (list) FCP devices the volume was attached through
        :param assigner_id: (str) userid of the virtual machine
        :param target_wwpns: (list) target WWPNs of the storage provider
        :param target_lun: LUN of the volume
        :param multipath: (bool) whether multipath is enabled
        :param os_version: (str) OS version of the guest
        :param mount_point: (str) mount point of the volume in the guest
        :param is_root_volume: (bool) True when detaching the root volume
        :param update_connections_only: (bool) True to only update the
            database records and skip the z/VM and in-guest operations
        """
        LOG.info("Start to detach volume on virtual machine %s from "
                 "FCP devices %s" % (assigner_id, fcp_list))
        with database.get_fcp_conn():
            # Operation on FCP DB:
            # decrease connections by 1
            # fcp_connections is like {'1a10': 0, '1b10': 2}
            # the values are the connections of the FCP device
            fcp_connections = self.fcp_mgr.decrease_fcp_connections(fcp_list)
            # If is root volume we only need update database record
            # because the dedicate is done by volume_refresh_bootmap
            # If update_connections set to True, means upper layer want
            # to update database record only. For example, try to delete
            # the instance, then no need to waste time on undedicate
            if is_root_volume or update_connections_only:
                if update_connections_only:
                    LOG.info("Update connections only, undedicating FCP "
                             "devices %s "
                             "from virtual machine %s has been done; skip "
                             "the remain "
                             "steps of volume detachment"
                             % (fcp_list, assigner_id))
                else:
                    LOG.info("We are detaching root volume, undedicating "
                             "FCP devices %s "
                             "from virtual machine %s has been done; skip "
                             "the remain "
                             "steps of volume detachment"
                             % (fcp_list, assigner_id))
                return
            # when detaching volumes, if userid not exist, no need to
            # raise exception. we stop here after the database operations
            # done.
            if not zvmutils.check_userid_exist(assigner_id):
                LOG.warning("Virtual machine %s does not exist when trying "
                            "to detach "
                            "volume from it. skip the remain steps of volume "
                            "detachment", assigner_id)
                return
            try:
                LOG.info("Start to remove volume in the operating "
                         "system of %s." % assigner_id)
                # Operation on VM operating system:
                # offline the volume in the virtual machine
                #
                # When detaching the non-last volume from the FCPs in
                # fcp_connections,
                # normally, the connections of partial FCPs are non-zero,
                # so that
                #   sum(fcp_connections.values()) > 0
                #   fcp_connections is like {'1a10': 2, '1b10': 2}
                # in this case, _remove_disks() must be called with
                # total_connections > 0,
                # so as NOT to offline the FCPs from VM Linux operating
                # system.
                # When detaching the last volume from the FCPs in
                # fcp_connections,
                # normally, the connections of all FCPs are 0, so that
                #   sum(fcp_connections.values()) == 0,
                #   fcp_connections is like {'1a10': 0, '1b10': 0}
                # in this case, _remove_disks() must be called with
                # total_connections as 0,
                # so as to offline the FCPs from VM Linux operating system.
                # Abnormally, the connections of partial FCPs are 0, so that
                #   sum(fcp_connections.values()) > 0
                #   fcp_connections is like {'1a10': 0, '1b10': 3}
                # in this case, _remove_disks() must be called with
                # total_connections > 0,
                # so as NOT to offline the FCPs from VM Linux operating
                # system.
                total_connections = sum(fcp_connections.values())
                self._remove_disks(fcp_list, assigner_id, target_wwpns,
                                   target_lun, multipath, os_version,
                                   mount_point, total_connections)
                LOG.info("Removing volume in the operating "
                         "system of %s is done." % assigner_id)
                # Operation on z/VM:
                # undedicate FCP device from the virtual machine
                for fcp in fcp_list:
                    if fcp_connections[fcp] == 0:
                        # As _remove_disks() has been run successfully,
                        # we need to try our best to undedicate every FCP
                        # device
                        try:
                            LOG.info("Start to undedicate FCP %s from "
                                     "%s on z/VM." % (fcp, assigner_id))
                            self._undedicate_fcp(fcp, assigner_id)
                            LOG.info("FCP %s undedicated from %s on z/VM is "
                                     "done." % (fcp, assigner_id))
                        except exception.SDKSMTRequestFailed as err:
                            rc = err.results['rc']
                            rs = err.results['rs']
                            if (rc == 404 or rc == 204) and rs == 8:
                                # We ignore the two exceptions raised when
                                # FCP device is already undedicated.
                                # Example of exception when rc == 404:
                                # zvmsdk.exception.SDKSMTRequestFailed:
                                # Failed to undedicate device from userid
                                # 'JACK0003'.
                                # RequestData:
                                # 'changevm JACK0003 undedicate 1d1a',
                                # Results: '{'overallRC': 8,
                                # 'rc': 404, 'rs': 8, 'errno': 0,
                                # 'strError':
                                # 'ULGSMC5404E Image device not defined',
                                # 'response': ['(Error) ULTVMU0300E
                                # SMAPI API failed:
                                # Image_Device_Undedicate_DM,
                                # Example of exception when rc == 204:
                                # zvmsdk.exception.SDKSMTRequestFailed:
                                # Failed to undedicate device from userid
                                # 'JACK0003'.
                                # RequestData:
                                # 'changevm JACK0003 undedicate 1b17',
                                # Results: '{'overallRC': 8,
                                # 'rc': 204, 'rs': 8, 'errno': 0,
                                # 'response': ['(Error) ULTVMU0300E
                                # SMAPI API failed: Image_Device_Undedicate,
                                msg = ('ignore an exception because the '
                                       'FCP device {} '
                                       'has already been undedicdated on '
                                       'z/VM: {}'
                                       ).format(fcp, err.format_message())
                                LOG.warn(msg)
                            else:
                                # raise to do _rollback_do_detach()
                                raise
                    else:
                        LOG.info("The connections of FCP device %s is not 0, "
                                 "skip undedicating the FCP device on z/VM."
                                 % fcp)
                LOG.info("Detaching volume on virtual machine %s from FCP "
                         "devices %s is "
                         "done." % (assigner_id, fcp_list))
            except Exception as err:
                LOG.error(str(err))
                # Rollback for the following completed operations:
                # 1. Operation on VM OS done by _remove_disks()
                # 2. operations on z/VM done by _undedicate_fcp()
                LOG.info("Enter rollback: _rollback_do_detach")
                self._rollback_do_detach(fcp_connections, assigner_id,
                                         target_wwpns, target_lun,
                                         multipath, os_version, mount_point)
                LOG.info("Exit rollback: _rollback_do_detach")
                raise
def detach(self, connection_info):
"""Detach a volume from a guest
"""
fcps = connection_info['zvm_fcp']
wwpns = connection_info['target_wwpn']
target_lun = connection_info['target_lun']
assigner_id = connection_info['assigner_id'].upper()
multipath = connection_info['multipath'].lower()
os_version = connection_info['os_version']
mount_point = connection_info['mount_point']
if multipath == 'true':
multipath = True
else:
multipath = False
is_root_volume = connection_info.get('is_root_volume', False)
update_connections_only = connection_info.get(
'update_connections_only', False)
# transfer to lower cases
fcp_list = [x.lower() for x in fcps]
target_wwpns = [wwpn.lower() for wwpn in wwpns]
try:
self._do_detach(fcp_list, assigner_id,
target_wwpns, target_lun,
multipath, os_version, mount_point,
is_root_volume, update_connections_only)
except Exception:
for fcp in fcp_list:
with zvmutils.ignore_errors():
_userid, _reserved, _conns, _tmpl_id = self.get_fcp_usage(fcp)
LOG.info("After rollback, property of FCP device %s "
"is (assigner_id: %s, reserved:%s, "
"connections: %s, FCP Multipath Template id: %s)."
% (fcp, _userid, _reserved, _conns, _tmpl_id))
raise
    def get_volume_connector(self, assigner_id, reserve,
                             fcp_template_id=None, sp_name=None):
        """Get connector information of the instance for attaching to volumes.

        Connector information is a dictionary representing the Fibre
        Channel(FC) port(s) that will be making the connection.
        The properties of FC port(s) are as follows::
        {
            'zvm_fcp': [fcp1, fcp2]
            'wwpns': [npiv_wwpn1, npiv_wwpn2]
            'phy_to_virt_initiators':{
                npiv_wwpn1: phy_wwpn1,
                npiv_wwpn2: phy_wwpn2,
            }
            'host': LPARname_VMuserid,
            'fcp_paths': 2,            # the count of fcp paths
            'fcp_template_id': fcp_template_id  # if user doesn't specify it,
                    it is either the SP default or the host
                    default template id
        }

        :param assigner_id: (str) userid of the virtual machine
        :param reserve: (bool) True to reserve FCP devices for the
            attachment, False to unreserve them
        :param fcp_template_id: (str) FCP Multipath Template id, optional
        :param sp_name: (str) storage provider name, optional; used to
            select the SP default template when reserving
        :return: (dict) the connector described above, or an empty
            connector when no FCP devices are available
        """
        with database.get_fcp_conn():
            if fcp_template_id and \
                    not self.db.fcp_template_exist_in_db(fcp_template_id):
                errmsg = ("fcp_template_id %s doesn't exist."
                          % fcp_template_id)
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(
                    rs=11, userid=assigner_id, msg=errmsg)
            # get lpar name of the userid,
            # if no host name got, raise exception
            zvm_host = zvmutils.get_lpar_name()
            if zvm_host == '':
                errmsg = "failed to get z/VM LPAR name."
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(
                    rs=11, userid=assigner_id, msg=errmsg)
            """
            Reserve or unreserve FCP device
            according to assigner id and FCP Multipath Template id.
            """
            if reserve:
                LOG.info("get_volume_connector: Enter reserve_fcp_devices.")
                # The data structure of fcp_list is:
                # [(fcp_id, wwpn_npiv, wwpn_phy)]
                # NOTE(review): elements are accessed both by key
                # (f['fcp_id'] below) and by index (fcp[1] further down) —
                # presumably sqlite3.Row-like objects that support both;
                # confirm against reserve_fcp_devices.
                fcp_list, fcp_template_id = self.fcp_mgr.reserve_fcp_devices(
                    assigner_id, fcp_template_id, sp_name)
                LOG.info("get_volume_connector: "
                         "Exit reserve_fcp_devices {}".format(
                             [f['fcp_id'] for f in fcp_list]))
            else:
                LOG.info("get_volume_connector: Enter unreserve_fcp_devices.")
                # The data structure of fcp_list is:
                # [(fcp_id, wwpn_npiv, wwpn_phy, connections)]
                # An example of fcp_list:
                # [('1c10', 'c12345abcdefg1', 'c1234abcd33002641', 1),
                #  ('1d10', 'c12345abcdefg2', 'c1234abcd33002641', 0)]
                fcp_list = self.fcp_mgr.unreserve_fcp_devices(
                    assigner_id, fcp_template_id)
                LOG.info("get_volume_connector: "
                         "Exit unreserve_fcp_devices {}".format(
                             [f['fcp_id'] for f in fcp_list]))
            empty_connector = {'zvm_fcp': [],
                               'wwpns': [],
                               'host': '',
                               'phy_to_virt_initiators': {},
                               'fcp_paths': 0,
                               'fcp_template_id': fcp_template_id}
            if not fcp_list:
                errmsg = ("Not enough available FCP devices found from "
                          "FCP Multipath Template(id={})".format(
                              fcp_template_id))
                LOG.error(errmsg)
                return empty_connector
            # get wwpns of fcp devices
            wwpns = []
            phy_virt_wwpn_map = {}
            fcp_ids = []
            for fcp in fcp_list:
                wwpn_npiv = fcp[1]
                fcp_ids.append(fcp[0])
                wwpns.append(wwpn_npiv)
                phy_virt_wwpn_map[wwpn_npiv] = fcp[2]
            # return the LPARname+VMuserid as host
            ret_host = zvm_host + '_' + assigner_id
            connector = {'zvm_fcp': fcp_ids,
                         'wwpns': wwpns,
                         'phy_to_virt_initiators': phy_virt_wwpn_map,
                         'host': ret_host,
                         'fcp_paths': len(fcp_list),
                         'fcp_template_id': fcp_template_id}
            LOG.info('get_volume_connector returns %s for '
                     'assigner %s and FCP Multipath Template %s'
                     % (connector, assigner_id, fcp_template_id))
        return connector
def check_fcp_exist_in_db(self, fcp, raise_exec=True):
all_fcps_raw = self.db.get_all()
all_fcps = []
for item in all_fcps_raw:
all_fcps.append(item[0].lower())
if fcp not in all_fcps:
if raise_exec:
LOG.error("fcp %s not exist in db!", fcp)
raise exception.SDKObjectNotExistError(
obj_desc=("FCP '%s'" % fcp), modID='volume')
else:
LOG.warning("fcp %s not exist in db!", fcp)
return False
else:
return True
def get_fcp_usage(self, fcp):
userid, reserved, connections, tmpl_id = self.db.get_usage_of_fcp(fcp)
LOG.debug("Got userid:%s, reserved:%s, connections:%s, tmpl_id: %s "
"of FCP:%s" % (userid, reserved, connections, fcp, tmpl_id))
return userid, reserved, connections, tmpl_id
    def set_fcp_usage(self, fcp, assigner_id, reserved, connections,
                      fcp_template_id):
        """Overwrite the usage record of an FCP device in the FCP database.

        :param fcp: (str) FCP device number
        :param assigner_id: (str) userid to record as the assigner
        :param reserved: reserved flag to store
        :param connections: connections count to store
        :param fcp_template_id: (str) FCP Multipath Template id to store
        """
        self.db.update_usage_of_fcp(fcp, assigner_id, reserved, connections,
                                    fcp_template_id)
        LOG.info("Set usage of fcp %s to userid:%s, reserved:%s, "
                 "connections:%s, tmpl_id: %s." % (fcp, assigner_id,
                                                   reserved, connections,
                                                   fcp_template_id))
# Hypervisor type reported for z/VM hosts
HYPERVISOR_TYPE = 'zvm'
# CPU architecture of the z/VM platform
ARCHITECTURE = 's390x'
# The only guest type the SDK manages
ALLOWED_VM_TYPE = 'zLinux'
# Default provisioning method for guest deployment
PROV_METHOD = 'netboot'
# Default z/VM privilege class for created guests
ZVM_USER_DEFAULT_PRIVILEGE = 'G'
# Archive format of the config drive
CONFIG_DRIVE_FORMAT = 'tgz'
# Default filesystem for ephemeral disks
DEFAULT_EPH_DISK_FMT = 'ext3'
# Name of the disk-setup function
DISK_FUNC_NAME = 'setupDisk'
# the count of lines that one FCP info has
FCP_INFO_LINES_PER_ITEM = 6
# Maps result-dict keys to the labels found in host inventory output;
# used when parsing host info query results.
RINV_HOST_KEYWORDS = {
    "zcc_userid": "ZCC USERID:",
    "zvm_host": "z/VM Host:",
    "zhcp": "zHCP:",
    "cec_vendor": "CEC Vendor:",
    "cec_model": "CEC Model:",
    "hypervisor_os": "Hypervisor OS:",
    "hypervisor_name": "Hypervisor Name:",
    "architecture": "Architecture:",
    "lpar_cpu_total": "LPAR CPU Total:",
    "lpar_cpu_used": "LPAR CPU Used:",
    "lpar_memory_total": "LPAR Memory Total:",
    "lpar_memory_used": "LPAR Memory Used:",
    "lpar_memory_offline": "LPAR Memory Offline:",
    "ipl_time": "IPL Time:",
    }

# Labels used when parsing disk pool capacity output
DISKPOOL_KEYWORDS = {
    "disk_total": "Total:",
    "disk_used": "Used:",
    "disk_available": "Free:",
    }

# Label used when parsing disk pool volumes output
DISKPOOL_VOLUME_KEYWORDS = {
    "diskpool_volumes": "Diskpool Volumes:",
    }
# Parameter names accepted by the set-vswitch operation
SET_VSWITCH_KEYWORDS = ["grant_userid", "user_vlan_id",
                        "revoke_userid", "real_device_address",
                        "port_name", "controller_name",
                        "connection_value", "queue_memory_limit",
                        "routing_value", "port_type", "persist",
                        "gvrp_value", "mac_id", "uplink",
                        "nic_userid", "nic_vdev",
                        "lacp", "interval", "group_rdev",
                        "iptimeout", "port_isolation", "promiscuous",
                        "MAC_protect", "VLAN_counters"]
# Human-readable descriptions for vswitch device status codes
DEV_STATUS = {'0': 'Device is not active.',
              '1': 'Device is active.',
              '2': 'Device is a backup device'}

# Human-readable descriptions for vswitch device error codes
DEV_ERROR = {'0': 'No error.',
             '1': 'Port name conflict.',
             '2': 'No layer 2 support.',
             '3': 'Real device does not exist.',
             '4': 'Real device is attached elsewhere.',
             '5': 'Real device is not compatible type.',
             '6': 'Initialization error.',
             '7': 'Stalled OSA.',
             '8': 'Stalled controller.',
             '9': 'Controller connection severed.',
             '10': 'Primary or secondary routing conflict.',
             '11': 'Device is offline.',
             '12': 'Device was detached.',
             '13': 'IP/Ethernet type mismatch.',
             '14': 'Insufficient memory in controller '
                   'virtual machine.',
             '15': 'TCP/IP configuration conflict.',
             '16': 'No link aggregation support.',
             '17': 'OSA-E attribute mismatch.',
             '18': 'Reserved for future use.',
             '19': 'OSA-E is not ready.',
             '20': 'Reserved for future use.',
             '21': 'Attempting restart for device.',
             '22': 'Exclusive user error.',
             '23': 'Device state is invalid.',
             '24': 'Port number is invalid for device.',
             '25': 'No OSA connection isolation.',
             '26': 'EQID mismatch.',
             '27': 'Incompatible controller.',
             '28': 'BACKUP detached.',
             '29': 'BACKUP not ready.',
             '30': 'BACKUP attempting restart.',
             '31': 'EQID mismatch.',
             '32': 'No HiperSockets bridge support.',
             '33': 'HiperSockets bridge error.'}

# Human-readable descriptions for virtual switch status codes
SWITCH_STATUS = {'1': 'Virtual switch defined.',
                 '2': 'Controller not available.',
                 '3': 'Operator intervention required.',
                 '4': 'Disconnected.',
                 '5': 'Virtual devices attached to controller. '
                      'Normally a transient state.',
                 '6': 'OSA initialization in progress. '
                      'Normally a transient state.',
                 '7': 'OSA device not ready',
                 '8': 'OSA device ready.',
                 '9': 'OSA devices being detached. '
                      'Normally a transient state.',
                 '10': 'Virtual switch delete pending. '
                       'Normally a transient state.',
                 '11': 'Virtual switch failover recovering. '
                       'Normally a transient state.',
                 '12': 'Autorestart in progress. '
                       'Normally a transient state.'}
# File name used to track z/VM volumes
ZVM_VOLUMES_FILE = 'zvm_volumes'
# Valid volume states
ZVM_VOLUME_STATUS = ['free', 'in-use']
VOLUME_MULTI_PASS = 'MULTI'

# Guest power states
POWER_STATE_ON = u'on'
POWER_STATE_OFF = u'off'

# SQLite database file names, one per subsystem
DATABASE_VOLUME = 'sdk_volume.sqlite'
DATABASE_NETWORK = 'sdk_network.sqlite'
DATABASE_GUEST = 'sdk_guest.sqlite'
DATABASE_IMAGE = 'sdk_image.sqlite'
DATABASE_FCP = 'sdk_fcp.sqlite'

# Subdirectory names for image files by purpose
IMAGE_TYPE = {
    'DEPLOY': 'netboot',
    'CAPTURE': 'staging'}

FILE_TYPE = {
    'IMPORT': 'imported',
    'EXPORT': 'exported'}

# Root directory of the SDK's persistent data
SDK_DATA_PATH = '/var/lib/zvmsdk/'
# Path of the file listing userids authorized for IUCV
IUCV_AUTH_USERID_PATH = '/etc/zvmsdk/iucv_authorized_userid'
import json
import six
import socket
import sys
import threading
import traceback
from zvmsdk import api
from zvmsdk import config
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
# Py2/Py3 compatibility: the stdlib queue module was renamed from
# Queue (Py2) to queue (Py3); alias it to the Py2 name used below.
if six.PY3:
    import queue as Queue
else:
    import Queue

CONF = config.CONF
LOG = log.LOG
class SDKServer(object):
    def __init__(self):
        # Initialize SDK API
        self.sdkapi = api.SDKAPI()
        # placeholder for the server's listening socket; set up elsewhere
        self.server_socket = None
        # bounded queue of pending client requests; size comes from config
        self.request_queue = Queue.Queue(maxsize=
                                         CONF.sdkserver.request_queue_size)
def log_error(self, msg):
thread = threading.current_thread().name
msg = ("[%s] %s" % (thread, msg))
LOG.error(msg)
def log_info(self, msg):
thread = threading.current_thread().name
msg = ("[%s] %s" % (thread, msg))
LOG.info(msg)
def log_warn(self, msg):
thread = threading.current_thread().name
msg = ("[%s] %s" % (thread, msg))
LOG.warning(msg)
def log_debug(self, msg):
thread = threading.current_thread().name
msg = ("[%s] %s" % (thread, msg))
LOG.debug(msg)
def construct_internal_error(self, msg):
self.log_error(msg)
error = returncode.errors['internal']
results = error[0]
results['modID'] = returncode.ModRCs['sdkserver']
results.update({'rs': 1,
'errmsg': error[1][1] % {'msg': msg},
'output': ''})
return results
def construct_api_name_error(self, msg):
self.log_error(msg)
error = returncode.errors['API']
results = error[0]
results['modID'] = returncode.ModRCs['sdkserver']
results.update({'rs': 1,
'errmsg': error[1][1] % {'msg': msg},
'output': ''})
return results
def send_results(self, client, addr, results):
""" send back results to client in the json format of:
{'overallRC': x, 'modID': x, 'rc': x, 'rs': x, 'errmsg': 'msg',
'output': 'out'}
"""
json_results = json.dumps(results)
json_results = json_results.encode()
sent = 0
total_len = len(json_results)
got_error = False
while (sent < total_len):
this_sent = client.send(json_results[sent:])
if this_sent == 0:
got_error = True
break
sent += this_sent
if got_error or sent != total_len:
self.log_error("(%s:%s) Failed to send back results to client, "
"results: %s" % (addr[0], addr[1], json_results))
else:
self.log_debug("(%s:%s) Results sent back to client successfully."
% (addr[0], addr[1]))
def serve_API(self, client, addr):
""" Read client request and call target SDK API"""
self.log_debug("(%s:%s) Handling new request from client." %
(addr[0], addr[1]))
results = None
try:
data = client.recv(4096)
data = bytes.decode(data)
# When client failed to send the data or quit before sending the
# data, server side would receive null data.
# In such case, server would not send back any info and just
# terminate this thread.
if not data:
self.log_warn("(%s:%s) Failed to receive data from client." %
(addr[0], addr[1]))
return
api_data = json.loads(data)
# API_data should be in the form [funcname, args_list, kwargs_dict]
if not isinstance(api_data, list) or len(api_data) != 3:
msg = ("(%s:%s) SDK server got wrong input: '%s' from client."
% (addr[0], addr[1], data))
results = self.construct_internal_error(msg)
return
# Check called API is supported by SDK
(func_name, api_args, api_kwargs) = api_data
self.log_debug("(%s:%s) Request func: %s, args: %s, kwargs: %s" %
(addr[0], addr[1], func_name, str(api_args),
str(api_kwargs)))
try:
api_func = getattr(self.sdkapi, func_name)
except AttributeError:
msg = ("(%s:%s) SDK server got wrong API name: %s from"
"client." % (addr[0], addr[1], func_name))
results = self.construct_api_name_error(msg)
return
# invoke target API function
return_data = api_func(*api_args, **api_kwargs)
except exception.SDKBaseException as e:
self.log_error("(%s:%s) %s" % (addr[0], addr[1],
traceback.format_exc()))
# get the error info from exception attribute
# All SDKbaseexception should eventually has a
# results attribute defined which can be used by
# sdkserver here
if e.results is None:
msg = ("(%s:%s) SDK server got exception without results "
"defined, error: %s" % (addr[0], addr[1],
e.format_message()))
results = self.construct_internal_error(msg)
else:
results = {'overallRC': e.results['overallRC'],
'modID': e.results['modID'],
'rc': e.results['rc'],
'rs': e.results['rs'],
'errmsg': e.format_message(),
'output': ''}
except Exception as e:
self.log_error("(%s:%s) %s" % (addr[0], addr[1],
traceback.format_exc()))
msg = ("(%s:%s) SDK server got unexpected exception: "
"%s" % (addr[0], addr[1], repr(e)))
results = self.construct_internal_error(msg)
else:
if return_data is None:
return_data = ''
results = {'overallRC': 0, 'modID': None,
'rc': 0, 'rs': 0,
'errmsg': '',
'output': return_data}
# Send back the final results
try:
if results is not None:
self.send_results(client, addr, results)
except Exception as e:
# This should not happen in normal case.
# A special case is the server side socket is closed/removed
# before the send() action.
self.log_error("(%s:%s) %s" % (addr[0], addr[1], repr(e)))
finally:
# Close the connection to make sure the thread socket got
# closed even when it got unexpected exceptions.
self.log_debug("(%s:%s) Finish handling request, closing "
"socket." % (addr[0], addr[1]))
client.close()
def worker_loop(self):
# The worker thread would continuously fetch request from queue
# in a while loop.
while True:
try:
# This get() function raise Empty exception when there's no
# available item in queue
clt_socket, clt_addr = self.request_queue.get(block=False)
except Queue.Empty:
self.log_debug("No more item in request queue, worker will "
"exit now.")
break
except Exception as err:
self.log_error("Failed to get request item from queue, error: "
"%s. Worker will exit now." % repr(err))
break
else:
self.serve_API(clt_socket, clt_addr)
self.request_queue.task_done()
def setup(self):
# create server socket
try:
self.server_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
except socket.error as msg:
self.log_error("Failed to create SDK server socket: %s" % msg)
sys.exit(1)
server_sock = self.server_socket
# bind server address and port
host = CONF.sdkserver.bind_addr
port = CONF.sdkserver.bind_port
try:
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind((host, port))
except socket.error as msg:
self.log_error("Failed to bind to (%s, %d), reason: %s" %
(host, port, msg))
server_sock.close()
sys.exit(1)
# Start listening
server_sock.listen(5)
self.log_info("SDK server now listening")
def run(self):
# Keep running in a loop to handle client connections
while True:
# Wait client connection
conn, addr = self.server_socket.accept()
self.log_debug("(%s:%s) Client connected." % (addr[0],
addr[1]))
# This put() function would be blocked here until there's
# a slot in the queue
self.request_queue.put((conn, addr))
thread_count = threading.active_count()
if thread_count <= CONF.sdkserver.max_worker_count:
thread = threading.Thread(target=self.worker_loop)
self.log_debug("Worker count: %d, starting new worker: %s" %
(thread_count - 1, thread.name))
thread.start()
def start_daemon():
    """Entry point: build an SDKServer, bind its socket and serve forever.

    The listening socket is closed on the way out, whatever made run()
    unwind (signal, sys.exit from setup(), unexpected exception).
    """
    daemon = SDKServer()
    try:
        daemon.setup()
        daemon.run()
    finally:
        # This finally won't catch exceptions from child thread, so
        # the close here is safe.
        sock = daemon.server_socket
        if sock is not None:
            daemon.log_info("Closing the server socket.")
            sock.close()
# -----------------------------------------------------------------------------
# Detail Module RC definition of each error
# -----------------------------------------------------------------------------
# Maps each SDK module name to the numeric module return code ('modID')
# carried in the results dicts returned to API callers.
ModRCs = {
    'smt': 1,
    'guest': 10,
    'network': 20,
    'volume': 30,
    'image': 40,
    'monitor': 50,
    'file': 60,
    'sdkserver': 100,
    'sdkwsgi': 120,
    # The 'zvmsdk' is used as the default module if module is not specified
    # when raising exception
    'zvmsdk': 400
    }
errors = {
    # Each entry defined here corresponds to a kind of error indicated by the
    # following list of info:
    # 1. the dict of 'overallRC', 'rc'
    # 2. the dict containing all the possible rs and its error message
    # 3. The general error description. This should be used for doc generation

    # Invalid input error
    'input': [{'overallRC': 100, 'modID': ModRCs['zvmsdk'], 'rc': 100},
              {1: ("Invalid API arg count, API: %(api)s, %(expected)d expected"
                   " while %(provided)d provided."),
               2: ("Invalid API arg type, API: %(api)s, expected types: "
                   "'%(expected)s', input types: '%(inputtypes)s'"),
               3: ("Invalid API arg format, error: %(msg)s"),
               4: ("Missing required option: %(msg)s"),
               },
              "Invalid API Input",
              ],
    # Invalid API name error
    # sdkserver.construct_api_name_error() formats errors['API'][1][1] with
    # %(msg)s; without this entry that code path raised KeyError('API')
    # instead of reporting the bad API name back to the client.
    'API': [{'overallRC': 400, 'modID': ModRCs['zvmsdk'], 'rc': 400},
            {1: "Invalid API name, '%(msg)s'"},
            "Invalid API name",
            ],
    # General Errors for each module, same overallRC = 300
    # Guest Operation failed
    'guest': [{'overallRC': 300, 'modID': ModRCs['guest'], 'rc': 300},
              {1: "Database operation failed, error: %(msg)s",
               2: "Failed to add mdisks when creating guest, error: %(msg)s",
               3: ("Failed to deploy image to userid: '%(userid)s', "
                   "unpackdiskimage failed with rc: %(unpack_rc)d, "
                   "error: %(err)s"),
               4: ("Failed to deploy image to userid: '%(userid)s', "
                   "copy configure drive failed: %(err_info)s"),
               5: ("Failed to capture userid %(userid)s to generate image, "
                   "error: %(msg)s"),
               6: ("Failed to resize cpus of guest: '%(userid)s', "
                   "error: update cpu definition in user entry failed with "
                   "smt error: '%(err)s'."),
               7: ("Failed to live resize cpus of guest: '%(userid)s', "
                   "error: define new cpu to active failed with smt error: "
                   "'%(err)s'."),
               8: ("Failed to live resize cpus of guest: '%(userid)s', "
                   "error: rescan cpus to hot-plug new defined cpus failed: "
                   "'%(err)s'."),
               9: ("Failed to resize memory of guest: '%(userid)s', "
                   "error: lock user entry failed with "
                   "smt error: '%(err)s'."),
               10: ("Failed to resize memory of guest: '%(userid)s', "
                    "error: replace user entry failed with "
                    "smt error: '%(err)s'."),
               11: ("Failed to live resize memory of guest: '%(userid)s', "
                    "error: define standby memory failed with "
                    "smt error: '%(err)s'."),
               12: ("Failed to deploy image to userid: '%(userid)s', "
                    "get unpackdiskimage cmd failed: %(err)s"),
               13: ("Failed to deploy image to userid: '%(userid)s', "
                    "ignition file is required when deploying RHCOS image"),
               14: ("Failed to deploy image to userid: '%(userid)s', %(msg)s"),
               15: ("Failed to live resize cpus of guest: '%(userid)s', "
                    "error: enable new defined cpus failed: '%(err)s'."),
               16: ("Failed to start the guest: '%(userid)s', %(msg)s")
               },
              "Operation on Guest failed"
              ],
    # Network Operation failed
    'network': [{'overallRC': 300, 'modID': ModRCs['network'], 'rc': 300},
                {1: "Database operation failed, error: %(msg)s",
                 2: "ZVMSDK network error: %(msg)s",
                 3: ("Failed to couple nic %(nic)s to vswitch %(vswitch)s "
                     "on the active guest system, error: %(couple_err)s, and "
                     "failed to revoke user direct's changes, "
                     "error: %(revoke_err)s "),
                 4: ("Failed to create nic %(nic)s for %(userid)s on the "
                     "active guest system, error: %(create_err)s, and "
                     "failed to revoke user direct's changes, "
                     "error: %(revoke_err)s "),
                 5: ("Failed to actively change network setting for user "
                     "%(userid)s, error: %(msg)s")
                 },
                "Operation on Network failed"
                ],
    # Image Operation failed
    'image': [{'overallRC': 300, 'modID': ModRCs['image'], 'rc': 300},
              {1: "Database operation failed, error: %(msg)s",
               2: "No image schema found for %(schema)s",
               3: "Image import error: Failed to calculate the md5sum of the"
                  " image",
               4: "Image import error: The md5sum after import is not same as"
                  " source image, it is possible that the image has been "
                  "broken during import",
               5: "Image import error: Failed to get the root disk size units"
                  " of the image via hexdump",
               6: "Image import error: The header of image does not contain"
                  " built-in disk size units",
               7: "Image import error: The image's disk type is not valid."
                  " Currently only FBA or CKD type image is supported",
               8: "Image import error: Failed to get the physical size of"
                  " image in bytes",
               9: "Import image from http server failed with reason %(err)s",
               10: "Image import error: Copying image file from remote"
                   " filesystem failed with error %(err)s",
               11: "The specified remote_host %(rh)s format invalid",
               12: "Import image from local file system failed with error"
                   " %(err)s",
               13: "Image import error: image name %(img)s already exist in "
                   "image database",
               14: "Image import error: %(msg)s",
               20: "The image record of %(img)s does not exist",
               21: "Image Export error: Failed to copy image file to remote "
                   "host with reason: %(msg)s",
               22: "Export image to local file system failed: %(err)s",
               23: "Image file of %(img)s does not exist, "
                   "so failed to get its timestamp.",
               },
              "Operation on Image failed"
              ],
    # Volume Operation failed
    'volume': [{'overallRC': 300, 'modID': ModRCs['volume'], 'rc': 300},
               {1: "Database operation failed, error: %(msg)s",
                3: "Volume %(vol)s has already been attached on instance "
                   "%(inst)s",
                4: "Volume %(vol)s is not attached on instance %(inst)s",
                5: "Refresh bootmap fails, error code: %(errcode)s and "
                   "reason: %(errmsg)s",
                6: "IUCV failed to get authorization from instance "
                   "%(userid)s with reason %(msg)s",
                7: "Refresh bootmap timeout with reason %(msg)s",
                8: "Failed to attach volume to instance "
                   "%(userid)s with reason %(msg)s",
                9: "Failed to detach volume from instance "
                   "%(userid)s with reason %(msg)s",
                10: "Failed to refresh bootmap for RHCOS: "
                    "transportfiles are required",
                11: "Failed to get volume connector of %(userid)s "
                    "because %(msg)s",
                },
               "Operation on Volume failed"
               ],
    # Monitor Operation failed
    'monitor': [{'overallRC': 300, 'modID': ModRCs['monitor'], 'rc': 300},
                {1: "Database operation failed, error: %(msg)s",
                 },
                "Operation on Monitor failed"
                ],
    # File Operation failed
    'file': [{'overallRC': 300, 'modID': ModRCs['file'], 'rc': 300},
             {1: "File import operation failed",
              2: "File export operation failed"},
             "Operation on file failed"
             ],
    # REST API Request error (Only used by sdkwsgi)
    # 'modID' would be set to ModRC['sdkwsgi']
    'RESTAPI': [{'overallRC': 400, 'modID': ModRCs['sdkwsgi'], 'rc': 400},
                {1: "Invalid request",
                 },
                "REST API Request error"
                ],
    # Object not exist
    # Used when the operated object does not exist.
    # 'modID' would be set to each module rc when raise the exception
    # 'rs' is always 1
    'notExist': [{'overallRC': 404, 'modID': None, 'rc': 404},
                 {1: "%(obj_desc)s does not exist.",
                  2: "Not found error: '%(msg)s'",
                  3: ("%(obj_desc)s does not exist in directory "
                      "although it is in DB. The guest could have been "
                      "deleted out of z/VM Cloud Connector.")},
                 "The operated object does not exist"
                 ],
    # Object already exists.
    # The third (description) element was missing here although every other
    # entry carries one (used for doc generation); it is now supplied.
    'alreadyExist': [{'overallRC': 409, 'modID': None, 'rc': 409},
                     {1: "%(obj_desc)s already exists."},
                     "The operated object already exists"
                     ],
    # Conflict Error (The to-be-updated object status conflict)
    'conflict': [{'overallRC': 409, 'modID': None, 'rc': 409},
                 {1: "Guest '%(userid)s' is not in active status.",
                  2: ("Failed to live resize cpus of guest: '%(userid)s', "
                      "error: current active cpu count: '%(active)i' is "
                      "greater than requested count: '%(req)i'."),
                  3: ("Failed to resize cpus of guest: '%(userid)s', "
                      "error: maximum number of cpus is not defined in user "
                      "directory."),
                  4: ("Failed to resize cpus of guest: '%(userid)s', "
                      "error: the requested number of cpus: '%(req)i' exceeds "
                      "the maximum number of cpus allowed: '%(max)i'."),
                  5: ("Failed to set vswitch %(vsw)s, error: %(msg)s"),
                  6: ("Failed to create nic %(vdev)s for guest %(userid)s, "
                      "error: %(msg)s"),
                  7: ("Failed to create nic %(vdev)s for guest %(userid)s, "
                      "error: %(obj)s is locked"),
                  8: ("Failed to delete nic %(vdev)s for guest %(userid)s, "
                      "error: %(msg)s"),
                  9: ("Failed to delete nic %(vdev)s for guest %(userid)s, "
                      "error: %(obj)s is locked"),
                  10: ("Failed to couple nic %(vdev)s of guest %(userid)s "
                       "with vswitch %(vsw)s, error: %(msg)s"),
                  11: ("Failed to couple nic %(vdev)s of guest %(userid)s "
                       "with vswitch %(vsw)s, error: %(obj)s is locked"),
                  12: ("Failed to uncouple nic %(vdev)s of guest %(userid)s "
                       "error: %(msg)s"),
                  13: ("Failed to uncouple nic %(vdev)s of guest %(userid)s "
                       "error: %(obj)s is locked"),
                  14: ("Failed to dedicate OSA %(osa)s to guest %(userid)s "
                       "error: %(msg)s"),
                  15: ("Failed to dedicate OSA %(osa)s to guest %(userid)s "
                       "error: %(obj)s is locked"),
                  16: ("Failed to delete dedicated device from guest "
                       "%(userid)s %(vdev)s, error: %(msg)s"),
                  17: ("Failed to delete dedicated device from guest "
                       "%(userid)s %(vdev)s, error: %(obj)s is locked"),
                  18: ("Failed to live resize memory of guest: '%(userid)s', "
                       "error: current active memory size: '%(active)i'm is "
                       "greater than requested size: '%(req)i'm."),
                  19: ("Failed to resize memory of guest: '%(userid)s', "
                       "error: user definition is not in expected format, "
                       "cann't get the defined/max/reserved storage."),
                  20: ("Failed to resize memory of guest: '%(userid)s', "
                       "error: the requested memory size: '%(req)im' exceeds "
                       "the maximum memory size defined: '%(max)im'."),
                  21: ("Failed to live resize memory of guest: %(userid)s, "
                       "error: the memory size to be increased: '%(inc)im' "
                       "is greater than the maximum reserved memory size: "
                       "'%(max)im'."),
                  22: ("Failed to delete FCP Multipath Template, "
                       "error: %(msg)s"),
                  23: ("Failed to create or update FCP Multipath Template, "
                       "error: %(msg)s"),
                  24: ("Failed to edit FCP Multipath Template, "
                       "error: %(msg)s")
                  },
                 "The operated object status conflict"
                 ],
    # Object deleted.
    # The operated object has been deleted and not exist any more.
    # This can be used for some module that support deleted=1 in DB.
    'deleted': [{'overallRC': 410, 'modID': None, 'rc': 410},
                {},
                "The operated object is deleted"
                ],
    # Internal error
    # Module Internal error, rc is not defined here, it will be set when raising
    # exception. when module id is not specified, the 'zvmsdk' module rc will be
    # used.
    'internal': [{'overallRC': 500, 'modID': None, 'rc': 500},
                 {1: "Unexpected internal error in ZVM SDK, error: %(msg)s"},
                 "ZVM SDK Internal Error"
                 ],
    # Service Unavailable
    # The SDK REST reject deploy/capture requests because of the concurrent
    # capture/deploy running exceeds the maximum number.
    'serviceUnavail': [{'overallRC': 503, 'modID': ModRCs['sdkwsgi'],
                        'rc': 503},
                       {1: "Max concurrent deploy/capture requests received, "
                           "request is rejected. %(req)s",
                        },
                       "z/VM Cloud Connector service is unavailable"
                       ],
    # Service not support
    # The requested function has not been implemented in current release,
    # the 'modID' would be set to each module rc when raise the exception
    # 'rs' is always 1
    'serviceNotSupport': [{'overallRC': 501, 'modID': None, 'rc': 501},
                          {1: "The requested function: %(func)s has not been "
                              "implemented in current release",
                           },
                          "z/VM Cloud Connector function not implemented"
                          ],
    }
# smt internal error
# This const defines the list of smt errors that should be converted to
# internal error in SDK layer.
# Each element in the list is a tuple consisting the 'overallRC', 'rc',
# list of 'rs'
# when the value is 'None', it means always match.
SMT_INTERNAL_ERROR = [(4, 4, range(1, 18)),
                      (2, 2, [99, ]),
                      # overallRC 25 matches regardless of rc/rs
                      (25, None, None),
                      (99, 99, [416, 417])
                      ]
import os
from six.moves import configparser
class Opt(object):
    """Definition of a single SDK configuration option.

    Instances carry the declarative metadata (section, type, default,
    required flag and help text) that ConfigOpts later turns into the
    effective configuration dictionaries.
    """

    def __init__(self, opt_name, section='default',
                 opt_type='str', help='',
                 default=None, required=False):
        # Identity of the option and the config-file section it lives in.
        self.name = opt_name
        self.section = section
        # Declared value type, default value and whether it must be set.
        self.opt_type = opt_type
        self.default = default
        self.required = required
        # Human-readable description shown in generated documentation.
        self.help = help
# All configuration options known to the SDK, grouped by section.
# Changes below are limited to typo/grammar fixes in user-visible help text;
# option names, sections, defaults and types are unchanged.
zvm_opts = [
    # logging options
    Opt('log_dir',
        section='logging',
        default='/var/log/zvmsdk/',
        help='''
Directory where log file to be put into.
SDK has a set of logs to help administrator to debug
and audit actions performed through SDK. Edit this option
if you want to put logs into specified place.
Please ensure the service running on the consume which
consumes SDK has the authorization to write to the path.
'''),
    Opt('log_level',
        section='logging',
        default='logging.INFO',
        help='''
Level of the log.
SDK utilize python logging package to help admin debug
or analyze issues. it's recommend to set this value
to logging.DEBUG to get more detailed logs and set it to
logging.INFO(default) in normal situation.
recommend values:
logging.ERROR: level above ERROR will be written to log file.
logging.WARNING: level above WARNING(ERROR, WARNING)
will be written to log file.
logging.INFO: level above INFO(ERROR, WARNING, INFO)
will be written to log file.
logging.DEBUG: All log level (ERROR, WARNING, INFO, DEBUG)
will be written to log file.
'''),
    # zvm options
    Opt('default_nic_vdev',
        section='zvm',
        default='1000',
        help='''
Virtual device number for default NIC address.
This value is the first NIC virtual device number,
each NIC needs 3 numbers for control/read/write, so by default
the first NIC's address is 1000, the second one is 1003 etc.
Possible values:
An integer value in hex format, between 0 and 65536 (x'FFFF').
It should not conflict with other device numbers in the z/VM guest's
configuration, for example device numbers of the root or ephemeral or
persistent disks.
Sample NIC definitions in the z/VM user directory:
NICDEF 1000 TYPE QDIO LAN SYSTEM <vswitch1> MACID <macid1>
NICDEF 1003 TYPE QDIO LAN SYSTEM <vswitch2> MACID <macid2>
'''
        ),
    Opt('user_default_share_unit',
        section='zvm',
        opt_type='int',
        default=100,
        help='''
The default SHARE settings configuration.
The recommend value of SHARE. From z/VM doc, SHARE is relative value of
virtual machine and if you set SHARE to 100 while virtual CPUs are 4,
then each vCPU get 25 entitlement.
So the mechanism currently is:
1) If a share is given, set SHARE value to the VM
2) If no SHARE is given during creation, check user_default_share_unit
3) If user_default_share_unit is 0, do nothing
4) If user_default_share_unit it not 0(current default is 100),
then insert statement `SHARE RELATIVE user_default_share_unit*vCPU`
into user direct, for example, with user_default_share_unit=100,
4 vCPU will create `SHARE RELATIVE 400`.
This align the best practice of z/VM recommendation.
'''),
    Opt('default_admin_userid',
        section='zvm',
        help='''
Default LOGONBY userid(s) for the cloud.
This is a set of z/VM userid(s) which are allowed to logon using the LOGONBY
keyword to the guests created by the z/VM SDK solution, compatible with
the LBYONLY keyword of the user directory statement. This value is only used
when a guest is created. If you change this value, existing guests' directory
entries are not automatically updated with the new value.
When an ESM is installed, this parameter only governs when the ESM
defers to CP's processing.
Usage note:
The default is empty string with nothing set.
'' is an invalid value and it will cause VM deploying failed.
Thus, DO NOT set default_admin_userid=''.
When a non-empty string is provided, blank chars will be used as delimiter,
you can use LOGONBY xxx command to log on the guest using the corresponding
admin userid's password.
For example, when you set this value to 'oper1 oper2 oper3 jones', it means
you can use any one of 'oper1', 'oper2', 'oper3', 'jones' as an admin user.
see the z/VM CP Planning and Administration for additional information.
Possible values:
A maximum of 8 blank-delimited strings. Each non-blank string must be a
valid z/VM userid.
e.g 'oper1 oper2' is a valid value.
'o1 o2 o3 o4 o5 o6 o7 o8 o9' is NOT a valid value.
'''),
    # FIXME: remove this option when switch to smt
    Opt('user_default_password',
        section='zvm'),
    Opt('disk_pool',
        section='zvm',
        default=None,
        required=False,
        help='''
zVM disk pool and type for root/ephemeral disks.
The option is combined by 2 parts and use : as separator.
The first part is the type of disks in the disk pool.
The disks in one disk pool must in same type (ECKD or FBA).
Possible values of the disk pool type:
A string, either ECKD or FBA.
The second part is the volume group name defined in your directory manager
on your z/VM system, which will be used for allocating disks for
new guest. A dollar sign ($) is not allowed in the name.
Sample disk_pool values:
ECKD:diskpo1
FBA:testpool
'''),
    Opt('user_profile',
        section='zvm',
        required=True,
        help='''
PROFILE name to use when creating a z/VM guest.
When SDK deploys a guest on z/VM, it can include some
common statements from a PROFILE definition.
This PROFILE must already be included in your z/VM user directory.
Possible values:
An 8 character name of a PROFILE that is already defined in the z/VM
user directory.
'''),
    Opt('user_root_vdev',
        section='zvm',
        default='0100',
        help='''
Virtual device number for root disk.
When SDK deploys a guest, it creates a root disk and potentially
several data disks. This value is the virtual device number of the root
disk.
Possible values:
An integer value in hex format, between 0 and 65536 (x'FFFF').
It should not conflict with other device numbers in the z/VM guest's
configuration, for example device numbers of the NICs or ephemeral or
persistent disks.
Sample root disk in user directory:
MDISK 0100 <disktype> <start> <end> <volumelabel> <readwrite>
'''),
    Opt('user_default_max_cpu',
        section='zvm',
        default=32,
        opt_type='int',
        help='''
The default maximum number of virtual processers the user can define.
This value is used as the default value for maximum vcpu number when
create a guest with no max_cpu specified.
The number must be a decimal value between 1 and 64.
'''),
    Opt('user_default_max_memory',
        section='zvm',
        default='64G',
        help='''
The default maximum size of memory the user can define.
This value is used as the default value for maximum memory size when
create a guest with no max_mem specified.
The value can be specified by 1-4 bits of number suffixed by either
M (Megabytes) or G (Gigabytes) and the number must be a whole number,
values such as 4096.8M or 32.5G are not supported.
The value should be adjusted based on your system capacity.
'''),
    Opt('user_default_max_reserved_memory',
        section='zvm',
        default='64G',
        help='''
The default maximum size of reserved memory in a vm's direct entry.
This value is used as the default value for maximum reserved memory
size for a guest.
The value can be specified by 1-4 bits of number suffixed by either
M (Megabytes) or G (Gigabytes) and the number must be a whole number,
values such as 4096.8M or 32.5G are not supported.
The value should be adjusted based on your system capacity.
'''),
    Opt('namelist',
        section='zvm',
        help='''
The name of a list containing names of virtual servers to be queried. The list
which contains the userid list by default is named: VSMWORK1 NAMELIST, see
DMSSICNF COPY key: NameListFileIdAny. The list has to be accessible to the
SMAPI servers.
The length of namelist must no longer than 64.
'''),
    Opt('swap_force_mdisk',
        section='zvm',
        default=False,
        help='''
For swap disk to create from mdisk instead of vdisk.
In boot from volume case, there might be no disk pool at all, then
the only choice is to use vdisk (or using FCP LUN which is complicated),
if customer doesn't want vdisk, then set this value to `True` so
VDISK will not be used and in turn it will fail check.
'''),
    Opt('remotehost_sshd_port',
        section='zvm',
        default='22',
        help='''
The port number of remotehost sshd.
'''),
    Opt('bypass_smapiout',
        section='zvm',
        default=False,
        help='''
Only used for SMAPIOUT is not ready.
'''),
    # image options
    Opt('default_compress_level',
        section='image',
        default='6',
        opt_type='str',
        help='''
Default compress level for captured image.
'''),
    Opt('sdk_image_repository',
        section='image',
        default='/var/lib/zvmsdk/images',
        help='''
Directory to store sdk images.
SDK image repository to store the imported images and the staging images that
is in snapshotting. Once snapshot finished, the image will be removed to the
netboot directory accordingly. Two kinds of image repository looks like:
/var/lib/zvmsdk/images/netboot/<image_osversion>/<imagename>
/var/lib/zvmsdk/images/staging/<image_osversion>/<imagename>
'''),
    # file options
    Opt('file_repository',
        section='file',
        default='/var/lib/zvmsdk/files',
        help='''
Directory to store sdk imported or exported files.
SDK file repository to store the imported files and the files that will be
exported, the imported files will be put into <file_repository>/imported
the files to be exported will be put into <file_repository>/exported
'''),
    # network options
    Opt('my_ip',
        section='network',
        required=True,
        help='''
IP address of the Linux machine which is running SDK on.
Some remote copy operations need to be performed during guest creation,
this option tell the SDK the host ip which can be used to perform copy
from and copy to operations.
'''),
    # guest options
    Opt('console_log_size',
        section='guest',
        default=100,
        opt_type='int',
        help='''
The maximum allowed console log size, in kilobytes.
Console logs might be transferred to sdk user, this option controls how
large each file can be. A smaller size may mean more calls will be needed
to transfer large consoles, which may not be desirable for performance reasons.
'''),
    Opt('extend_partition_fs',
        section='guest',
        default='True',
        help='''
Whether to automatically extend the partition and filesystem of guest.
If set to True, when deploying an image to a larger disk, zvmsdk
automatically extends the last partition and the file system to
use up the whole disk.
If do not want to do the extend action automatically, you must set this option
to be False.
'''),
    Opt('reachable_timeout',
        section='guest',
        default=180,
        opt_type='int',
        help='''
The maximum time waiting until the guest reachable after started.
When starting a guest, specify the timeout value will check the guest status
until it becomes reachable or timeout.
'''),
    Opt('softstop_timeout',
        section='guest',
        default=120,
        opt_type='int',
        help='''
The maximum time waiting until the guest shut down.
Sometimes, the shutdown action will take a bit long time to complete.
If you want to make sure the guest in shut-down status after executing action
of softstop, this will help.
'''),
    Opt('softstop_interval',
        section='guest',
        default=10,
        opt_type='int',
        help='''
The interval time between 2 retries, in seconds.
This will take effect only when you set softstop_retries item.
What's more, the value of softstop_timeout/softstop_interval is
the times retried.
'''),
    # monitor options
    Opt('cache_interval',
        section='monitor',
        default=300,
        opt_type='int',
        help='''
Cached monitor data update interval
This is used to prevent excessive effort spent retrieving the
monitor data by calling the SDK backend utilities. When this cache
is enabled, an inspect call will only call the SDK backend utilities
when the inspected guest's info does not exist in the cache or
when the cache data is expired. And when a cache update is needed,
all the existing guests' data will be retrieved in a single call to
the backend.
When this value is below or equal to zero, the cache
will be disabled and each inspect call will need to call the backend
utilities to get the inspected guest's monitor data.
'''
        ),
    # wsgi options
    # this option is used when sending http request
    # to sdk wsgi, default to none so no token validation
    # will be used.
    Opt('auth',
        section='wsgi',
        default='none',
        opt_type='str',
        help='''
Whether auth will be used.
When sending http request from outside to running zvmsdk,
Client will be requested to input username/password in order
to authorize the call.
Set this to 'none' indicated no auth will be used and 'auth'
means username and password need to be specified.
Possible value:
'none': no auth will be required
'auth': need auth, currently pyjwt is used to return a token
to caller if the username and password is correct.
''',
        ),
    Opt('token_validation_period',
        section='wsgi',
        default=3600,
        opt_type='int',
        help='''
How long the token is valid.
If a token auth is used, the token return to user will be
expired after the period passed. This ensure an user who
get this token will not be authorized all the time, a new
token need to be recreated after certain time period.
''',
        ),
    Opt('token_path',
        section='wsgi',
        default='/etc/zvmsdk/token.dat',
        opt_type='str',
        help='''
file path that contains admin-token to access sdk http server.
Admin-token in order to get a user-token from zvm sdk, and the user-token
will be used to validate request before user-token expire.
'''
        ),
    Opt('max_concurrent_deploy_capture',
        section='wsgi',
        default=20,
        opt_type='int',
        help='''
The max total number of concurrent deploy and capture requests allowed in a
single z/VM Cloud Connector process.
If more requests than this value are received concurrently, the z/VM Cloud
Connector would reject the requests and return error to avoid resource
exhaustion.
'''
        ),
    # Daemon server options
    Opt('bind_addr',
        section='sdkserver',
        default='127.0.0.1',
        opt_type='str',
        help='''
The IP address that the SDK server is listen on.
When the SDK server daemon starts, it will try to bind to
this address and port bind_port, and wait for the SDK client
connection to handle API request.
'''
        ),
    Opt('bind_port',
        section='sdkserver',
        opt_type='int',
        default=2000,
        help='''
The port that the SDK server is listen on.
This will work as a pair with bind_addr when the SDK server daemon
starts, more info can be found in that configuration description.
'''
        ),
    Opt('request_queue_size',
        section='sdkserver',
        opt_type='int',
        default=128,
        help='''
The size of request queue in SDK server.
SDK server maintains a queue to keep all the accepted but not handled requests,
and the SDK server workers fetch requests from this queue.
To some extend, this queue size decides the max socket opened in SDK server.
This value should be adjusted according to the system resource.
'''
        ),
    Opt('max_worker_count',
        section='sdkserver',
        opt_type='int',
        default=64,
        help='''
The maximum number of worker thread in SDK server to handle client requests.
These worker threads would work concurrently to handle requests from client.
This value should be adjusted according to the system resource and workload.
'''
        ),
    # database options
    Opt('dir',
        section='database',
        default='/var/lib/zvmsdk/databases/',
        opt_type='str',
        help='''
Directory to store database.
SDK databases are used to store a set of tables which contain the
information of network, volume, image, etc. This option is used to
tell SDK where to store the database files, make sure the process
running SDK is able to read write and execute the directory.
'''
        ),
    # volume options
    Opt('fcp_list',
        section='volume',
        default='',
        opt_type='str',
        help='''
volume fcp list.
SDK will only use the fcp devices in the scope of this value.
'''
        ),
    Opt('refresh_bootmap_timeout',
        section='volume',
        default=1200,
        opt_type='int',
        help='''
The timeout value for waiting refresh_bootmap execution, in seconds.
The default value is 1200 seconds, if the execution of refresh_bootmap
reached the timeout, the process of refresh_bootmap will be stopped.
'''
        ),
    Opt('punch_script_execution_timeout',
        section='volume',
        default=1800,
        opt_type='int',
        help='''
The timeout value for waiting attach/detach punch scripts
execution, in seconds.
The default value is 1800 seconds, if the execution of punch scripts
reached the timeout, the attach/detach will fail.
'''
        ),
    Opt('get_fcp_pair_with_same_index',
        section='volume',
        default='0',
        opt_type='int',
        help='''
fcp pair selection algorithm
fcp_list example:
fa00-fa02; fb00-fb02
If use get_fcp_pair_with_same_index,
then fcp pair is randomly selected from below combinations.
[fa00,fb00],[fa01,fb01],[fa02,fb02]
If use get_fcp_pair,
then fcp pair is randomly selected from below combinations.
[fa00,fb00],[fa01,fb00],[fa02,fb00]
[fa00,fb01],[fa01,fb01],[fa02,fb01]
[fa00,fb02],[fa01,fb02],[fa02,fb02]
Possible value:
0 : use get_fcp_pair. this is the default
1 : use get_fcp_pair_with_same_index
'''
        ),
    Opt('force_capture_disk',
        section='zvm',
        required=False,
        opt_type='str',
        default=None,
        help='''
Virtual device number for capture function.
This value identifies the virtual device number for capture
image when z/VM guest is power off.
Possible values:
A string value identifying a disk number like '0100'.
If this value has been configured, capture image function will use
this value as disk info to capture with first priority when z/VM
guest is power off.
This value don't work if z/VM guest status is power on.
Sample root disk in user directory:
MDISK 0100 <disktype> <start> <end> <volumelabel> <readwrite>
'''),
    # tests options
    Opt('images',
        section='tests',
        opt_type='str',
        ),
    Opt('userid_prefix',
        section='tests',
        default='tst',
        opt_type='str',
        ),
    Opt('ip_addr_list',
        section='tests',
        default='192.168.0.2 192.168.0.3 192.168.0.4 192.168.0.5 192.168.0.6',
        opt_type='str',
        ),
    Opt('vswitch',
        section='tests',
        opt_type='str',
        ),
    Opt('gateway_v4',
        section='tests'),
    Opt('cidr',
        section='tests'),
    Opt('restapi_url',
        section='tests',
        default='http://127.0.0.1:8888'),
    Opt('zvm_fcp',
        section='tests'),
    Opt('target_wwpn',
        section='tests'),
    Opt('target_lun',
        section='tests'),
    Opt('mount_point',
        section='tests'),
    ]
class ConfigOpts(object):
    """Load the SDK option definitions, merge in values read from
    zvmsdk.conf, validate them, and expose the result as a Dict that
    supports both item and attribute access (CONF.section.option).
    """
    def __init__(self):
        # Nested mapping populated by register()/config():
        # {section: {option: {'required', 'default', 'type', 'help'}}}
        self.dicts = {}

    def get_config_dicts_default(self, opts):
        """Convert a flat list of Opt objects into the nested dict form.

        :param opts: iterable of Opt definition objects
        :returns: {section: {name: {'required', 'default', 'type', 'help'}}}
        """
        _dict = {}
        for opt in opts:
            sec = opt.section
            if _dict.get(sec) is None:
                _dict[sec] = {}
            _dict[sec][opt.name] = {'required': opt.required,
                                    'default': opt.default,
                                    'type': opt.opt_type,
                                    'help': opt.help}
        return _dict

    def register(self, opts):
        # Register the defined options and parse to dict
        self.dicts = self.get_config_dicts_default(opts)
        return self.clear_and_to_dict()

    def config(self):
        # Load config file and merge with default definitions
        # read config file
        override_dicts = self.read_config_file_to_dicts()
        # overwrite default value
        try:
            self.dicts = self.merge(self.dicts, override_dicts)
        except ImportError:
            # NOTE(review): merge() does not import anything, so this
            # handler looks unreachable -- kept as-is to avoid behavior
            # change; confirm before removing.
            pass
        # Check config value
        self._check_value(self.dicts)
        # Clear unused attributes of each option, and parse to our defined dict
        return self.clear_and_to_dict()

    def read_config_file_to_dicts(self):
        """Read zvmsdk.conf and return its contents as a nested plain dict.

        :returns: {section: {option: raw string value}}
        :raises ConfFileMissingError: if no config file can be located
        """
        configs = {}
        read_file = self.find_config_file(project="zvmsdk")
        if read_file is None:
            raise ConfFileMissingError()
        else:
            cf = configparser.ConfigParser()
            cf.read(read_file)
            # read each section and option to dict
            secs = cf.sections()
            for sec in secs:
                configs[sec] = {}
                # get all options of the section in a list
                opts = cf.options(sec)
                for opt in opts:
                    val = cf.get(sec, opt)
                    configs[sec][opt] = val
        return configs

    def merge(self, defaults, override):
        # merge the defaults and overridden
        # The overridden options would only have 'default' set in the
        # resulted dicts
        r = {}
        for k, v in defaults.items():
            if k in override:
                if isinstance(v, dict) and isinstance(override[k], dict):
                    # both sides are dicts (section level): recurse
                    r[k] = self.merge(v, override[k])
                elif isinstance(v, dict):
                    # option definition overridden by a raw config value
                    if override[k] is not None:
                        v['default'] = override[k]
                    r[k] = v
                else:
                    r[k] = override[k]
            else:
                # not overridden: keep the default definition
                r[k] = v
        return r

    def clear_and_to_dict(self):
        # This function would clear the dict to remove the unused keys
        # ('required', 'default', 'type', 'help'), set the opt value to
        # the final value merged in 'default'.
        # And then, convert the python dict to our defined Dict object
        clear_dict = {}
        pydict = self.dicts
        for k1, v1 in pydict.items():
            r_con = {}
            for k2, v2 in v1.items():
                r_con[k2] = v2['default']
            clear_dict[k1] = r_con
        return self.toDict(clear_dict)

    def _check_value(self, conf):
        """Validate required options, convert 'int' typed values, and run
        the format checks for the special zvm options.
        """
        for k1, v1 in conf.items():
            for k2, v2 in v1.items():
                # Check required options
                if v2['required'] and (v2['default'] is None):
                    raise RequiredOptMissingError(k1, k2)
                # Convert type
                if v2['type'] == 'int':
                    v2['default'] = int(v2['default'])
                # Check format
                if (k2 == "disk_pool") and (v2['default'] is not None):
                    self._check_zvm_disk_pool(v2['default'])
                # check user_default_max_memory
                if (k2 == "user_default_max_memory") and (
                    v2['default'] is not None):
                    self._check_user_default_max_memory(v2['default'])
                # check user_default_max_reserved_memory
                if (k2 == "user_default_max_reserved_memory") and (
                    v2['default'] is not None):
                    self._check_user_default_max_reserved_memory(v2['default'])
                # check user_default_max_cpu
                if (k2 == "user_default_max_cpu") and (
                    v2['default'] is not None):
                    self._check_user_default_max_cpu(v2['default'])

    def _check_zvm_disk_pool(self, value):
        # Expected format: '<FBA|ECKD>:<pool name>' (case-insensitive type)
        disks = value.split(':')
        if (len(disks) != 2) or (disks[0].upper() not in ['FBA', 'ECKD']) or (
            disks[1] == ''):
            raise OptFormatError("zvm", "disk_pool", value)

    def _check_user_default_max_memory(self, value):
        # Expected format: 1-4 digits followed by 'G' or 'M', e.g. '64G'
        suffix = value[-1].upper()
        size = value[:-1]
        if (suffix not in ['G', 'M']) or (len(size) > 4) or (
            size.strip('0123456789') != ''):
            raise OptFormatError("zvm", "user_default_max_memory", value)

    def _check_user_default_max_reserved_memory(self, value):
        # Same format rule as user_default_max_memory
        suffix = value[-1].upper()
        size = value[:-1]
        if (suffix not in ['G', 'M']) or (len(size) > 4) or (
            size.strip('0123456789') != ''):
            raise OptFormatError("zvm", "user_default_max_reserved_memory",
                                 value)

    def _check_user_default_max_cpu(self, value):
        # Valid CPU count range is 1..64 inclusive (value already int here)
        if (value < 1) or (value > 64):
            raise OptFormatError("zvm", "user_default_max_cpu", value)

    def toDict(self, d):
        """Recursively convert a plain dict into a Dict (attribute access)."""
        D = Dict()
        for k, v in d.items():
            D[k] = self.toDict(v) if isinstance(v, dict) else v
        return D

    def _fixpath(self, p):
        """Apply tilde expansion and absolutization to a path."""
        return os.path.abspath(os.path.expanduser(p))

    def _get_config_dirs(self):
        """Return a list of directories where config files may be located.

        following directories are returned::

          ./
          ../etc
          ~/
          /etc/zvmsdk/
        """
        _cwd = os.path.split(os.path.abspath(__file__))[0]
        _pdir = os.path.split(_cwd)[0]
        _etcdir = ''.join((_pdir, '/', 'etc/'))
        cfg_dirs = [
            self._fixpath(_cwd),
            self._fixpath('/etc/zvmsdk/'),
            self._fixpath('/etc/'),
            self._fixpath('~'),
            self._fixpath(_etcdir),
        ]
        # drop any empty entries
        return [x for x in cfg_dirs if x]

    def _search_dirs(self, dirs, basename, extension=""):
        """Search a list of directories for a given filename or directory name.

        Iterator over the supplied directories, returning the first file
        found with the supplied name and extension.

        :param dirs: a list of directories
        :param basename: the filename
        :param extension: the file extension, for example '.conf'
        :returns: the path to a matching file, or None
        """
        for d in dirs:
            path = os.path.join(d, '%s%s' % (basename, extension))
            if os.path.exists(path):
                return path
        return None

    def find_config_file(self, project=None, extension='.conf'):
        """Return the config file.

        :param project: "zvmsdk"
        :param extension: the type of the config file
        """
        cfg_dirs = self._get_config_dirs()
        config_files = self._search_dirs(cfg_dirs, project, extension)
        return config_files
class Dict(dict):
    """A dict whose keys are also reachable as attributes (x.y style)."""

    def __init__(self, names=(), values=(), **kw):
        super(Dict, self).__init__(**kw)
        for name, value in zip(names, values):
            self[name] = value

    def __getattr__(self, key):
        try:
            value = self[key]
        except KeyError:
            raise AttributeError(r"'CONF' object has no attribute '%s'" % key)
        return value

    def __setattr__(self, key, value):
        self[key] = value
class RequiredOptMissingError(Exception):
    """Raised if an option is required but no value is supplied by the user."""

    def __init__(self, grp_name, opt_name):
        self.grp_name = grp_name
        self.opt_name = opt_name

    def __str__(self):
        return "value required for option {0} - {1}".format(
            self.grp_name, self.opt_name)
class ConfFileMissingError(Exception):
    """Raised if the configuration file zvmsdk.conf cannot be found."""

    def __init__(self):
        super(ConfFileMissingError, self).__init__("zvmsdk.conf is not found.")
class OptFormatError(Exception):
    """Raised when a configuration option value fails format validation."""

    def __init__(self, grp_name, opt_name, value):
        self.grp_name = grp_name
        self.opt_name = opt_name
        self.value = value

    def __str__(self):
        return "value {0} for option {1} - {2} is invalid".format(
            self.value, self.grp_name, self.opt_name)
# Process-wide singleton holding all option definitions.
CONFOPTS = ConfigOpts()
# CONF initially exposes only the built-in defaults; load_config()
# rebinds it to the values merged from zvmsdk.conf.
CONF = CONFOPTS.register(zvm_opts)
def load_config():
    """Re-read zvmsdk.conf and rebind the module-level CONF to the merged,
    validated configuration.

    :raises ConfFileMissingError: if zvmsdk.conf cannot be located
    """
    global CONF
    CONF = CONFOPTS.config()
import os
import shutil
import tarfile
import yaml
from zvmsdk import config
from zvmsdk import dist
from zvmsdk import log
from zvmsdk import smtclient
# Lazily created NetworkOPS singleton; see get_networkops().
_NetworkOPS = None
CONF = config.CONF
LOG = log.LOG
def get_networkops():
    """Return the process-wide NetworkOPS singleton, creating it on first
    use.

    NOTE(review): creation is not guarded by a lock, so concurrent first
    calls could race -- presumably acceptable for current callers.
    """
    global _NetworkOPS
    if _NetworkOPS is None:
        _NetworkOPS = NetworkOPS()
    return _NetworkOPS
class NetworkOPS(object):
    """Configuration check and manage MAC address API
    oriented towards SDK driver
    """
    def __init__(self):
        # SMT client handles all SMAPI traffic; the dist manager resolves
        # per-distro network configuration generators.
        self._smtclient = smtclient.get_smtclient()
        self._dist_manager = dist.LinuxDistManager()

    def create_nic(self, userid, vdev=None, nic_id=None,
                   mac_addr=None, active=False):
        """Create a NIC for the guest (delegated to the SMT client)."""
        return self._smtclient.create_nic(userid, vdev=vdev, nic_id=nic_id,
                                          mac_addr=mac_addr, active=active)

    def get_vswitch_list(self):
        """Return the list of vswitches defined on the host."""
        return self._smtclient.get_vswitch_list()

    def couple_nic_to_vswitch(self, userid, nic_vdev,
                              vswitch_name, active=False, vlan_id=-1):
        """Connect a guest NIC to a vswitch."""
        self._smtclient.couple_nic_to_vswitch(userid, nic_vdev,
                                              vswitch_name, active=active,
                                              vlan_id=vlan_id)

    def uncouple_nic_from_vswitch(self, userid, nic_vdev,
                                  active=False):
        """Disconnect a guest NIC from its vswitch."""
        self._smtclient.uncouple_nic_from_vswitch(userid,
                                                  nic_vdev,
                                                  active=active)

    def add_vswitch(self, name, rdev=None, controller='*',
                    connection='CONNECT', network_type='ETHERNET',
                    router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
                    gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
        """Create a vswitch on the host with the given attributes."""
        self._smtclient.add_vswitch(name, rdev=rdev, controller=controller,
                                    connection=connection,
                                    network_type=network_type,
                                    router=router, vid=vid,
                                    port_type=port_type, gvrp=gvrp,
                                    queue_mem=queue_mem,
                                    native_vid=native_vid,
                                    persist=persist)

    def grant_user_to_vswitch(self, vswitch_name, userid):
        """Authorize a guest to connect to a vswitch."""
        self._smtclient.grant_user_to_vswitch(vswitch_name, userid)

    def revoke_user_from_vswitch(self, vswitch_name, userid):
        """Revoke a guest's authorization on a vswitch (best effort:
        failures are logged and deliberately swallowed)."""
        try:
            self._smtclient.revoke_user_from_vswitch(vswitch_name, userid)
        except Exception as e:
            # TODO: for APARs VM65925, VM65926, and VM65931 applied or z/VM 7.1
            # this call won't be needed, so we can avoid raise exception
            # and let it be as some configuration may block this API call.
            LOG.debug('Error ignored: %s', str(e))

    def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
        """Set the VLAN id of a guest's vswitch port."""
        self._smtclient.set_vswitch_port_vlan_id(vswitch_name, userid,
                                                 vlan_id)

    def set_vswitch(self, vswitch_name, **kwargs):
        """Update attributes of an existing vswitch."""
        self._smtclient.set_vswitch(vswitch_name, **kwargs)

    def delete_vswitch(self, vswitch_name, persist=True):
        """Delete a vswitch from the host."""
        self._smtclient.delete_vswitch(vswitch_name, persist)

    def delete_nic(self, userid, vdev, active=False):
        """Delete a NIC from the guest."""
        self._smtclient.delete_nic(userid, vdev,
                                   active=active)

    def network_configuration(self, userid, os_version, network_info,
                              active=False):
        """Generate and punch the guest's network configuration.

        For RHCOS guests only a parameter file is created; for other
        distros a network.doscript package is built, punched to the guest
        and, when active is True, executed via zvmguestconfigure.
        """
        if self._smtclient.is_rhcos(os_version):
            linuxdist = self._dist_manager.get_linux_dist(os_version)()
            linuxdist.create_coreos_parameter_temp_file(network_info, userid)
        else:
            network_file_path = self._smtclient.get_guest_temp_path(userid)
            LOG.debug('Creating folder %s to contain network configuration '
                      'files' % network_file_path)
            # check whether network interface has already been set for the
            # guest. If not, means this is the first time to set the network
            # interface
            first = self._smtclient.is_first_network_config(userid)
            (network_doscript, active_cmds) = self._generate_network_doscript(
                userid, os_version,
                network_info,
                network_file_path,
                first, active=active)
            fileClass = "X"
            try:
                self._smtclient.punch_file(userid, network_doscript, fileClass)
            finally:
                # always clean up the temp folder, punched or not
                LOG.debug('Removing the folder %s ', network_file_path)
                shutil.rmtree(network_file_path)
            # update guest db to mark the network is already set
            if first:
                self._smtclient.update_guestdb_with_net_set(userid)
            # using zvmguestconfigure tool to parse network_doscript
            if active:
                self._smtclient.execute_cmd(userid, active_cmds)

    # Prepare and create network doscript for instance
    def _generate_network_doscript(self, userid, os_version, network_info,
                                   network_file_path, first, active=False):
        """Create the per-distro config files plus znetconfig/invokeScript,
        pack them into network.doscript and return
        (doscript path, activation command string).
        """
        path_contents = []
        content_dir = {}
        files_map = []
        # Create network configuration files
        LOG.debug('Creating network configuration files '
                  'for guest %s in the folder %s' %
                  (userid, network_file_path))
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        files_and_cmds = linuxdist.create_network_configuration_files(
            network_file_path, network_info,
            first, active=active)
        (net_conf_files, net_conf_cmds,
         clean_cmd, net_enable_cmd) = files_and_cmds
        # Add network configure files to path_contents
        if len(net_conf_files) > 0:
            path_contents.extend(net_conf_files)
        # restart_cmds = ''
        # if active:
        #     restart_cmds = linuxdist.restart_network()
        net_cmd_file = self._create_znetconfig(net_conf_cmds,
                                               linuxdist,
                                               net_enable_cmd,
                                               active=active)
        # Add znetconfig file to path_contents
        if len(net_cmd_file) > 0:
            path_contents.extend(net_cmd_file)
        for (path, contents) in path_contents:
            # source files are named 0000, 0001, ... in creation order
            key = "%04i" % len(content_dir)
            files_map.append({'target_path': path,
                              'source_file': "%s" % key})
            content_dir[key] = contents
            file_name = os.path.join(network_file_path, key)
            if 'yaml' in path:
                self._add_yaml_file(file_name, contents)
            else:
                self._add_file(file_name, contents)
        self._create_invokeScript(network_file_path, clean_cmd, files_map)
        network_doscript = self._create_network_doscript(network_file_path)
        # get command about zvmguestconfigure
        active_cmds = ''
        if active:
            active_cmds = linuxdist.create_active_net_interf_cmd()
        return network_doscript, active_cmds

    def _add_file(self, file_name, data):
        # Write plain text content to file_name.
        with open(file_name, "w") as f:
            f.write(data)

    def _add_yaml_file(self, file_name, data):
        # Serialize the data structure as YAML into file_name.
        with open(file_name, 'w') as stream:
            yaml.dump(data, stream)

    def _create_znetconfig(self, commands, linuxdist, append_cmd,
                           active=False):
        """Build the znetconfig.sh contents.

        :returns: a list holding a single (path, contents) tuple, or an
            empty list when the distro supplies no znetconfig content
        """
        LOG.debug('Creating znetconfig file')
        if active:
            znet_content = linuxdist.get_simple_znetconfig_contents()
        else:
            znet_content = linuxdist.get_znetconfig_contents()
        net_cmd_file = []
        if znet_content:
            if len(commands) == 0:
                znetconfig = '\n'.join(('#!/bin/bash', znet_content))
            else:
                znetconfig = '\n'.join(('#!/bin/bash', commands,
                                        'sleep 2', znet_content))
            if len(append_cmd) > 0:
                znetconfig += '\nsleep 2'
                znetconfig += '\n%s\n' % append_cmd
            # the script removes itself after running
            znetconfig += '\nrm -rf /tmp/znetconfig.sh\n'
            # Create a temp file in instance to execute above commands
            net_cmd_file.append(('/tmp/znetconfig.sh', znetconfig))  # nosec
        return net_cmd_file

    def _create_invokeScript(self, network_file_path, commands,
                             files_map):
        """invokeScript: Configure zLinux os network

        invokeScript is included in the network.doscript, it is used to put
        the network configuration file to the directory where it belongs and
        call znetconfig to configure the network
        """
        LOG.debug('Creating invokeScript shell in the folder %s'
                  % network_file_path)
        invokeScript = "invokeScript.sh"
        conf = "#!/bin/bash \n"
        command = commands
        for file in files_map:
            target_path = file['target_path']
            source_file = file['source_file']
            # potential risk: whether target_path exist
            # using cat does not change the target file selinux file type
            command += 'cat ' + source_file + ' > ' + target_path + '\n'
        command += 'sleep 2\n'
        command += '/bin/bash /tmp/znetconfig.sh\n'
        command += 'rm -rf invokeScript.sh\n'
        scriptfile = os.path.join(network_file_path, invokeScript)
        with open(scriptfile, "w") as f:
            f.write(conf)
            f.write(command)

    def _create_network_doscript(self, network_file_path):
        """doscript: contains a invokeScript.sh which will do the special work

        The network.doscript contains network configuration files and it will
        be used by zvmguestconfigure to configure zLinux os network when it
        starts up
        """
        # Generate the tar package for punch
        LOG.debug('Creating network doscript in the folder %s'
                  % network_file_path)
        network_doscript = os.path.join(network_file_path, 'network.doscript')
        tar = tarfile.open(network_doscript, "w")
        for file in os.listdir(network_file_path):
            file_name = os.path.join(network_file_path, file)
            tar.add(file_name, arcname=file)
        tar.close()
        return network_doscript

    def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
        """Query NIC records, optionally filtered by guest/nic/vswitch."""
        return self._smtclient.get_nic_info(userid=userid, nic_id=nic_id,
                                            vswitch=vswitch)

    def vswitch_query(self, vswitch_name):
        """Return detailed information about one vswitch."""
        return self._smtclient.query_vswitch(vswitch_name)

    def delete_network_configuration(self, userid, os_version, vdev,
                                     active=False):
        """Punch a script that removes the configuration of one network
        device; when active is True the script is executed immediately."""
        network_file_path = self._smtclient.get_guest_temp_path(userid)
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        file = linuxdist.get_network_configuration_files(vdev)
        cmd = 'rm -f %s\n' % file
        cmd += linuxdist.delete_vdev_info(vdev)
        net_cmd_file = self._create_znetconfig(cmd, linuxdist, '',
                                               active=active)
        del_file = 'DEL%s.sh' % str(vdev).zfill(4)
        file_name = os.path.join(network_file_path, del_file)
        file_content = net_cmd_file[0][1]
        self._add_file(file_name, file_content)
        fileClass = "X"
        try:
            self._smtclient.punch_file(userid, file_name, fileClass)
        finally:
            # always clean up the temp folder, punched or not
            LOG.debug('Removing the folder %s ', network_file_path)
            shutil.rmtree(network_file_path)
        if active:
            active_cmds = linuxdist.create_active_net_interf_cmd()
            self._smtclient.execute_cmd(userid, active_cmds)

    def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
        """Dedicate an OSA device to the guest."""
        return self._smtclient.dedicate_OSA(userid, OSA_device, vdev=vdev,
                                            active=active)
import functools
import hashlib
import math
# On SLES12, we found that if you import urllib.parse later
# than requests, you will find a error like 'not able to load
# urllib.parse, this is because urllib will be in sys.modules
# when first import requests
# as workaround here, we first import urllib then import requests
# later, we need consider to use urllib.request to replace
# requests if that's possible to avoid this kind of issue
from io import IOBase
import shutil
import six.moves.urllib.parse as urlparse
import requests
import threading
import os
import re
import six
import string
import subprocess
import tempfile
import time
from smtLayer import smt
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG

# NOTE(review): _LOCK is not used within this part of the module --
# presumably guards shared state elsewhere in the file; confirm before
# removing.
_LOCK = threading.Lock()
# Default read size (bytes) used when streaming file/image data.
CHUNKSIZE = 4096

# Links to the official z/VM error-message documentation, appended to
# failure messages for DIRMAINT (rc 596) and CP (rc 396) errors.
DIRMAINT_ERROR_MESSAGE = ("https://www-40.ibm.com/servers/resourcelink/"
                          "svc0302a.nsf/pages/zVMV7R2gc246282?OpenDocument")
CP_ERROR_MESSAGE = ("https://www-40.ibm.com/servers/resourcelink/"
                    "svc0302a.nsf/pages/zVMV7R2gc246270?OpenDocument")

# Lazily created SMTClient singleton; see get_smtclient().
_SMT_CLIENT = None
def get_smtclient():
    """Return the process-wide SMTClient singleton, creating it lazily.

    :returns: the shared SMTClient instance
    :raises ImportError: if the SMTClient class cannot be imported
    """
    global _SMT_CLIENT
    if _SMT_CLIENT is None:
        try:
            _SMT_CLIENT = zvmutils.import_object(
                'zvmsdk.smtclient.SMTClient')
        except ImportError:
            LOG.error("Unable to get smtclient")
            # Re-raise the original exception instead of a fresh, empty
            # ImportError so the real failure reason (message and
            # traceback) is preserved for the caller.
            raise
    return _SMT_CLIENT
class SMTClient(object):
    def __init__(self):
        # Low-level SMT API object used to talk to z/VM SMAPI.
        self._smt = smt.SMT()
        # Helper for guest-related temp/persistent file paths.
        self._pathutils = zvmutils.PathUtils()
        # Database accessors for network, guest and image metadata.
        self._NetDbOperator = database.NetworkDbOperator()
        self._GuestDbOperator = database.GuestDbOperator()
        self._ImageDbOperator = database.ImageDbOperator()
    def _request(self, requestData):
        """Send a request string to SMT and return the parsed results.

        :param requestData: the SMT request command string
        :returns: the SMT results dict (overallRC == 0 on success)
        :raises SDKInternalError: when SMT fails to parse the request or
            reports an error classified as internal
        :raises SDKSMTRequestFailed: for other SMT failures; the message
            includes pointers to the CP/DIRMAINT documentation where the
            rc indicates a CP (396) or DIRMAINT (596) command failure
        """
        try:
            results = self._smt.request(requestData)
        except Exception as err:
            LOG.error('SMT internal parse encounter error')
            raise exception.SDKInternalError(msg=err, modID='smt')

        def _is_smt_internal_error(results):
            # Match (overallRC, rc, rs) against the known internal-error
            # triples; a None entry acts as a wildcard for that field.
            internal_error_list = returncode.SMT_INTERNAL_ERROR
            for error in internal_error_list:
                if results['overallRC'] != error[0]:
                    # overallRC does not match, continue next
                    continue
                if error[1] is not None and results['rc'] != error[1]:
                    # rc match failed
                    continue
                if error[2] is not None and results['rs'] not in error[2]:
                    # rs match failed
                    continue
                # All match finish successfully, return true
                return True
            return False

        if results['overallRC'] != 0:
            # drop the verbose log entries before reporting the failure
            results.pop('logEntries')
            # Check whether this smt error belongs to internal error, if so,
            # raise internal error, otherwise raise clientrequestfailed error
            if _is_smt_internal_error(results):
                msg = "SMT internal error. Results: %s." % str(results)
                rc = results.get('rc', 0)
                if rc in [-110, -102, -103, -108]:
                    msg += ("This is likely to be caused by temporary z/VM "
                            "SMAPI down issue, Contact with your z/VM "
                            "administrators for further help")
                raise exception.SDKInternalError(msg=msg,
                                                 modID='smt',
                                                 results=results)
            else:
                # no solution if we don't know, so empty string
                solution = ''
                rc = results.get('rc', 0)
                if rc == 396:
                    solution = (("CP command failed, with error code %s."
                                 "Check <%s> on z/VM CP error messages")
                                % (results['rs'], CP_ERROR_MESSAGE))
                if rc == 596:
                    solution = (("DIRMAINT command failed, with error code %s."
                                 "Check <%s> on z/VM DIRMAINT error messages")
                                % (results['rs'], DIRMAINT_ERROR_MESSAGE))
                msg = (("SMT request failed. RequestData: '%s', Results: '%s'."
                        "%s") % (requestData, str(results), solution))
                raise exception.SDKSMTRequestFailed(results, msg)
        return results
    def get_guest_temp_path(self, userid):
        """Return the temporary working folder path for the given guest."""
        return self._pathutils.get_guest_temp_path(userid)
    def get_guest_path(self, userid):
        """Return the persistent folder path for the given guest."""
        return self._pathutils.get_guest_path(userid)
    def clean_temp_folder(self, tmp_folder):
        """Remove the given temporary folder and its contents."""
        return self._pathutils.clean_temp_folder(tmp_folder)
def _generate_vdev(self, base, offset):
"""Generate virtual device number based on base vdev
:param base: base virtual device number, string of 4 bit hex.
:param offset: offset to base, integer.
"""
vdev = hex(int(base, 16) + offset)[2:]
return vdev.rjust(4, '0')
def _generate_increasing_nic_id(self, nic_id):
"""Generate increasing nic id string
:param nic_id: hexadecimal nic id like '1000'
:return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002'
"""
nic_id = str(hex(int(nic_id, 16)))[2:]
nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:]
nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:]
if len(nic_id_2) > 4:
errmsg = ("Virtual device number %s is not valid" % nic_id_2)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2)
    def generate_disk_vdev(self, start_vdev=None, offset=0):
        """Generate virtual device number for disks

        :param start_vdev: base vdev (4 bit hex string); falls back to
            CONF.zvm.user_root_vdev when empty or None.
        :param offset: offset of user_root_vdev.
        :return: virtual device number, string of 4 bit hex.
        :raises SDKGuestOperationError: if offset is outside [0, 254)
        """
        if not start_vdev:
            start_vdev = CONF.zvm.user_root_vdev
        vdev = self._generate_vdev(start_vdev, offset)
        if offset >= 0 and offset < 254:
            return vdev
        else:
            msg = ("Failed to generate disk vdev, invalid virtual device"
                   "number for disk:%s" % vdev)
            LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=2, msg=msg)
    def add_mdisks(self, userid, disk_list, start_vdev=None):
        """Add disks for the userid

        :disks: A list dictionary to describe disk info, for example:
                disk: [{'size': '1g',
                       'format': 'ext3',
                       'disk_pool': 'ECKD:eckdpool1'},
                       {'size': '1g',
                       'format': 'ext3'}]
        :returns: disk_list, with each entry updated in place: 'vdev' is
            filled in and unit-less sizes are normalized to 'M'/'G'
        :raises SDKGuestOperationError: if no disk pool is specified or
            configured for an entry
        """
        # Firstly, check disk_pool in disk_list, if disk_pool not specified
        # and not configured (the default value is None), report error
        for idx, disk in enumerate(disk_list):
            disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
            disk['disk_pool'] = disk_pool
            if disk_pool is None:
                msg = ('disk_pool not configured for sdkserver.')
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=2, msg=msg)
        for idx, disk in enumerate(disk_list):
            if 'vdev' in disk:
                # this means user want to create their own device number
                vdev = disk['vdev']
            else:
                vdev = self.generate_disk_vdev(start_vdev=start_vdev,
                                               offset=idx)
            self._add_mdisk(userid, disk, vdev)
            disk['vdev'] = vdev
            sizeUpper = disk.get('size').strip().upper()
            sizeUnit = sizeUpper[-1]
            if sizeUnit != 'G' and sizeUnit != 'M':
                # No unit suffix: the raw number is cylinders (ECKD) or
                # blocks (FBA); normalize it to a human-readable M/G size.
                sizeValue = sizeUpper
                disk_pool = disk.get('disk_pool')
                [diskpool_type, diskpool_name] = disk_pool.split(':')
                if (diskpool_type.upper() == 'ECKD'):
                    # Convert the cylinders to bytes
                    convert = 737280
                else:
                    # Convert the blocks to bytes
                    convert = 512
                byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)
                unit = "M"
                if (byteSize > 1024):
                    byteSize = float(byteSize / 1024)
                    unit = "G"
                byteSize = "%.1f" % byteSize
                disk['size'] = byteSize + unit
        return disk_list
    def remove_mdisks(self, userid, vdev_list):
        """Remove each minidisk in vdev_list from the guest."""
        for vdev in vdev_list:
            self._remove_mdisk(userid, vdev)
    def dedicate_device(self, userid, vaddr, raddr, mode):
        """dedicate device

        :userid: The name of the image obtaining a dedicated device
        :vaddr: The virtual device number of the device
        :raddr: A real device number to be dedicated or attached
                to the specified image
        :mode: Specify a 1 if the virtual device is to be in read-only mode.
               Otherwise, specify a 0.
        """
        # dedicate device to directory entry
        self._dedicate_device(userid, vaddr, raddr, mode)
def _dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device."""
action = 'dedicate'
rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' %
{'uid': userid, 'act': action,
'va': vaddr, 'ra': raddr, 'mod': mode})
action = "dedicate device to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_fcp_info_by_status(self, userid, status=None):
"""get fcp information by the status.
:userid: (str) The name of the image to query fcp info
:status: (str) If status is None, will return the FCP devices
of all statuses. If status specified, will only return the
FCP devices of this status.
The status must be 'active', 'free' or 'offline'.
:returns: (list) a list of string lines that the command output.
"""
action = 'fcpinfo'
if status is None:
# if status is None, will transfer status to all
# to let smtLayer return the FCPs of all the statuses
status = "all"
# always set -k OWNER=YES
rd = ' '.join(['getvm', userid, action, status, "YES"])
action = "query fcp info of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return results['response']
    def undedicate_device(self, userid, vaddr):
        """undedicate device

        :userid: The name of the image obtaining a dedicated device
        :vaddr: The virtual device number of the device
        """
        # undedicate device to directory entry
        self._undedicate_device(userid, vaddr)
def _undedicate_device(self, userid, vaddr):
"""undedicate device."""
action = 'undedicate'
rd = ('changevm %(uid)s %(act)s %(va)s' %
{'uid': userid, 'act': action,
'va': vaddr})
action = "undedicate device from userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_image_performance_info(self, userid):
"""Get CPU and memory usage information.
:userid: the zvm userid to be queried
"""
pi_dict = self.image_performance_query([userid])
return pi_dict.get(userid, None)
    def get_adapters_info(self, userid):
        """Query and parse the extended network adapter info of a guest.

        :param userid: the guest to query
        :returns: a list of dicts, one per adapter, with keys such as
            'adapter_address', 'adapter_status', 'lan_owner', 'lan_name',
            'mac_address', 'mac_ip_version' and 'mac_ip_address'
        :raises SDKGuestOperationError: if the response reports no NIC
        """
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid,
            "--operands",
            "-k 'image_device_number=*'"))
        results = None
        action = "get network info of userid '%s'" % str(userid)
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)
        ret = results['response']
        # TODO: muti NIC support?
        nic_count = 0
        for line in ret:
            if 'adapter_count=' in line:
                nic_count = int(line.strip().split('=')[-1])
                break
        if nic_count < 1:
            msg = 'get_network_info:No NIC found on userid %s' % userid
            LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   msg=msg)
        # save network info into dict by index from 1 to nic_count
        # Firstly, get adapter information
        adapters_info = []
        adapter = dict()
        # if found IP, no need to continue
        found_mac = False
        for line in ret:
            if 'adapter_address=' in line:
                adapter_addr = line.strip().split('=')[-1]
                adapter['adapter_address'] = adapter_addr
            if 'adapter_status=' in line:
                adapter_type = line.strip().split('=')[-1]
                adapter['adapter_status'] = adapter_type
            if 'lan_owner=' in line:
                lan_owner = line.strip().split('=')[-1]
                adapter['lan_owner'] = lan_owner
            if 'lan_name=' in line:
                lan_name = line.strip().split('=')[-1]
                adapter['lan_name'] = lan_name
            if 'mac_address=' in line and not found_mac:
                mac_addr = line.strip().split('=')[-1]
                # reformat the raw 12-char MAC into colon-separated pairs
                pattern = re.compile('.{2}')
                mac_address = ':'.join(pattern.findall(mac_addr))
                adapter['mac_address'] = mac_address
            if 'mac_ip_version=' in line:
                ip_version = line.strip().split('=')[-1]
                adapter['mac_ip_version'] = ip_version
            if 'mac_ip_address=' in line:
                # once we found mac_ip_address, assume this is the MAC
                # we are using, then jump to next adapter
                mac_ip = line.strip().split('=')[-1]
                adapter['mac_ip_address'] = mac_ip
                found_mac = True
            if 'adapter_info_end' in line:
                adapters_info.append(adapter)
                # clear adapter and process next
                adapter = dict()
                found_mac = False
        return adapters_info
    def _parse_vswitch_inspect_data(self, rd_list):
        """ Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
        inspect data.

        :param rd_list: the raw response lines, in the fixed positional
            layout produced by the query (the parser advances a line index
            and skips fixed-size uplink/bridge/vlan sections)
        :returns: {'vswitch_count': int, 'vswitches': [per-vswitch dicts
            each holding 'vswitch_name' and a 'nics' list of counters]}
        """
        def _parse_value(data_list, idx, keyword, offset):
            # Return (next index, text after 'keyword' on line idx).
            return idx + offset, data_list[idx].rpartition(keyword)[2].strip()

        vsw_dict = {}
        with zvmutils.expect_invalid_resp_data():
            # vswitch count
            idx = 0
            idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
            vsw_dict['vswitch_count'] = int(vsw_count)
            # deal with each vswitch data
            vsw_dict['vswitches'] = []
            for i in range(vsw_dict['vswitch_count']):
                vsw_data = {}
                # skip vswitch number
                idx += 1
                # vswitch name
                idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
                vsw_data['vswitch_name'] = vsw_name
                # uplink count
                idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
                # skip uplink data (9 lines per uplink)
                idx += int(up_count) * 9
                # skip bridge data (fixed 8 lines)
                idx += 8
                # nic count
                vsw_data['nics'] = []
                idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
                nic_count = int(nic_count)
                for j in range(nic_count):
                    nic_data = {}
                    idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
                    # nic_id is '<userid> <vdev>'
                    userid, toss, vdev = nic_id.partition(' ')
                    nic_data['userid'] = userid
                    nic_data['vdev'] = vdev
                    idx, nic_data['nic_fr_rx'] = _parse_value(
                        rd_list, idx, 'nic_fr_rx:', 1)
                    idx, nic_data['nic_fr_rx_dsc'] = _parse_value(
                        rd_list, idx, 'nic_fr_rx_dsc:', 1)
                    idx, nic_data['nic_fr_rx_err'] = _parse_value(
                        rd_list, idx, 'nic_fr_rx_err:', 1)
                    idx, nic_data['nic_fr_tx'] = _parse_value(
                        rd_list, idx, 'nic_fr_tx:', 1)
                    idx, nic_data['nic_fr_tx_dsc'] = _parse_value(
                        rd_list, idx, 'nic_fr_tx_dsc:', 1)
                    idx, nic_data['nic_fr_tx_err'] = _parse_value(
                        rd_list, idx, 'nic_fr_tx_err:', 1)
                    idx, nic_data['nic_rx'] = _parse_value(
                        rd_list, idx, 'nic_rx:', 1)
                    idx, nic_data['nic_tx'] = _parse_value(
                        rd_list, idx, 'nic_tx:', 1)
                    vsw_data['nics'].append(nic_data)
                # vlan count
                idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
                # skip vlan data (3 lines per vlan)
                idx += int(vlan_count) * 3
                # skip the blank line
                idx += 1
                vsw_dict['vswitches'].append(vsw_data)
        return vsw_dict
def _is_vdev_valid(self, vdev, vdev_info):
for used_vdev in vdev_info:
if (((int(vdev, 16) >= int(used_vdev, 16)) and
(int(vdev, 16) <= int(used_vdev, 16) + 2)) or
((int(vdev, 16) < int(used_vdev, 16)) and
(int(vdev, 16) >= int(used_vdev, 16) - 2))):
return False
return True
    def get_power_state(self, userid):
        """Get power status of a z/VM instance.

        :param userid: the guest to query
        :returns: the status string reported by SMT (e.g. 'on' or 'off')
        """
        LOG.debug('Querying power stat of %s' % userid)
        requestData = "PowerVM " + userid + " status"
        action = "query power state of '%s'" % userid
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(requestData)
        with zvmutils.expect_invalid_resp_data(results):
            # first response line looks like '<userid>: <state>'
            status = results['response'][0].partition(': ')[2]
        return status
    def _check_power_state(self, userid, action):
        """Raise if the guest is powered off; 'action' names the operation
        that requires the guest to be running (used in the error message).

        :raises SDKGuestOperationError: when the guest power state is 'off'
        """
        # Get the vm status
        power_state = self.get_power_state(userid)
        # Reject the operation if the vm is powered off (the original
        # comment here said "power on the vm", but no power-on happens).
        if power_state == 'off':
            msg = ('The vm %s is powered off, please start up it '
                   'before %s' % (userid, action))
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   msg=msg)
def guest_start(self, userid):
"""Power on VM."""
requestData = "PowerVM " + userid + " on"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_stop(self, userid, **kwargs):
"""Power off VM."""
requestData = "PowerVM " + userid + " off"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_softstop(self, userid, **kwargs):
"""Power off VM gracefully, it will call shutdown os then
deactivate vm"""
requestData = "PowerVM " + userid + " softoff --wait"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
else:
requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout)
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
else:
requestData += ' --poll ' + str(CONF.guest.softstop_interval)
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_pause(self, userid):
self._check_power_state(userid, 'pause')
requestData = "PowerVM " + userid + " pause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_unpause(self, userid):
self._check_power_state(userid, 'unpause')
requestData = "PowerVM " + userid + " unpause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reboot(self, userid):
requestData = ' '.join(("PowerVM", userid, "reboot"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reset(self, userid):
requestData = ' '.join(("PowerVM", userid, "reset"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
    def live_migrate_move(self, userid, destination, parms):
        """ moves the specified virtual machine, while it continues to run,
        to the specified system within the SSI cluster.

        :param str userid: the guest to relocate
        :param str destination: target system within the SSI cluster
        :param dict parms: optional relocation controls. 'maxtotal' and
            'maxquiesce' values are passed through; for 'immediate',
            'forcearch', 'forcedomain' and 'forcestorage' the mere
            presence of the key enables the flag (value ignored).
        :raises SDKSMTRequestFailed: when the migratevm request fails
        """
        rd = ('migratevm %(uid)s move --destination %(dest)s ' %
              {'uid': userid, 'dest': destination})
        if 'maxtotal' in parms:
            rd += ('--maxtotal ' + str(parms['maxtotal']))
        if 'maxquiesce' in parms:
            rd += (' --maxquiesce ' + str(parms['maxquiesce']))
        if 'immediate' in parms:
            rd += " --immediate"
        if 'forcearch' in parms:
            rd += " --forcearch"
        if 'forcedomain' in parms:
            rd += " --forcedomain"
        if 'forcestorage' in parms:
            rd += " --forcestorage"
        action = "move userid '%s' to SSI '%s'" % (userid, destination)
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            # Re-raise with the action context prepended for diagnosis.
            msg = ''
            if action is not None:
                msg = "Failed to %s. " % action
            msg += "SMT error: %s" % err.format_message()
            LOG.error(msg)
            raise exception.SDKSMTRequestFailed(err.results, msg)
def live_migrate_test(self, userid, destination):
""" tests the specified virtual machine and reports whether or not
it is eligible to be relocated to the specified system. """
rd = ('migratevm %(uid)s test --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
action = "test to move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def _get_ipl_param(self, ipl_from):
if len(ipl_from) > 0:
ipl_param = ipl_from
else:
ipl_param = CONF.zvm.user_root_vdev
return ipl_param
    def create_vm(self, userid, cpu, memory, disk_list, profile,
                  max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam,
                  dedicate_vdevs, loaddev, account, comment_list,
                  cschedule='', cshare='', rdomain='', pcif=''):
        """ Create VM and add disks if specified.

        Builds a single 'makevm' request string option by option, sends
        it, registers the new guest in the database, then adds disks.

        :param str userid: userid of the guest to create
        :param int cpu: number of virtual CPUs
        :param int memory: initial memory size (MB)
        :param list disk_list: disk definition dicts; an entry with
            'is_boot_disk' True marks the boot disk
        :param str profile: directory profile name
        :param int max_cpu: maximum CPU count
        :param max_mem: maximum memory size (string with unit suffix)
        :param ipl_from: IPL device; empty means use the default root vdev
        :param ipl_param, ipl_loadparam: extra IPL parameters
        :param list dedicate_vdevs: vdevs to dedicate to the guest
        :param dict loaddev: optional 'portname'/'lun' for SCSI IPL
        :param str account: ACCOUNT statement data
        :param comment_list: list of COMMENT strings, or None
        :param cschedule, cshare, rdomain, pcif: optional COMMAND options
        :returns: result of add_mdisks(), or the original disk_list in
            the vdisk(swap)-only case
        :raises SDKInvalidInputFormat: bad pcif or vdisk size input
        :raises SDKObjectNotExistError: profile does not exist (436/4)
        :raises SDKSMTRequestFailed: other SMT failures
        """
        rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
              '--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
              '--maxMemSize %(max_mem)s --setReservedMem' %
              {'uid': userid, 'mem': memory,
               'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
               'cpu': cpu, 'prof': profile,
               'max_cpu': max_cpu, 'max_mem': max_mem})
        # Allow the configured admin userid(s) to LOGONBY this guest.
        if CONF.zvm.default_admin_userid:
            ids = CONF.zvm.default_admin_userid.split(' ')
            id_str = ':'.join(ids)
            rd += (' --logonby %s' % id_str)
        # when use dasd as root disk, the disk_list[0] would be the boot
        # disk.
        # when boot from volume, ipl_from should be specified explicitly.
        if (disk_list and 'is_boot_disk' in disk_list[0] and
                disk_list[0]['is_boot_disk']) or ipl_from:
            # we assume at least one disk exist, which means, is_boot_disk
            # is true for exactly one disk.
            rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
            # load param for ipl
            if ipl_param:
                rd += ' --iplParam %s' % ipl_param
            if ipl_loadparam:
                rd += ' --iplLoadparam %s' % ipl_loadparam
        if dedicate_vdevs:
            rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs)
        if account:
            rd += ' --account "%s"' % account
        if cschedule:
            rd += ' --commandSchedule %s' % cschedule
        # if share given, then user it
        # or if CONF.zvm.user_default_share_unit is not 0
        # set relative share to CONF.zvm.user_default_share_unit*cpu
        if cshare:
            rd += ' --commandSetShare "%s"' % cshare
        else:
            # only add SHARE statement if unit > 0
            if CONF.zvm.user_default_share_unit > 0:
                total = CONF.zvm.user_default_share_unit * cpu
                data = 'RELATIVE %d' % total
                rd += ' --commandSetShare "%s"' % data
        if rdomain:
            rd += ' --commandRDomain %s' % rdomain
        if pcif:
            # pcif must be exactly "<dev>:<dev>".
            v = pcif.split(':')
            if len(v) != 2:
                errmsg = ("pcif input %s is invalid, must be format like"
                          " <dev>:<dev>" % pcif)
                raise exception.SDKInvalidInputFormat(msg=errmsg)
            rd += ' --commandPcif %s' % pcif
        comments = ''
        if comment_list is not None:
            for comment in comment_list:
                comments += comment
                # This s a dummy spliter and will be used for split
                # the comment, for example, input comment is
                # comment1,comment2, it will be constructed into
                # comment1$@$@$comment2 and send to smtLayer to handle
                comments += '$@$@$'
        if comments:
            rd += ' --comment "%s"' % comments
        if loaddev:
            if 'portname' in loaddev:
                rd += ' --loadportname %s' % loaddev['portname']
            if 'lun' in loaddev:
                rd += ' --loadlun %s' % loaddev['lun']
        # now, we need consider swap only case, customer using boot
        # from volume but no disk pool provided, we allow to create
        # swap disk from vdisk by default, when we come to this logic
        # we are very sure that if no disk pool, there is only one
        # disk in disk_list and that's swap
        vdisk = None
        # this is swap only case, which means, you only create a swap
        # disk (len disk_list is 1) and no other disks
        if len(disk_list) == 1:
            disk = disk_list[0]
            if 'format' in disk and disk['format'].lower() == 'swap':
                disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
                if disk_pool is None:
                    # if it's vdisk, then create user direct directly
                    vd = disk.get('vdev') or self.generate_disk_vdev(offset=0)
                    disk['vdev'] = vd
                    # vdisk size is limited: at most 2048M / 2G.
                    sizeUpper = disk['size'].strip().upper()
                    sizeUnit = sizeUpper[-1]
                    if sizeUnit != 'M' and sizeUnit != 'G':
                        errmsg = ("%s must has 'M' or 'G' suffix" % sizeUpper)
                        raise exception.SDKInvalidInputFormat(msg=errmsg)
                    if sizeUnit == 'M':
                        size = int(sizeUpper[:-1])
                        if size > 2048:
                            errmsg = ("%s is great than 2048M" % sizeUpper)
                            raise exception.SDKInvalidInputFormat(msg=errmsg)
                    if sizeUnit == 'G':
                        size = int(sizeUpper[:-1])
                        if size > 2:
                            errmsg = ("%s is great than 2G" % sizeUpper)
                            raise exception.SDKInvalidInputFormat(msg=errmsg)
                    rd += ' --vdisk %s:%s' % (vd, sizeUpper)
                    vdisk = disk
        action = "create userid '%s'" % userid
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            # rc 436 / rs 4: the named profile does not exist.
            if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
                result = "Profile '%s'" % profile
                raise exception.SDKObjectNotExistError(obj_desc=result,
                                                       modID='guest')
            elif ((err.results['rc'] == 596) and (err.results['rs'] == 3658)):
                # internal issue 9939
                # That is because a previous definition of CIC may have
                # caused it to be defined. I would log it somewhere.
                LOG.warning("ignoring 596/3658 as it might be defined already")
            else:
                msg = ''
                if action is not None:
                    msg = "Failed to %s. " % action
                msg += "SMT error: %s" % err.format_message()
                LOG.error(msg)
                raise exception.SDKSMTRequestFailed(err.results, msg)
        # Add the guest to db immediately after user created
        action = "add guest '%s' to database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.add_guest(userid)
        # Continue to add disk, if vdisk is None, it means
        # it's not vdisk routine and we need add disks
        if vdisk is None and disk_list:
            # not perform mkfs against root disk
            if disk_list[0].get('is_boot_disk'):
                disk_list[0].update({'format': 'none'})
            return self.add_mdisks(userid, disk_list)
        # we must return swap disk in order to make guest config
        # handle other remaining jobs
        return disk_list
def _add_mdisk(self, userid, disk, vdev):
"""Create one disk for userid
NOTE: No read, write and multi password specified, and
access mode default as 'MR'.
"""
size = disk['size']
fmt = disk.get('format', 'ext4')
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
# Check disk_pool, if it's None, report error
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
action = 'add3390'
else:
action = 'add9336'
rd = ' '.join(['changevm', userid, action, diskpool_name,
vdev, size, '--mode MR'])
if fmt and fmt != 'none':
rd += (' --filesystem %s' % fmt.lower())
action = "add mdisk to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_vm_list(self):
"""Get the list of guests that are created by SDK
return userid list"""
action = "list all guests in database"
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.get_guest_list()
guests_migrated = \
self._GuestDbOperator.get_migrated_guest_info_list()
# db query return value in tuple (uuid, userid, metadata, comments)
userids_in_db = [g[1].upper() for g in guests_in_db]
userids_migrated = [g[1].upper() for g in guests_migrated]
userid_list = list(set(userids_in_db) - set(userids_migrated))
return userid_list
def _remove_mdisk(self, userid, vdev):
rd = ' '.join(('changevm', userid, 'removedisk', vdev))
action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def guest_authorize_iucv_client(self, userid, client=None):
"""Punch a script that used to set the authorized client userid in vm
If the guest is in log off status, the change will take effect when
the guest start up at first time.
If the guest is in active status, power off and power on are needed
for the change to take effect.
:param str guest: the user id of the vm
:param str client: the user id of the client that can communicate to
guest using IUCV"""
client = client or zvmutils.get_smt_userid()
iucv_path = "/tmp/" + userid
if not os.path.exists(iucv_path):
os.makedirs(iucv_path)
iucv_auth_file = iucv_path + "/iucvauth.sh"
zvmutils.generate_iucv_authfile(iucv_auth_file, client)
try:
requestData = "ChangeVM " + userid + " punchfile " + \
iucv_auth_file + " --class x"
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:"
" %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(iucv_path)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, wwid='',
transportfiles=None, guest_networks=None, min_fcp_paths_count=0):
guest_networks = guest_networks or []
fcps = ','.join(fcpchannels)
ws = ','.join(wwpns)
fcs = "--fcpchannel=%s" % fcps
wwpns = "--wwpn=%s" % ws
lun = "--lun=%s" % lun
wwid = "--wwid=%s" % wwid
paths = "--minfcp=%s" % min_fcp_paths_count
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns,
lun, wwid, paths]
if guest_networks:
# prepare additional parameters for RHCOS BFV
if not transportfiles:
err_msg = 'Ignition file is required when deploying RHCOS'
LOG.error(err_msg)
raise exception.SDKVolumeOperationError(rs=10)
# get NIC ID
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist("rhcos4")()
ip_config = linuxdist.create_coreos_parameter(guest_networks)
nic_id = self._generate_increasing_nic_id(
ip_config.split(":")[5].replace("enc", ""))
cmd += ["--ignitionurl=%s" % transportfiles, "--nicid=%s" % nic_id,
"--ipconfig=%s" % ip_config]
LOG.info("Running command: %s", cmd)
try:
(rc, output) = zvmutils.execute(cmd,
timeout=CONF.volume.refresh_bootmap_timeout)
except subprocess.TimeoutExpired as err:
err_msg = err.format_message()
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
except PermissionError:
# because zvmsdk user dont have permission to kill background
# process so if the excute timeout, will raise PermissionError
# we also treat it as timeout exception
err_msg = ("Running command: %s timed out." % cmd)
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
if rc != 0:
err_msg = ("refresh_bootmap failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("Exit MSG:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKVolumeOperationError(rs=5,
errcode=rc,
errmsg=err_output)
output_lines = output.split('\n')
paths_dict = {}
for line in output_lines:
if line.__contains__("RESULT PATHS: "):
paths_str = line[14:]
# paths_str format: "FCP1:W1 W2,FCP2:W3 W4"
# convert paths string into a dict
paths_list = paths_str.split(',')
for path in paths_list:
fcp, wwpn = path.split(':')
wwpn_list = wwpn.split(' ')
paths_dict[fcp] = wwpn_list
return paths_dict
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, skipdiskcopy=False):
""" Deploy image and punch config driver to target """
# (TODO: add the support of multiple disks deploy)
if skipdiskcopy:
msg = ('Start guest_deploy without unpackdiskimage, guest: %(vm)s'
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
cmd = ['/usr/bin/hexdump', '-C', '-n', '64', image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
msg = ('Image header info in guest_deploy: rc: %d, header:\n%s'
% (rc, output))
LOG.info(msg)
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
# Purge guest reader to clean dirty data
rd = ("changevm %s purgerdr" % userid)
action = "purge reader of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
# Punch transport files if specified
if transportfiles:
# Copy transport file to local
msg = ('Start to send customized file to vm %s' % userid)
LOG.info(msg)
try:
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
if remotehost:
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
else:
cmd = ["/usr/bin/cp", transportfiles, local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy config drive with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
# Punch config drive to guest userid
rd = ("changevm %(uid)s punchfile %(file)s --class X" %
{'uid': userid, 'file': local_trans})
action = "punch config drive to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
finally:
# remove the local temp config drive folder
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Authorize iucv client
client_id = None
# try to re-use previous iucv authorized userid at first
if os.path.exists(const.IUCV_AUTH_USERID_PATH):
LOG.debug("Re-use previous iucv authorized userid")
with open(const.IUCV_AUTH_USERID_PATH) as f:
client_id = f.read().strip()
self.guest_authorize_iucv_client(userid, client_id)
# Update os version in guest metadata
# TODO: may should append to old metadata, not replace
if skipdiskcopy:
os_version = image_name
else:
image_info = self._ImageDbOperator.image_query_record(image_name)
os_version = image_info[0]['imageosdistro']
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without unpackdiskimage finish successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
    def guest_deploy_rhcos(self, userid, image_name, transportfiles,
                           remotehost=None, vdev=None, hostname=None,
                           skipdiskcopy=False):
        """ Deploy a RHCOS image.

        :param str userid: target guest
        :param str image_name: image to deploy (treated as the os
            version string when skipdiskcopy is True)
        :param transportfiles: ignition file path; mandatory for RHCOS
        :param remotehost: host to scp the ignition file from, if any
        :param vdev: root disk vdev; defaults to CONF.zvm.user_root_vdev
        :param hostname: display name used in the coreos ip parameter
        :param skipdiskcopy: when True, skip copying the image to disk
        :raises SDKGuestOperationError: missing ignition file (rs=13),
            copy failure (rs=4) or unpackdiskimage failure (rs=3)
        """
        # (TODO: add the support of multiple disks deploy)
        if transportfiles is None:
            err_msg = 'Ignition file is required when deploying RHCOS image'
            LOG.error(err_msg)
            raise exception.SDKGuestOperationError(rs=13, userid=userid)
        if skipdiskcopy:
            msg = ('Start guest_deploy without copy disk, guest: %(vm)s'
                   'os_version: %(img)s' % {'img': image_name, 'vm': userid})
            LOG.info(msg)
            image_file = None
        else:
            msg = ('Start to deploy image %(img)s to guest %(vm)s'
                   % {'img': image_name, 'vm': userid})
            LOG.info(msg)
            image_file = '/'.join([self._get_image_path_by_name(image_name),
                                   CONF.zvm.user_root_vdev])
        # Unpack image file to root disk
        vdev = vdev or CONF.zvm.user_root_vdev
        # tmp_trans_dir stays None unless a remote copy is needed, so the
        # finally clause below only cleans up what was actually created.
        tmp_trans_dir = None
        try:
            if remotehost:
                # download igintion file from remote host
                tmp_trans_dir = tempfile.mkdtemp()
                local_trans = '/'.join([tmp_trans_dir,
                                       os.path.basename(transportfiles)])
                cmd = ["/usr/bin/scp", "-B",
                       "-P", CONF.zvm.remotehost_sshd_port,
                       "-o StrictHostKeyChecking=no",
                       ("%s:%s" % (remotehost, transportfiles)),
                       local_trans]
                with zvmutils.expect_and_reraise_internal_error(modID='guest'):
                    (rc, output) = zvmutils.execute(cmd)
                if rc != 0:
                    err_msg = ('copy ignition file with command %(cmd)s '
                               'failed with output: %(res)s' %
                               {'cmd': str(cmd), 'res': output})
                    LOG.error(err_msg)
                    raise exception.SDKGuestOperationError(rs=4, userid=userid,
                                                           err_info=err_msg)
                transportfiles = local_trans
            cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name,
                                                      transportfiles, vdev,
                                                      image_file, hostname,
                                                      skipdiskcopy)
            with zvmutils.expect_and_reraise_internal_error(modID='guest'):
                (rc, output) = zvmutils.execute(cmd)
            if rc != 0:
                err_msg = ("unpackdiskimage failed with return code: %d." % rc)
                err_output = ""
                output_lines = output.split('\n')
                # Only the "ERROR:" lines are relevant to the caller.
                for line in output_lines:
                    if line.__contains__("ERROR:"):
                        err_output += ("\\n" + line.strip())
                LOG.error(err_msg + err_output)
                raise exception.SDKGuestOperationError(rs=3, userid=userid,
                                                       unpack_rc=rc,
                                                       err=err_output)
        finally:
            # remove the temp ignition file
            if tmp_trans_dir:
                self._pathutils.clean_temp_folder(tmp_trans_dir)
        # Update os version in guest metadata
        # TODO: may should append to old metadata, not replace
        if skipdiskcopy:
            os_version = image_name
        else:
            os_version = self.image_get_os_distro(image_name)
        metadata = 'os_version=%s' % os_version
        self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
        if skipdiskcopy:
            msg = ('guest_deploy without copy disk finish successfully, '
                   'guest: %(vm)s, os_version: %(img)s'
                   % {'img': image_name, 'vm': userid})
        else:
            msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
                   ' successfully' % {'img': image_name, 'vm': userid,
                                      'vdev': vdev})
        LOG.info(msg)
def get_os_version_from_userid(self, userid):
"""Get the os_verison of guests from userid.
return os_version or UNKNOWN"""
action = "get guests os_version from userid."
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.\
get_guest_metadata_with_userid(userid)
# db query return metadata in tuple (metadata)
os_version = 'UNKNOWN'
for g in guests_in_db:
if 'os_version='.upper() in g[0].upper():
os_version = g[0].upper().strip().split('=')[1]
break
return os_version
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6, capture_device_assign=None):
if capture_type == "alldisks":
func = ('Capture guest with type: %s' % capture_type)
msg = ('%s is not supported in current release' % func)
LOG.error(msg)
raise exception.SDKFunctionNotImplementError(func=func,
modID='guest')
msg = ('Start to capture %(vm)s to generate image %(img)s with '
'capture type %(type)s' % {'vm': userid,
'img': image_name,
'type': capture_type})
LOG.info(msg)
# self._check_power_state(userid, 'capture')
restart_flag = False
reachable = self.get_guest_connection_status(userid)
if reachable:
# Make sure iucv channel is ready for communication on source vm
try:
self.execute_cmd(userid, 'pwd')
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to check iucv status on capture source vm '
'%(vm)s with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Get the os version of the vm
try:
os_version = self.guest_get_os_version(userid)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on capture source vm %(vm)s'
'to get os version with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Error happened when parsing os version on source vm '
'%(vm)s with error: %(err)s'
% {'vm': userid, 'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
msg = ('The capture source vm os version %(vm)s is %(version)s'
% {'vm': userid, 'version': os_version})
LOG.info(msg)
# Find the root device according to the capture type
try:
capture_devices = self._get_capture_devices(userid,
capture_type)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on source vm %(vm)s to get '
'devices for capture with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Internal error happened when getting the devices for '
'capture on source vm %(vm)s with error %(err)s' %
{'vm': userid, 'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except exception.SDKGuestOperationError:
raise
# Shutdown the vm before capture
self.guest_softstop(userid)
# keep restart flag used after capture.
restart_flag = True
else:
os_version = self.get_os_version_from_userid(userid)
# Capture_device_assign as assign capture disk.
# Input should be string to identity disk.
# use force_capture_disk value first if
# force_capture_disk=xxxx in zvmsdk.conf.
if CONF.zvm.force_capture_disk:
capture_devices = [str(CONF.zvm.force_capture_disk)]
else:
if capture_device_assign:
capture_devices = [str(capture_device_assign)]
else:
direct_info = self.get_user_direct(userid)
disk_info =\
[x for x in direct_info if x.startswith('MDISK')]
capture_devices = \
[x.split(' ')[1].strip(' ') for x in disk_info]
if not capture_devices:
msg = ('Error happened when getting the devices for '
'get vm disk information on source vm %(vm)s '
% {'vm': userid})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# if VM power on, the VM need be perform stop and start
power_state = self.get_power_state(userid)
if power_state == 'on':
# Shutdown the vm before capture
self.guest_stop(userid)
restart_flag = True
# Prepare directory for writing image file
image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
const.IMAGE_TYPE['CAPTURE'],
os_version,
image_name))
self._pathutils.mkdir_if_not_exist(image_temp_dir)
# Call creatediskimage to capture a vm to generate an image
# TODO:(nafei) to support multiple disk capture
vdev = capture_devices[0]
msg = ('Found the device %(vdev)s of %(vm)s for capture' %
{'vdev': vdev, 'vm': userid})
LOG.info(msg)
image_file_name = vdev
image_file_path = '/'.join((image_temp_dir, image_file_name))
cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
image_file_path, '--compression', str(compress_level)]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("creatediskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
self._pathutils.clean_temp_folder(image_temp_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=err_output)
# Move the generated image to netboot folder
image_final_dir = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
os_version,
image_name])
image_final_path = '/'.join((image_final_dir,
image_file_name))
self._pathutils.mkdir_if_not_exist(image_final_dir)
cmd = ['mv', image_file_path, image_final_path]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("move image file from staging to netboot "
"folder failed with return code: %d." % rc)
LOG.error(err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
self._pathutils.clean_temp_folder(image_final_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
err=err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
msg = ('Updating the metadata for captured image %s ' % image_name)
LOG.info(msg)
# Get md5sum of image
real_md5sum = self._get_md5sum(image_final_path)
# Get disk_size_units of image
disk_size_units = self._get_disk_size_units(image_final_path)
# Get the image physical size
image_size = self._get_image_size(image_final_path)
# Create the image record in image database
self._ImageDbOperator.image_add_record(image_name, os_version,
real_md5sum, disk_size_units, image_size,
capture_type)
if restart_flag:
LOG.info('Try start %s for capture completed successfully.'
% userid)
self.guest_start(userid)
LOG.info('Image %s is captured and imported to image repository '
'successfully' % image_name)
def guest_get_os_version(self, userid):
os_version = ''
release_file = self.execute_cmd(userid, 'ls /etc/*-release')
if '/etc/os-release' in release_file:
# Parse os-release file, part of the output looks like:
# NAME="Red Hat Enterprise Linux Server"
# ID="rhel"
# VERSION_ID="7.0"
release_info = self.execute_cmd(userid, 'cat /etc/os-release')
release_dict = {}
for item in release_info:
if item:
release_dict[item.split('=')[0]] = item.split('=')[1]
distro = release_dict['ID']
version = release_dict['VERSION_ID']
if '"' in distro:
distro = eval(distro)
if '"' in version:
version = eval(version)
os_version = '%s%s' % (distro, version)
return os_version
elif '/etc/redhat-release' in release_file:
# The output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/redhat-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
elif '/etc/SuSE-release' in release_file:
# The output for this file looks like:
# SUSE Linux Enterprise Server 11 (s390x)
# VERSION = 11
# PATCHLEVEL = 3
distro = 'sles'
release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release')
LOG.debug('OS release info is %s' % release_info)
release_version = '.'.join((release_info[1].split('=')[1].strip(),
release_info[2].split('=')[1].strip()))
os_version = ''.join((distro, release_version))
return os_version
elif '/etc/system-release' in release_file:
# For some rhel6.7 system, it only have system-release file and
# the output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/system-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
    def _get_capture_devices(self, userid, capture_type='rootonly'):
        """Find the vdev(s) of the disks to capture on the source guest.

        :param str userid: the capture source guest
        :param capture_type: only 'rootonly' is implemented; any other
            value currently returns None (sysclone support is deferred)
        :returns: list with the root disk's 4-digit vdev for 'rootonly'
        :raises SDKGuestOperationError: when the root device cannot be
            determined or lives on a logical volume (unsupported)
        """
        capture_devices = []
        if capture_type == 'rootonly':
            # Parse the /proc/cmdline to get root devices
            proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
                           '| tr " " "\\n" | grep -a "^root=" | cut -c6-')
            root_device_info = proc_cmdline[0]
            if not root_device_info:
                msg = ('Unable to get useful info from /proc/cmdline to '
                       'locate the device associated with the root directory '
                       'on capture source vm %s' % userid)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            else:
                # root= may be a UUID/LABEL reference, a device-mapper
                # path (unsupported), or a plain device node.
                if 'UUID=' in root_device_info:
                    uuid = root_device_info.split()[0].split('=')[1]
                    root_device = '/'.join(('/dev/disk/by-uuid', uuid))
                elif 'LABEL=' in root_device_info:
                    label = root_device_info.split()[0].split('=')[1]
                    root_device = '/'.join(('/dev/disk/by-label', label))
                elif 'mapper' in root_device_info:
                    msg = ('Capturing a disk with root filesystem on logical'
                           ' volume is not supported')
                    raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                           msg=msg)
                else:
                    root_device = root_device_info
                # Resolve symlinks to the real device node.
                root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
                                                    root_device)[0]
                # Get device node vdev by node name
                # (strip the partition digits to match the whole-disk name)
                cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
                       root_device_node.split('/')[-1].rstrip(string.digits))
                result = self.execute_cmd(userid, cmd)[0]
                # Bus-id looks like '0.0.XXXX'; chars [4:8] are the vdev.
                root_device_vdev = result.split()[0][4:8]
                capture_devices.append(root_device_vdev)
                return capture_devices
        else:
            # For sysclone, parse the user directory entry to get the devices
            # for capture, leave for future
            pass
    def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
                                       transportfiles=None, vdev=None,
                                       image_file=None, hostname=None,
                                       skipdiskcopy=False):
        """Build the unpackdiskimage command line for a RHCOS deploy.

        :param str userid: target guest
        :param str image_name: image to deploy; treated as the os version
            string when skipdiskcopy is True
        :param transportfiles: ignition file path
        :param vdev: target disk vdev
        :param image_file: local image file path (None with skipdiskcopy)
        :param hostname: display name substituted into the coreos fixed
            ip parameter in place of the userid
        :param skipdiskcopy: when True, assume SCSI disk and skip the
            image copy arguments
        :returns: argv list for zvmutils.execute
        :raises SDKGuestOperationError: when image disk type or coreos
            fixed ip parameters cannot be determined (rs=12), or LOADDEV
            wwpn/lun is missing for FCP deploys (rs=14)
        """
        if skipdiskcopy:
            os_version = image_name
            image_disk_type = 'SCSI'
        else:
            os_version = self.image_get_os_distro(image_name)
            # Query image disk type
            image_disk_type = self._get_image_disk_type(image_name)
            if image_disk_type is None:
                err_msg = ("failed to get image disk type for "
                           "image '%(image_name)s'."
                           % {'image_name': image_name})
                raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                       err=err_msg)
        try:
            # Query vm's disk pool type and image disk type
            from zvmsdk import dist
            _dist_manager = dist.LinuxDistManager()
            linuxdist = _dist_manager.get_linux_dist(os_version)()
            # Read coros fixed ip parameter from tempfile
            fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
        except Exception as err:
            err_msg = ("failed to read coreos fixed ip "
                       "parameters for userid '%(userid)s',"
                       "error: %(err)s."
                       % {'userid': userid, 'err': err})
            raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                   err=err_msg)
        if fixed_ip_parameter is None:
            err_msg = ("coreos fixed ip parameters don't exist.")
            raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                   err=err_msg)
        if hostname:
            # replace hostname to display name instead of userid
            fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
                                                            hostname)
        # read nic device id and change it into the form like
        # "0.0.1000,0.0.1001,0.0.1002"
        nic_id = self._generate_increasing_nic_id(
            fixed_ip_parameter.split(":")[5].replace("enc", ""))
        if image_disk_type == 'SCSI':
            # FCP deploy needs the LOADDEV wwpn/lun of the guest.
            (wwpn, lun) = self._get_wwpn_lun(userid)
            if wwpn is None or lun is None:
                err_msg = ("wwpn and lun is required for FCP devices,"
                           " please set LOADDEV for userid %s" % userid)
                raise exception.SDKGuestOperationError(rs=14, userid=userid,
                                                       msg=err_msg)
            wwpn = '0x' + wwpn
            lun = '0x' + lun
            if skipdiskcopy:
                return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
                        wwpn, lun, transportfiles, nic_id, fixed_ip_parameter]
            else:
                return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
                        wwpn, lun, image_file, transportfiles,
                        image_disk_type, nic_id, fixed_ip_parameter]
        else:
            return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
                    image_file, transportfiles, image_disk_type, nic_id,
                    fixed_ip_parameter]
def grant_user_to_vswitch(self, vswitch_name, userid):
"""Set vswitch to grant user."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k grant_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to grant user %s to vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def _set_vswitch_exception(self, error, switch_name):
if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)):
errmsg = ("Operation is not allowed for a "
"VLAN UNAWARE vswitch")
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2838) or
(error.results['rs'] == 2853) or
(error.results['rs'] == 2856) or
(error.results['rs'] == 2858) or
(error.results['rs'] == 3022) or
(error.results['rs'] == 3033))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
else:
raise error
def revoke_user_from_vswitch(self, vswitch_name, userid):
"""Revoke user for vswitch."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k revoke_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
    def image_performance_query(self, uid_list):
        """Call Image_Performance_Query to get guest current status.

        :uid_list: A list of zvm userids to be queried (a bare string is
                   also accepted and wrapped into a list)
        :returns: dict mapping each userid to its performance-info dict;
                  guests for which SMAPI returned no data are omitted
        """
        # Nothing to query -- avoid issuing an empty SMAPI request.
        if uid_list == []:
            return {}
        # Tolerate a single userid passed as a plain string.
        if not isinstance(uid_list, list):
            uid_list = [uid_list]
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Image_Performance_Query" % smt_userid,
            "--operands",
            '-T "%s"' % (' '.join(uid_list)),
            "-c %d" % len(uid_list)))
        action = "get performance info of userid '%s'" % str(uid_list)
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)
        # Map of output keys to the keyword each value follows in the
        # raw SMAPI response text.
        ipq_kws = {
            'userid': "Guest name:",
            'guest_cpus': "Guest CPUs:",
            'used_cpu_time': "Used CPU time:",
            'elapsed_cpu_time': "Elapsed time:",
            'min_cpu_count': "Minimum CPU count:",
            'max_cpu_limit': "Max CPU limit:",
            'samples_cpu_in_use': "Samples CPU in use:",
            'samples_cpu_delay': "Samples CPU delay:",
            'used_memory': "Used memory:",
            'max_memory': "Max memory:",
            'min_memory': "Minimum memory:",
            'shared_memory': "Shared memory:",
        }
        pi_dict = {}
        pi = {}
        # Each guest's section is separated by a blank line in the response.
        rpi_list = ('\n'.join(results['response'])).split("\n\n")
        for rpi in rpi_list:
            try:
                pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
            except exception.SDKInternalError as err:
                emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off'state, the smcli will only returns the queried
                # userid number, no valid performance info returned.
                if (emsg.__contains__("No value matched with keywords.")):
                    continue
                else:
                    raise err
            # Strip surrounding quotes/blanks from every parsed value.
            for k, v in pi.items():
                pi[k] = v.strip('" ')
            if pi.get('userid') is not None:
                pi_dict[pi['userid']] = pi
        return pi_dict
    def system_image_performance_query(self, namelist):
        """Call System_Image_Performance_Query to get guest current status.

        :namelist: A namelist that defined in smapi namelist file.
        :returns: dict mapping each userid in the namelist to its
                  performance-info dict; guests with no data are omitted
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API System_Image_Performance_Query" % smt_userid,
            "--operands -T %s" % namelist))
        action = "get performance info of namelist '%s'" % namelist
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)
        # Map of output keys to the keyword each value follows in the
        # raw SMAPI response text.
        ipq_kws = {
            'userid': "Guest name:",
            'guest_cpus': "Guest CPUs:",
            'used_cpu_time': "Used CPU time:",
            'elapsed_cpu_time': "Elapsed time:",
            'min_cpu_count': "Minimum CPU count:",
            'max_cpu_limit': "Max CPU limit:",
            'samples_cpu_in_use': "Samples CPU in use:",
            'samples_cpu_delay': "Samples CPU delay:",
            'used_memory': "Used memory:",
            'max_memory': "Max memory:",
            'min_memory': "Minimum memory:",
            'shared_memory': "Shared memory:",
        }
        pi_dict = {}
        pi = {}
        # Each guest's section is separated by a blank line in the response.
        rpi_list = ('\n'.join(results['response'])).split("\n\n")
        for rpi in rpi_list:
            try:
                pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
            except exception.SDKInternalError as err:
                emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off'state, the smcli will only returns the queried
                # userid number, no valid performance info returned.
                if (emsg.__contains__("No value matched with keywords.")):
                    continue
                else:
                    raise err
            # Strip surrounding quotes/blanks from every parsed value.
            for k, v in pi.items():
                pi[k] = v.strip('" ')
            if pi.get('userid') is not None:
                pi_dict[pi['userid']] = pi
        return pi_dict
def virtual_network_vswitch_query_byte_stats(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
smt_userid,
"--operands",
'-T "%s"' % smt_userid,
'-k "switch_name=*"'
))
action = "query vswitch usage info"
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return self._parse_vswitch_inspect_data(results['response'])
def get_host_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost general")
host_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.RINV_HOST_KEYWORDS)
return host_info
def get_diskpool_info(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost diskpoolspace %s" % pool)
dp_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_KEYWORDS)
return dp_info
def get_vswitch_list(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
"--operands",
"-s \'*\'"))
try:
result = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
LOG.warning("No Virtual switch in the host")
return []
else:
LOG.error("Failed to get vswitch list, error: %s" %
err.format_message())
raise
with zvmutils.expect_invalid_resp_data():
if (not result['response'] or not result['response'][0]):
return []
else:
data = '\n'.join([s for s in result['response']
if isinstance(s, six.string_types)])
output = re.findall('VSWITCH: Name: (.*)', data)
return output
def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k grant_userid=%s" % userid,
"-k switch_name=%s" % vswitch_name,
"-k user_vlan_id=%s" % vlan_id,
"-k persist=YES"))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
"error: %s" %
(vlan_id, vswitch_name, userid, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s successfully'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
def add_vswitch(self, name, rdev=None, controller='*',
connection='CONNECT', network_type='ETHERNET',
router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % name))
if rdev is not None:
rd += " -k real_device_address" +\
"=\'%s\'" % rdev.replace(',', ' ')
if controller != '*':
rd += " -k controller_name=%s" % controller
rd = ' '.join((rd,
"-k connection_value=%s" % connection,
"-k queue_memory_limit=%s" % queue_mem,
"-k transport_type=%s" % network_type,
"-k vlan_id=%s" % vid,
"-k persist=%s" % (persist and 'YES' or 'NO')))
# Only if vswitch is vlan awared, port_type, gvrp and native_vid are
# allowed to specified
if isinstance(vid, int) or vid.upper() != 'UNAWARE':
rd = ' '.join((rd,
"-k port_type=%s" % port_type,
"-k gvrp_value=%s" % gvrp,
"-k native_vlanid=%s" % native_vid))
if router is not None:
rd += " -k routing_value=%s" % router
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create vswitch %s, error: %s" %
(name, err.format_message()))
raise
msg = ('Create vswitch %s successfully' % name)
LOG.info(msg)
def set_vswitch(self, switch_name, **kwargs):
"""Set vswitch"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name))
for k, v in kwargs.items():
rd = ' '.join((rd,
"-k %(key)s=\'%(value)s\'" %
{'key': k, 'value': v}))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set vswitch %s, error: %s" %
(switch_name, err.format_message()))
self._set_vswitch_exception(err, switch_name)
def delete_vswitch(self, switch_name, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to delete vswitch %s' % switch_name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name,
"-k persist=%s" % (persist and 'YES' or 'NO')))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
if ((results['rc'] == 212) and
(results['rs'] == 40)):
LOG.warning("Vswitch %s does not exist", switch_name)
return
else:
LOG.error("Failed to delete vswitch %s, error: %s" %
(switch_name, err.format_message()))
raise
msg = ('Delete vswitch %s successfully' % switch_name)
LOG.info(msg)
def create_nic(self, userid, vdev=None, nic_id=None,
mac_addr=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'ID is %(id)s, address is %(address)s',
{'vdev': nic_vdev,
'id': nic_id or 'not specified',
'address': mac_addr or 'not specified'})
self._create_nic(userid, nic_vdev, nic_id=nic_id,
mac_addr=mac_addr, active=active)
return nic_vdev
def _create_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _create_nic_active_exception(self, error, userid, vdev):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 28))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
elif ((error.results['rc'] == 396) and
(error.results['rs'] == 2797)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _is_active(self, userid):
# Get the vm status
power_state = self.get_power_state(userid)
if power_state == 'off':
LOG.error('The vm %s is powered off, '
'active operation is not allowed' % userid)
raise exception.SDKConflictError(modID='network', rs=1,
userid=userid)
    def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
                    active=False):
        """Define a QDIO NIC in the user direct and optionally hot-add it.

        :param userid: guest userid
        :param vdev: virtual device number for the new adapter
        :param nic_id: port id recorded in the local switch table
        :param mac_addr: MAC in aa:bb:cc:dd:ee:ff form; only the low three
                         bytes are sent as mac_id, and it is ignored for
                         the live (active) add
        :param active: also create the adapter on the running system
        :raises SDKConflictError / SDKNetworkOperationError on failure
        """
        if active:
            # Live changes require the guest to be powered on.
            self._is_active(userid)
        msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
        requestData = ' '.join((
            'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
            userid,
            "--operands",
            "-k image_device_number=%s" % vdev,
            "-k adapter_type=QDIO"))
        if mac_addr is not None:
            # Only the last three bytes of the MAC are user-settable.
            mac = ''.join(mac_addr.split(':'))[6:]
            requestData += ' -k mac_id=%s' % mac
        retry = 1
        # Back off 1/3/5/8 seconds on a locked directory; the trailing -1
        # entry is never slept on because retry stops at 5.
        for secs in [1, 3, 5, 8, -1]:
            try:
                self._request(requestData)
                break
            except exception.SDKSMTRequestFailed as err:
                # rc=400 rs=12: user direct is locked -- wait and retry.
                if (err.results['rc'] == 400 and
                        err.results['rs'] == 12 and
                        retry < 5):
                    LOG.info("The VM is locked, will retry")
                    time.sleep(secs)
                    retry += 1
                else:
                    LOG.error("Failed to create nic %s for user %s in "
                              "the guest's user direct, error: %s" %
                              (vdev, userid, err.format_message()))
                    self._create_nic_inactive_exception(err, userid, vdev)
        if active:
            if mac_addr is not None:
                LOG.warning("Ignore the mac address %s when "
                            "adding nic on an active system" % mac_addr)
            requestData = ' '.join((
                'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
                userid,
                "--operands",
                "-k image_device_number=%s" % vdev,
                "-k adapter_type=QDIO"))
            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err1:
                msg1 = err1.format_message()
                # Live add failed: roll back the user-direct definition so
                # the directory and the running system stay consistent.
                persist_OK = True
                requestData = ' '.join((
                    'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid,
                    "--operands",
                    '-v %s' % vdev))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err2:
                    results = err2.results
                    msg2 = err2.format_message()
                    # rc=404 rs=8: the adapter is already absent from the
                    # directory -- the rollback target is gone, count it
                    # as a successful rollback.
                    if ((results['rc'] == 404) and
                            (results['rs'] == 8)):
                        persist_OK = True
                    else:
                        persist_OK = False
                if persist_OK:
                    self._create_nic_active_exception(err1, userid, vdev)
                else:
                    raise exception.SDKNetworkOperationError(rs=4,
                        nic=vdev, userid=userid,
                        create_err=msg1, revoke_err=msg2)
        # Record the new adapter in the local switch table.
        self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
        msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
def get_user_direct(self, userid):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm %s directory" % userid)
return results.get('response', [])
def get_all_user_direct(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm alldirectory")
return results.get('response', [])
def get_diskpool_volumes(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("gethost diskpoolvolumes %s" % pool)
diskpool_volumes = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_VOLUME_KEYWORDS)
return diskpool_volumes
def get_volume_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("gethost volumeinfo")
with zvmutils.expect_invalid_resp_data(results):
volume_info = zvmutils.translate_response_data_to_expect_dict(
results['response'], 3)
return volume_info
def _delete_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=8,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _delete_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=9,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
    def delete_nic(self, userid, vdev, active=False):
        """Delete a NIC from the user direct and optionally from the
        running system.

        Handles three situations: a normal NIC recorded in the switch
        table, a dedicated OSA device (delegated to _undedicate_nic), and
        a NIC already absent from the switch table but possibly still
        present on the active system.

        :param userid: guest userid
        :param vdev: virtual device number of the adapter
        :param active: also remove the adapter from the running system
        """
        if active:
            # Live changes require the guest to be powered on.
            self._is_active(userid)
        vdev_exist = False
        nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
        # Compare device numbers numerically (hex) to tolerate leading
        # zeros and case differences.
        for p in nic_list:
            if (int(p['interface'], 16) == int(vdev, 16)):
                vdev_exist = True
                vdev_info = p
                break
        if not vdev_exist:
            # Device has already been removed from the user direct.
            LOG.warning("Virtual device %s does not exist in the switch table",
                        vdev)
            if active:
                # It may still exist on the live system; query CP to see
                # whether it is a NIC, a dedicated OSA, or neither.
                try:
                    resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
                    nic_info = "%s ON NIC" % vdev.zfill(4).upper()
                    osa_info = "%s ON OSA" % vdev.zfill(4).upper()
                    if nic_info in resp[0]:
                        pass
                    elif osa_info in resp[0]:
                        # Dedicated OSA: only the live side needs undoing.
                        self._undedicate_nic(userid, vdev, active=active,
                                             del_active_only=True)
                        return
                    else:
                        LOG.warning("Device %s of guest %s is not "
                                    "network adapter" % (vdev, userid))
                        return
                except exception.SDKSMTRequestFailed as err:
                    emsg = err.format_message()
                    ignored_msg = ('Device %s does not exist'
                                   % vdev.zfill(4).upper())
                    if (emsg.__contains__(ignored_msg)):
                        LOG.warning("Virtual device %s does not exist for "
                                    "active guest %s" % (vdev, userid))
                        return
                    else:
                        raise
            else:
                return
        else:
            # Device has not been removed from the user direct yet;
            # check whether it is related to a dedicated OSA device.
            if ((vdev_info["comments"] is not None) and
                    (vdev_info["comments"].__contains__('OSA='))):
                self._undedicate_nic(userid, vdev, active=active)
                return
        msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
        if vdev_exist:
            rd = ' '.join((
                "SMAPI %s API Virtual_Network_Adapter_Delete_DM" %
                userid,
                "--operands",
                '-v %s' % vdev))
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err:
                results = err.results
                emsg = err.format_message()
                # rc=404 rs=8: already absent from the directory -- benign.
                if ((results['rc'] == 404) and
                        (results['rs'] == 8)):
                    LOG.warning("Virtual device %s does not exist in "
                                "the guest's user direct", vdev)
                else:
                    LOG.error("Failed to delete nic %s for %s in "
                              "the guest's user direct, error: %s" %
                              (vdev, userid, emsg))
                    self._delete_nic_inactive_exception(err, userid, vdev)
            self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
        if active:
            rd = ' '.join((
                "SMAPI %s API Virtual_Network_Adapter_Delete" %
                userid,
                "--operands",
                '-v %s' % vdev))
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err:
                results = err.results
                emsg = err.format_message()
                # rc=204 rs=8: already absent from the live system -- benign.
                if ((results['rc'] == 204) and
                        (results['rs'] == 8)):
                    LOG.warning("Virtual device %s does not exist on "
                                "the active guest system", vdev)
                else:
                    LOG.error("Failed to delete nic %s for %s on "
                              "the active guest system, error: %s" %
                              (vdev, userid, emsg))
                    self._delete_nic_active_exception(err, userid, vdev)
        msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
def _couple_active_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 212) and
((error.results['rs'] == 28) or
(error.results['rs'] == 8))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % vswitch
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2788) or
(error.results['rs'] == 2848) or
(error.results['rs'] == 3034) or
(error.results['rs'] == 6011))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
else:
raise error
def _couple_inactive_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
raise error
    def _couple_nic(self, userid, vdev, vswitch_name,
                    active=False):
        """Couple NIC to vswitch by adding vswitch into user direct.

        When active is True the adapter is also connected on the running
        system; if the live connect fails, the directory-side connection
        is rolled back so both views stay consistent. The local switch
        table is updated in all successful paths.

        :param userid: guest userid
        :param vdev: virtual device number of the adapter
        :param vswitch_name: name of the vswitch to connect to
        :param active: also connect on the running system
        """
        if active:
            self._is_active(userid)
            requestData = ' '.join((
                'SMAPI %s' % userid,
                'API Virtual_Network_Adapter_Connect_Vswitch',
                "--operands",
                "-v %s" % vdev,
                "-n %s" % vswitch_name))
            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err1:
                results1 = err1.results
                msg1 = err1.format_message()
                # rc=204 rs=20: already connected live -- benign.
                if ((results1 is not None) and
                    (results1['rc'] == 204) and
                    (results1['rs'] == 20)):
                    LOG.warning("Virtual device %s already connected "
                                "on the active guest system", vdev)
                else:
                    # Live connect failed: undo the directory-side
                    # connection so the two views stay consistent.
                    persist_OK = True
                    requestData = ' '.join((
                        'SMAPI %s' % userid,
                        'API Virtual_Network_Adapter_Disconnect_DM',
                        "--operands",
                        '-v %s' % vdev))
                    try:
                        self._request(requestData)
                    except (exception.SDKSMTRequestFailed,
                            exception.SDKInternalError) as err2:
                        results2 = err2.results
                        msg2 = err2.format_message()
                        # rc=212 rs=32: not connected in the directory --
                        # rollback target already gone, treat as success.
                        if ((results2 is not None) and
                            (results2['rc'] == 212) and
                            (results2['rs'] == 32)):
                            persist_OK = True
                        else:
                            persist_OK = False
                    if persist_OK:
                        self._couple_active_exception(err1, userid, vdev,
                                                      vswitch_name)
                    else:
                        raise exception.SDKNetworkOperationError(rs=3,
                            nic=vdev, vswitch=vswitch_name,
                            couple_err=msg1, revoke_err=msg2)
        """Update information in switch table."""
        self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
                                                             vswitch_name)
        msg = ('Couple nic device %(vdev)s of guest %(vm)s '
               'with vswitch %(vsw)s successfully'
               % {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
        LOG.info(msg)
    def couple_nic_to_vswitch(self, userid, nic_vdev,
                              vswitch_name, active=False,
                              vlan_id=-1, port_type='ACCESS'):
        """Couple nic to vswitch.

        Edits the NICDEF statement in the user directory directly (lock,
        rewrite, replace), because SMAPI's Connect_Vswitch_DM offers no
        way to set a VLAN ID, then delegates the live/DB update to
        _couple_nic.

        :param userid: guest userid
        :param nic_vdev: virtual device number of the NIC
        :param vswitch_name: vswitch to couple to
        :param active: also connect on the running system
        :param vlan_id: VLAN id; a negative value means "no VLAN ID given"
        :param port_type: port type used together with a VLAN id
        :raises SDKGuestOperationError: when locking or replacing the
                user directory fails
        """
        if active:
            msg = ("both in the user direct of guest %s and on "
                   "the active guest system" % userid)
        else:
            msg = "in the user direct of guest %s" % userid
        LOG.debug("Connect nic %s to switch %s %s",
                  nic_vdev, vswitch_name, msg)
        # previously we use Virtual_Network_Adapter_Connect_Vswitch_DM
        # but due to limitation in SMAPI, we have to create such user
        # direct by our own due to no way to add VLAN ID
        msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
               'with vswitch %(vsw)s with vlan %(vlan_id)s:'
               % {'vdev': nic_vdev, 'vm': userid, 'vsw': vswitch_name,
                  'vlan_id': vlan_id})
        LOG.info(msg)
        user_direct = self.get_user_direct(userid)
        new_user_direct = []
        nicdef = "NICDEF %s" % nic_vdev.upper()
        # Rebuild the directory, appending a LAN statement right after
        # the NICDEF line of this adapter.
        for ent in user_direct:
            if len(ent) > 0:
                new_user_direct.append(ent)
                if ent.upper().startswith(nicdef):
                    # If NIC already coupled with this vswitch,
                    # return and skip following actions,
                    # such as migrating VM
                    if ("LAN SYSTEM %s" % vswitch_name) in ent:
                        LOG.info("NIC %s already coupled to vswitch %s, "
                                 "skip couple action."
                                 % (nic_vdev, vswitch_name))
                        return
                    # vlan_id < 0 means no VLAN ID given
                    v = nicdef
                    if vlan_id < 0:
                        v += " LAN SYSTEM %s" % vswitch_name
                    else:
                        v += " LAN SYSTEM %s VLAN %s PORTTYPE %s" \
                                % (vswitch_name, vlan_id, port_type)
                    new_user_direct.append(v)
        # The directory must be locked before it can be replaced.
        try:
            self._lock_user_direct(userid)
        except exception.SDKSMTRequestFailed as e:
            raise exception.SDKGuestOperationError(rs=9, userid=userid,
                                                   err=e.format_message())
        # Replace user directory
        try:
            self._replace_user_direct(userid, new_user_direct)
        except exception.SDKSMTRequestFailed as e:
            # Replacement failed: try to unlock so the guest is not left
            # locked, then surface the original failure.
            rd = ("SMAPI %s API Image_Unlock_DM " % userid)
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err2:
                # ignore 'not locked' error
                if ((err2.results['rc'] == 400) and (
                    err2.results['rs'] == 24)):
                    LOG.debug("Guest '%s' unlocked successfully." % userid)
                    pass
                else:
                    # just print error and ignore this unlock error
                    msg = ("Unlock definition of guest '%s' failed "
                           "with SMT error: %s" %
                           (userid, err2.format_message()))
                    LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=10,
                                                   userid=userid,
                                                   err=e.format_message())
        self._couple_nic(userid, nic_vdev, vswitch_name, active=active)
def _uncouple_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=12,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _uncouple_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=13,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
    def _uncouple_nic(self, userid, vdev, active=False):
        """Uncouple NIC from vswitch.

        Always disconnects in the user directory and clears the switch
        association in the local switch table; when active is True the
        adapter is also disconnected on the running system. "Already
        disconnected" responses are treated as success in both phases.

        :param userid: guest userid
        :param vdev: virtual device number of the adapter
        :param active: also disconnect on the running system
        """
        if active:
            self._is_active(userid)
        msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
        requestData = ' '.join((
            'SMAPI %s' % userid,
            "API Virtual_Network_Adapter_Disconnect_DM",
            "--operands",
            "-v %s" % vdev))
        try:
            self._request(requestData)
        except (exception.SDKSMTRequestFailed,
                exception.SDKInternalError) as err:
            results = err.results
            emsg = err.format_message()
            # rc=212 rs=32: not connected in the directory -- benign.
            if ((results is not None) and
                (results['rc'] == 212) and
                (results['rs'] == 32)):
                LOG.warning("Virtual device %s is already disconnected "
                            "in the guest's user direct", vdev)
            else:
                LOG.error("Failed to uncouple nic %s in the guest's user "
                          "direct, error: %s" % (vdev, emsg))
                self._uncouple_inactive_exception(err, userid, vdev)
        """Update information in switch table."""
        self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
                                                             None)
        # the inst must be active, or this call will failed
        if active:
            requestData = ' '.join((
                'SMAPI %s' % userid,
                'API Virtual_Network_Adapter_Disconnect',
                "--operands",
                "-v %s" % vdev))
            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err:
                results = err.results
                emsg = err.format_message()
                # rc=204 rs=48: not connected on the live system -- benign.
                if ((results is not None) and
                        (results['rc'] == 204) and
                        (results['rs'] == 48)):
                    LOG.warning("Virtual device %s is already "
                                "disconnected on the active "
                                "guest system", vdev)
                else:
                    LOG.error("Failed to uncouple nic %s on the active "
                              "guest system, error: %s" % (vdev, emsg))
                    self._uncouple_active_exception(err, userid, vdev)
        msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
def uncouple_nic_from_vswitch(self, userid, nic_vdev,
active=False):
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Disconnect nic %s with network %s",
nic_vdev, msg)
self._uncouple_nic(userid, nic_vdev, active=active)
def _delete_uesrid_again(self, rd, userid):
# ok, this is ugly, as we never know when this will happen
# so we try the stop again and ignore any exception here
try:
self.guest_stop(userid, timeout=30, poll_interval=10)
except Exception as err:
msg = "SMT error: %s" % err.format_message()
LOG.info("guest force stop when 596/6831: %s" % msg)
# wait some time to let guest shutoff and cleanup
time.sleep(2)
try:
self._request(rd)
except Exception as err:
msg = "SMT error: %s" % err.format_message()
LOG.info("guest force delete when 596/6831: %s" % msg)
    def delete_userid(self, userid):
        """Delete the guest's directory entry, tolerating benign errors.

        Known-benign SMAPI results (guest absent, delete still in
        progress, VDISK format limitation, CP/CMS non-zero return code)
        are logged and swallowed; anything else is re-raised as
        SDKSMTRequestFailed with a prefixed message.

        :param userid: guest userid to delete
        """
        rd = ' '.join(('deletevm', userid, 'directory'))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            # guest vm definition not found
            if err.results['rc'] == 400 and err.results['rs'] == 4:
                LOG.debug("The guest %s does not exist." % userid)
                return
            # ignore delete VM not finished error
            if err.results['rc'] == 596 and err.results['rs'] == 6831:
                # 596/6831 means delete VM not finished yet; retry once
                # via the best-effort helper.
                LOG.warning("The guest %s deleted with 596/6831" % userid)
                self._delete_uesrid_again(rd, userid)
                return
            # ignore delete VM with VDISK format error
            # DirMaint does not support formatting TDISK or VDISK extents.
            if err.results['rc'] == 596 and err.results['rs'] == 3543:
                LOG.debug("The guest %s deleted with 596/3543" % userid)
                return
            # The CP or CMS command shown resulted in a non-zero
            # return code. This message is frequently preceded by
            # a DMK, HCP, or DMS error message that describes the cause
            # https://www-01.ibm.com/servers/resourcelink/svc0302a.nsf/
            # pages/zVMV7R2gc246282/$file/hcpk2_v7r2.pdf
            if err.results['rc'] == 596 and err.results['rs'] == 2119:
                LOG.debug("The guest %s deleted with 596/2119" % userid)
                return
            msg = "SMT error: %s" % err.format_message()
            raise exception.SDKSMTRequestFailed(err.results, msg)
    def delete_vm(self, userid):
        """Delete a guest and clean up all of its SDK-side resources.

        Steps: delete the directory entry, remove the userid from the
        SMAPI namelist, revoke its vswitch authorizations, delete its
        network DB records, remove its persistent folder and finally
        delete it from the guest DB.

        :param userid: guest userid to delete
        """
        self.delete_userid(userid)
        # remove userid from smapi namelist
        self.namelist_remove(zvmutils.get_namelist(), userid)
        # revoke userid from vswitch
        action = "revoke id %s authority from vswitch" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            switch_info = self._NetDbOperator.switch_select_record_for_userid(
                userid)
            # Deduplicate: a guest may have several NICs on one vswitch.
            switch_list = set()
            for item in switch_info:
                switch_list.add(item['switch'])
            for item in switch_list:
                if item is not None:
                    self.revoke_user_from_vswitch(item, userid)
        # cleanup db record from network table
        action = "delete network record for user %s" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._NetDbOperator.switch_delete_record_for_userid(userid)
        # TODO: cleanup db record from volume table
        pass
        # cleanup persistent folder for guest
        self._pathutils.remove_guest_path(userid)
        # cleanup db record from guest table
        action = "delete guest %s from database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.delete_guest_by_userid(userid)
    def execute_cmd(self, userid, cmdStr):
        """Run a command on the guest over the IUCV channel ("cmdVM").

        :param userid: guest userid
        :param cmdStr: command string to execute on the guest
        :returns: list of response lines produced by the command
        :raises: re-raises SMT request failures after logging
        """
        requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
        with zvmutils.log_and_reraise_smt_request_failed(action='execute '
        'command on vm via iucv channel'):
            results = self._request(requestData)
            ret = results['response']
        return ret
    def execute_cmd_direct(self, userid, cmdStr, timeout=None):
        """Run a command on the guest ("cmdVM") returning the raw result.

        Unlike execute_cmd, no logging/re-raise wrapper is applied here;
        the raw result dict from the SMT layer is returned as-is.

        :param userid: guest userid
        :param cmdStr: command string to execute on the guest
        :param timeout: optional timeout value appended to the request
        :returns: raw results dict from self._smt.request
        """
        if not timeout:
            requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
        else:
            requestData = ("cmdVM %s CMD \'%s\' %s" % (userid, cmdStr,
                                                       timeout))
        results = self._smt.request(requestData)
        return results
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
# Ensure the specified image is not exist in image DB
if image_info:
msg = ("The image name %s has already exist in SDK image "
"database, please check if they are same image or consider"
" to use a different image name for import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
'error: %(err)s, please make sure there are enough space '
'on zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
if self.is_rhcos(image_os_version):
image_disk_type = image_meta.get('disk_type')
if ((image_disk_type is None) or
((image_disk_type.upper() != "DASD" and
image_disk_type.upper() != "SCSI"))):
msg = ('Disk type is required for RHCOS image import, '
'the value should be DASD or SCSI')
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
else:
comments = {'disk_type': image_disk_type.upper()}
comments = str(comments)
else:
comments = None
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure import a correct image
# TODO change to use query image name in DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import is not same as source image,"
" the image has been broken")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
# After import to image repository, figure out the image type is
# single disk image or multiple-disk image,if multiple disks image,
# extract it, if it's single image, rename its name to be same as
# specific vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For multiple disks image, extract it, after extract, the
# content under image folder is like: 0100, 0101, 0102
# and remove the image file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
if not self.is_rhcos(image_os_version):
disk_size_units = self._get_disk_size_units(final_image_fpath)
else:
disk_size_units = self._get_disk_size_units_rhcos(
final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type,
comments=comments)
LOG.info("Image %s is import successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise
    def image_export(self, image_name, dest_url, remote_host=None):
        """Export the specific image to remote host or local file system.

        :param image_name: image name that can be uniquely identify an image
        :param dest_url: the location to store exported image, a URL whose
                         scheme selects the transfer backend (see
                         _scheme2backend), eg. file:///opt/images
        :param remote_host: the server that export image to, the format is
                        username@IP eg. [email protected], if remote_host is
                        None, it means the image will be stored in local server
        :returns a dictionary that contains the exported image info
        {
        'image_name': the image_name that exported
        'image_path': the image_path after exported
        'os_version': the os version of the exported image
        'md5sum': the md5sum of the original image
        'comments': the comments of the original image
        }
        :raises SDKImageOperationError: rs=20 when the image is not found
            in the image repository database
        """
        image_info = self._ImageDbOperator.image_query_record(image_name)
        if not image_info:
            msg = ("The image %s does not exist in image repository"
                   % image_name)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=20, img=image_name)
        image_type = image_info[0]['type']
        # TODO: (nafei) according to image_type, detect image exported path
        # For multiple disk image, make the tgz firstly, the specify the
        # source_path to be something like: 0100-0101-0102.tgz
        if image_type == 'rootonly':
            source_path = '/'.join([CONF.image.sdk_image_repository,
                                    const.IMAGE_TYPE['DEPLOY'],
                                    image_info[0]['imageosdistro'],
                                    image_name,
                                    CONF.zvm.user_root_vdev])
        else:
            # NOTE(review): for non-rootonly images source_path is never
            # assigned, so the export call below would raise NameError -
            # the multiple-disk path is still unimplemented (TODO above).
            pass
        self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
            source_path, dest_url,
            remote_host=remote_host)
        # TODO: (nafei) for multiple disks image, update the expect_dict
        # to be the tgz's md5sum
        export_dict = {'image_name': image_name,
                       'image_path': dest_url,
                       'os_version': image_info[0]['imageosdistro'],
                       'md5sum': image_info[0]['md5sum'],
                       'comments': image_info[0]['comments']}
        LOG.info("Image %s export successfully" % image_name)
        return export_dict
    def _get_image_disk_size_units(self, image_path):
        """ Return a comma separated string to indicate the image disk size
        and units for each image disk file under image_path
        For single disk image , it looks like: 0100=3338:CYL
        For multiple disk image, it looks like:
        0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL"""
        # Not implemented yet: multiple-disk image support is still marked
        # TODO elsewhere in this class, so this helper remains a stub.
        pass
def _get_disk_size_units(self, image_path):
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = zvmutils.execute(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=5)
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=6)
if 'FBA' not in output and 'CKD' not in output:
raise exception.SDKImageOperationError(rs=7)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_disk_size_units_rhcos(self, image_path):
command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
image_path)
rc = 0
output = ""
try:
# shell should be set True because it is a shell command with
# pipeline, so can not use utils.execute function here
output = subprocess.check_output(command, shell=True,
stderr=subprocess.STDOUT)
output = bytes.decode(output)
except subprocess.CalledProcessError as err:
rc = err.returncode
output = err.output
except Exception as err:
err_msg = ('Command "%s" Error: %s' % (' '.join(command),
str(err)))
raise exception.SDKInternalError(msg=err_msg)
if rc or output.strip('1234567890*\n'):
msg = ("Error happened when executing command fdisk with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
image_size = output.split()[0]
try:
cyl = (float(image_size)) / 737280
cyl = str(int(math.ceil(cyl)))
except Exception:
msg = ("Failed to convert %s to a number of cylinders."
% image_size)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
disk_units = "CYL"
root_disk_units = ':'.join([str(cyl), disk_units])
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when executing command du -b with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name):
try:
target_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
# TODO: (nafei) Handle multiple disks image deploy
image_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
target_info[0]['imageosdistro'],
image_name])
return image_path
def _scheme2backend(self, scheme):
try:
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
except KeyError:
msg = ("No backend found for '%s'" % scheme)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=2, schema=scheme)
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
try:
current_md5 = hashlib.md5()
if isinstance(fpath, six.string_types) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, IOBase)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
except Exception:
msg = ("Failed to calculate the image's md5sum")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=3)
def _read_chunks(self, fh):
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
def image_delete(self, image_name):
# Delete image file
try:
self._delete_image_file(image_name)
# Delete image record from db
self._ImageDbOperator.image_delete_record(image_name)
except exception.SDKImageOperationError as err:
results = err.results
if ((results['rc'] == 300) and (results['rs'] == 20)):
LOG.warning("Image %s does not exist", image_name)
return
else:
LOG.error("Failed to delete image %s, error: %s" %
(image_name, err.format_message()))
raise
msg = ('Delete image %s successfully' % image_name)
LOG.info(msg)
def _delete_image_file(self, image_name):
image_path = self._get_image_path_by_name(image_name)
self._pathutils.clean_temp_folder(image_path)
def _get_image_last_access_time(self, image_name, raise_exception=True):
"""Get the last access time of the image."""
image_file = os.path.join(self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev)
if not os.path.exists(image_file):
if raise_exception:
msg = 'Failed to get time stamp of image:%s' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=23, img=image_name)
else:
# An invalid timestamp
return -1
atime = os.path.getatime(image_file)
return atime
def image_query(self, image_name=None):
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
# because database maybe None, so return nothing here
return []
# if image_name is not None, means there is only one record
if image_name:
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
image_info[0]['last_access_time'] = last_access_time
else:
for item in image_info:
image_name = item['imagename']
# set raise_exception to false because one failed
# may stop processing all the items in the list
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
item['last_access_time'] = last_access_time
return image_info
    def image_get_root_disk_size(self, image_name):
        """Return the root disk size portion of the specified image.

        image_name: the unique image name in db
        The stored disk_size_units field looks like 3339:CYL or
        467200:BLK; the value returned here is only the numeric part
        before the colon (e.g. '3339').
        NOTE(review): the previous doc claimed the full '<size>:<units>'
        string is returned, but the code strips the units - confirm which
        contract callers expect.
        """
        image_info = self.image_query(image_name)
        if not image_info:
            raise exception.SDKImageOperationError(rs=20, img=image_name)
        disk_size_units = image_info[0]['disk_size_units'].split(':')[0]
        return disk_size_units
def image_get_os_distro(self, image_name):
"""
Return the operating system distro of the specified image
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
os_distro = image_info[0]['imageosdistro']
return os_distro
def _get_image_disk_type(self, image_name):
"""
Return image disk type
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if ((image_info[0]['comments'] is not None) and
(image_info[0]['comments'].__contains__('disk_type'))):
image_disk_type = eval(image_info[0]['comments'])['disk_type']
if image_disk_type == 'DASD':
return 'ECKD'
elif image_disk_type == 'SCSI':
return 'SCSI'
else:
return None
else:
return None
def punch_file(self, userid, fn, fclass):
rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" %
{'uid': userid, 'file': fn, 'class': fclass})
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to punch file to userid '%s',"
"error: %s" % (userid, err.format_message()))
raise
finally:
os.remove(fn)
def get_guest_connection_status(self, userid):
'''Get guest vm connection status.'''
rd = ' '.join(('getvm', userid, 'isreachable'))
results = self._request(rd)
if results['rs'] == 1:
return True
else:
return False
def _generate_disk_parmline(self, vdev, fmt, mntdir):
parms = [
'action=' + 'addMdisk',
'vaddr=' + vdev,
'filesys=' + fmt,
'mntdir=' + mntdir
]
parmline = ' '.join(parms)
parmstr = "'" + parmline + "'"
return parmstr
def process_additional_minidisks(self, userid, disk_info):
'''Generate and punch the scripts used to process additional disk into
target vm's reader.
'''
for idx, disk in enumerate(disk_info):
vdev = disk.get('vdev') or self.generate_disk_vdev(
offset = (idx + 1))
fmt = disk.get('format')
mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
str(vdev)])
# the mount point of swap partition is swap
if fmt == "swap":
mount_dir = "swap"
disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
func_name = '/var/lib/zvmsdk/setupDisk'
self.aemod_handler(userid, func_name, disk_parms)
# trigger do-script
if self.get_power_state(userid) == 'on':
self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
    def aemod_handler(self, instance_name, func_name, parms):
        """Register an activation engine modification (aemod) script call
        for the guest, passing *parms* as the invocation parameters."""
        rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
                       '--invparms', parms])
        # NOTE(review): parms[0] is only the first character of the parms
        # string, so the action label used for error logging is truncated;
        # confirm whether the full parms string was intended here.
        action = parms[0] + instance_name
        with zvmutils.log_and_reraise_smt_request_failed(action):
            self._request(rd)
def get_user_console_output(self, userid):
# get console into reader
rd = 'getvm %s consoleoutput' % userid
action = 'get console log reader file list for guest vm: %s' % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
resp = self._request(rd)
with zvmutils.expect_invalid_resp_data(resp):
rf_list = resp['response'][0].rpartition(':')[2].strip().split()
# TODO: make sure reader device is online
# via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
# 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
# 'which udevadm &> /dev/null && udevadm settle || udevsettle'
logs = []
for rf in rf_list:
cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
rc, output = zvmutils.execute(cmd)
if rc == 0:
logs.append(output)
return ''.join(logs)
    def query_vswitch(self, switch_name):
        """Query extended details of a vswitch via SMAPI and parse the
        textual response into a dict.

        :param switch_name: name of the vswitch to query
        :returns: dict holding the vswitch basic attributes plus the
            'real_devices', 'authorized_users' and 'adapters' sub-dicts
        :raises SDKObjectNotExistError: when the vswitch does not exist
            (SMAPI rc=212, rs=40)
        :raises SDKSMTRequestFailed: for any other SMAPI failure
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
            smt_userid,
            "--operands",
            '-k switch_name=%s' % switch_name
            ))
        try:
            results = self._request(rd)
            rd_list = results['response']
        except exception.SDKSMTRequestFailed as err:
            if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
                msg = 'Vswitch %s does not exist' % switch_name
                LOG.error(msg)
                obj_desc = "Vswitch %s" % switch_name
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID='network')
            else:
                action = "query vswitch details info"
                msg = "Failed to %s. " % action
                msg += "SMT error: %s" % err.format_message()
                LOG.error(msg)
                raise exception.SDKSMTRequestFailed(err.results, msg)
        vsw_info = {}
        with zvmutils.expect_invalid_resp_data():
            # ignore user_vlan_id part and jump to the vswitch basic info
            idx_end = len(rd_list)
            idx = 0
            while ((idx < idx_end) and
                   not rd_list[idx].__contains__('switch_name')):
                idx = idx + 1
            # The next 21 lines contains the vswitch basic info
            # eg, name, type, port_type, vlan_awareness, etc
            for i in range(21):
                rd = rd_list[idx + i].split(':')
                vsw_info[rd[0].strip()] = rd[1].strip()
            idx = idx + 21
            # Skip the vepa_status
            while ((idx < idx_end) and
                   not rd_list[idx].__contains__('real_device_address') and
                   not rd_list[idx].__contains__('port_num') and
                   not rd_list[idx].__contains__('adapter_owner')):
                idx = idx + 1
            # Helper: read one 'keyword: value' line and advance the index.
            def _parse_value(data_list, idx, keyword, offset=1):
                value = data_list[idx].rpartition(keyword)[2].strip()
                if value == '(NONE)':
                    value = 'NONE'
                return idx + offset, value
            def _parse_dev_status(value):
                if value in const.DEV_STATUS.keys():
                    return const.DEV_STATUS[value]
                else:
                    return 'Unknown'
            def _parse_dev_err(value):
                if value in const.DEV_ERROR.keys():
                    return const.DEV_ERROR[value]
                else:
                    return 'Unknown'
            # Start to analyse the real devices info
            vsw_info['real_devices'] = {}
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('real_device_address')):
                # each rdev has 6 lines' info
                idx, rdev_addr = _parse_value(rd_list, idx,
                                              'real_device_address: ')
                idx, vdev_addr = _parse_value(rd_list, idx,
                                              'virtual_device_address: ')
                idx, controller = _parse_value(rd_list, idx,
                                               'controller_name: ')
                idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
                idx, dev_status = _parse_value(rd_list, idx,
                                               'device_status: ')
                idx, dev_err = _parse_value(rd_list, idx,
                                            'device_error_status ')
                vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
                                                       'controller': controller,
                                                       'port_name': port_name,
                                                       'dev_status':
                                                           _parse_dev_status(
                                                               dev_status),
                                                       'dev_err': _parse_dev_err(
                                                           dev_err)
                                                       }
                # Under some case there would be an error line in the output
                # "Error controller_name is NULL!!", skip this line
                if ((idx < idx_end) and
                        rd_list[idx].__contains__(
                            'Error controller_name is NULL!!')):
                    idx += 1
            # Start to get the authorized userids
            vsw_info['authorized_users'] = {}
            while ((idx < idx_end) and rd_list[idx].__contains__('port_num')):
                # each authorized userid has 6 lines' info at least
                idx, port_num = _parse_value(rd_list, idx,
                                             'port_num: ')
                idx, userid = _parse_value(rd_list, idx,
                                           'grant_userid: ')
                idx, prom_mode = _parse_value(rd_list, idx,
                                              'promiscuous_mode: ')
                idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
                idx, vlan_count = _parse_value(rd_list, idx,
                                               'vlan_count: ')
                vlan_ids = []
                for i in range(int(vlan_count)):
                    idx, id = _parse_value(rd_list, idx,
                                           'user_vlan_id: ')
                    vlan_ids.append(id)
                # For vlan unaware vswitch, the query smcli would
                # return vlan_count as 1, here we just set the count to 0
                if (vsw_info['vlan_awareness'] == 'UNAWARE'):
                    vlan_count = 0
                    vlan_ids = []
                vsw_info['authorized_users'][userid] = {
                    'port_num': port_num,
                    'prom_mode': prom_mode,
                    'osd_sim': osd_sim,
                    'vlan_count': vlan_count,
                    'vlan_ids': vlan_ids
                    }
            # Start to get the connected adapters info
            # OWNER_VDEV would be used as the dict key for each adapter
            vsw_info['adapters'] = {}
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('adapter_owner')):
                # each adapter has four line info: owner, vdev, macaddr, type
                idx, owner = _parse_value(rd_list, idx,
                                          'adapter_owner: ')
                idx, vdev = _parse_value(rd_list, idx,
                                         'adapter_vdev: ')
                idx, mac = _parse_value(rd_list, idx,
                                        'adapter_macaddr: ')
                idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
                key = owner + '_' + vdev
                vsw_info['adapters'][key] = {
                    'mac': mac,
                    'type': type
                    }
            # Todo: analyze and add the uplink NIC info and global member info
            def _parse_switch_status(value):
                if value in const.SWITCH_STATUS.keys():
                    return const.SWITCH_STATUS[value]
                else:
                    return 'Unknown'
            if 'switch_status' in vsw_info.keys():
                vsw_info['switch_status'] = _parse_switch_status(
                    vsw_info['switch_status'])
        return vsw_info
def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
nic_info = self._NetDbOperator.switch_select_record(userid=userid,
nic_id=nic_id, vswitch=vswitch)
return nic_info
def is_first_network_config(self, userid):
action = "get guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
info = self._GuestDbOperator.get_guest_by_userid(userid)
# check net_set
if int(info[3]) == 0:
return True
else:
return False
def update_guestdb_with_net_set(self, userid):
action = "update guest '%s' in database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')
def _is_OSA_free(self, OSA_device):
osa_info = self._query_OSA()
if 'OSA' not in osa_info.keys():
return False
elif len(osa_info['OSA']['FREE']) == 0:
return False
else:
dev1 = str(OSA_device).zfill(4).upper()
dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
if ((dev1 in osa_info['OSA']['FREE']) and
(dev2 in osa_info['OSA']['FREE']) and
(dev3 in osa_info['OSA']['FREE'])):
return True
else:
return False
    def _query_OSA(self):
        """Query all OSA devices on the system via the SMAPI
        Virtual_Network_OSA_Query call and group them by type and status.

        :returns: dict keyed by OSA type; each value holds 'FREE',
            'BOXED', 'OFFLINE' address lists and an 'ATTACHED' list of
            (userid, address) tuples. Empty dict when the system has no
            OSAs.
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
        OSA_info = {}
        try:
            results = self._request(rd)
            rd_list = results['response']
        except exception.SDKSMTRequestFailed as err:
            # rc=4/rs=4 just means there are no OSA devices at all
            if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
                msg = 'No OSAs on system'
                LOG.info(msg)
                return OSA_info
            else:
                action = "query OSA details info"
                msg = "Failed to %s. " % action
                msg += "SMT error: %s" % err.format_message()
                LOG.error(msg)
                raise exception.SDKSMTRequestFailed(err.results, msg)
        with zvmutils.expect_invalid_resp_data():
            idx_end = len(rd_list)
            idx = 0
            # Helper: read one 'keyword: value' line and advance the index.
            def _parse_value(data_list, idx, keyword, offset=1):
                value = data_list[idx].rpartition(keyword)[2].strip()
                return idx + offset, value
            # Start to analyse the osa devices info
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('OSA Address')):
                idx, osa_addr = _parse_value(rd_list, idx,
                                             'OSA Address: ')
                idx, osa_status = _parse_value(rd_list, idx,
                                               'OSA Status: ')
                idx, osa_type = _parse_value(rd_list, idx,
                                             'OSA Type: ')
                if osa_type != 'UNKNOWN':
                    idx, CHPID_addr = _parse_value(rd_list, idx,
                                                   'CHPID Address: ')
                    idx, Agent_status = _parse_value(rd_list, idx,
                                                    'Agent Status: ')
                if osa_type not in OSA_info.keys():
                    OSA_info[osa_type] = {}
                    OSA_info[osa_type]['FREE'] = []
                    OSA_info[osa_type]['BOXED'] = []
                    OSA_info[osa_type]['OFFLINE'] = []
                    OSA_info[osa_type]['ATTACHED'] = []
                # an 'ATT ...' status carries the owning userid as its
                # second token; record (userid, address) pairs
                if osa_status.__contains__('ATT'):
                    id = osa_status.split()[1]
                    item = (id, osa_addr)
                    OSA_info[osa_type]['ATTACHED'].append(item)
                else:
                    OSA_info[osa_type][osa_status].append(osa_addr)
        return OSA_info
def _get_available_vdev(self, userid, vdev=None):
ports_info = self._NetDbOperator.switch_select_table()
vdev_info = []
for p in ports_info:
if p['userid'] == userid.upper():
vdev_info.append(p['interface'])
if len(vdev_info) == 0:
# no nic defined for the guest
if vdev is None:
nic_vdev = CONF.zvm.default_nic_vdev
else:
nic_vdev = vdev
else:
if vdev is None:
used_vdev = max(vdev_info)
nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
else:
if self._is_vdev_valid(vdev, vdev_info):
nic_vdev = vdev
else:
errmsg = ("The specified virtual device number %s "
"has already been used." % vdev)
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
if ((len(nic_vdev) > 4) or
(len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
errmsg = ("Virtual device number %s is not valid" % nic_vdev)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return nic_vdev
    def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
        """Dedicate the three consecutive OSA devices starting at
        OSA_device to the guest.

        :param userid: guest userid
        :param OSA_device: base real address of the OSA triplet
        :param vdev: nic vdev to use; chosen automatically when None
        :param active: also dedicate on the running system, not only in
            the user directory
        :returns: the nic vdev actually used
        :raises SDKConflictError: rs=14 when the OSA devices are not free
        """
        nic_vdev = self._get_available_vdev(userid, vdev=vdev)
        if not self._is_OSA_free(OSA_device):
            errmsg = ("The specified OSA device number %s "
                      "is not free" % OSA_device)
            raise exception.SDKConflictError(modID='network', rs=14,
                                             osa=OSA_device, userid=userid,
                                             msg=errmsg)
        LOG.debug('Nic attributes: vdev is %(vdev)s, '
                  'dedicated OSA device is %(osa)s',
                  {'vdev': nic_vdev,
                   'osa': OSA_device})
        self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
        return nic_vdev
def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
OSA_device):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
((error.results['rc'] == 204) and (error.results['rs'] == 16))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
    def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
        """Dedicate three consecutive OSA devices to three consecutive
        guest vdevs, first in the user directory (DM) and optionally on
        the active system, undoing already-made dedications on failure.

        :param userid: guest userid
        :param OSA_device: base OSA real device address (hex string)
        :param vdev: base guest virtual device address (hex string)
        :param active: when True, also dedicate on the running guest
        """
        if active:
            self._is_active(userid)
        msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
               'to OSA device %(osa)s'
               % {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
        LOG.info(msg)
        def_vdev = vdev
        att_OSA_device = OSA_device
        # Dedicate the three devices in the user directory (DM) first.
        for i in range(3):
            requestData = ' '.join((
                'SMAPI %s API Image_Device_Dedicate_DM' %
                userid,
                "--operands",
                "-v %s" % def_vdev,
                "-r %s" % att_OSA_device))
            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err:
                LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
                          "in the guest's user direct, error: %s" %
                          (att_OSA_device, def_vdev, userid,
                           err.format_message()))
                # TODO revoke the dedicated OSA in user direct
                # Walk back over the vdevs already dedicated in this loop
                # and undedicate them before translating the error.
                while (int(def_vdev, 16) != int(vdev, 16)):
                    def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
                    requestData = ' '.join((
                        'SMAPI %s API Image_Device_Undedicate_DM' %
                        userid,
                        "--operands",
                        "-v %s" % def_vdev))
                    try:
                        self._request(requestData)
                    except (exception.SDKSMTRequestFailed,
                            exception.SDKInternalError) as err2:
                        # rc=404/rs=8 means not dedicated - ignore
                        if ((err2.results['rc'] == 404) and
                                (err2.results['rs'] == 8)):
                            pass
                        else:
                            LOG.error("Failed to Undedicate nic %s for user"
                                      " %s in the guest's user direct, "
                                      "error: %s" %
                                      (def_vdev, userid,
                                       err2.format_message()))
                            pass
                self._dedicate_OSA_inactive_exception(err, userid, vdev,
                                                      OSA_device)
            def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
            att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
        if active:
            def_vdev = vdev
            att_OSA_device = OSA_device
            # Repeat the dedication on the running system.
            for i in range(3):
                requestData = ' '.join((
                    'SMAPI %s API Image_Device_Dedicate' %
                    userid,
                    "--operands",
                    "-v %s" % def_vdev,
                    "-r %s" % att_OSA_device))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err:
                    LOG.error("Failed to dedicate OSA %s to nic %s for user "
                              "%s on the active guest system, error: %s" %
                              (att_OSA_device, def_vdev, userid,
                               err.format_message()))
                    # TODO revoke the dedicated OSA in user direct and active
                    # Roll back all three DM dedications made above.
                    detach_vdev = vdev
                    for j in range(3):
                        requestData = ' '.join((
                            'SMAPI %s API Image_Device_Undedicate_DM' %
                            userid,
                            "--operands",
                            "-v %s" % detach_vdev))
                        try:
                            self._request(requestData)
                        except (exception.SDKSMTRequestFailed,
                                exception.SDKInternalError) as err2:
                            if ((err2.results['rc'] == 404) and
                                    (err2.results['rs'] == 8)):
                                pass
                            else:
                                LOG.error("Failed to Undedicate nic %s for "
                                          "user %s in the guest's user "
                                          "direct, error: %s" %
                                          (def_vdev, userid,
                                           err2.format_message()))
                                pass
                        detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:]
                    # Also roll back the active dedications done so far.
                    while (int(def_vdev, 16) != int(vdev, 16)):
                        def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
                        requestData = ' '.join((
                            'SMAPI %s API Image_Device_Undedicate' %
                            userid,
                            "--operands",
                            "-v %s" % def_vdev))
                        try:
                            self._request(requestData)
                        except (exception.SDKSMTRequestFailed,
                                exception.SDKInternalError) as err3:
                            if ((err3.results['rc'] == 204) and
                                    (err3.results['rs'] == 8)):
                                pass
                            else:
                                LOG.error("Failed to Undedicate nic %s for "
                                          "user %s on the active guest "
                                          "system, error: %s" %
                                          (def_vdev, userid,
                                           err3.format_message()))
                                pass
                    self._dedicate_OSA_active_exception(err, userid,
                                                        OSA_device)
                def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
                att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
        OSA_desc = 'OSA=%s' % OSA_device
        self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc)
        msg = ('Dedicate nic device %(vdev)s of guest %(vm)s '
               'to OSA device %(osa)s successfully'
               % {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
        LOG.info(msg)
def _undedicate_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 44)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=16,
userid=userid, vdev=vdev,
msg=errmsg)
else:
raise error
def _undedicate_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=17,
userid=userid, vdev=vdev,
obj=obj_desc)
else:
raise error
    def _undedicate_nic(self, userid, vdev, active=False,
                        del_active_only=False):
        """Undedicate the three consecutive vdevs starting at *vdev* from
        the guest.

        :param userid: guest userid
        :param vdev: base virtual device address (hex string)
        :param active: also undedicate on the running guest
        :param del_active_only: skip the user-directory (DM) cleanup and
            the DB record removal; only act on the running guest
        """
        if active:
            self._is_active(userid)
        msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
        if not del_active_only:
            def_vdev = vdev
            for i in range(3):
                requestData = ' '.join((
                    'SMAPI %s API Image_Device_Undedicate_DM' %
                    userid,
                    "--operands",
                    "-v %s" % def_vdev))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err:
                    results = err.results
                    emsg = err.format_message()
                    # rc=404/rs=8 means the device was not dedicated in
                    # the user direct - warn and continue
                    if ((results['rc'] == 404) and
                        (results['rs'] == 8)):
                        LOG.warning("Virtual device %s does not exist in "
                                    "the guest's user direct", vdev)
                    else:
                        LOG.error("Failed to undedicate nic %s for %s in "
                                  "the guest's user direct, error: %s" %
                                  (vdev, userid, emsg))
                        self._undedicate_nic_inactive_exception(err, userid, vdev)
                def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
            self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
        if active:
            def_vdev = vdev
            for i in range(3):
                rd = ' '.join((
                    "SMAPI %s API Image_Device_Undedicate" %
                    userid,
                    "--operands",
                    '-v %s' % def_vdev))
                try:
                    self._request(rd)
                except exception.SDKSMTRequestFailed as err:
                    results = err.results
                    emsg = err.format_message()
                    # rc=204/rs=8 means not dedicated on the live system
                    if ((results['rc'] == 204) and
                        (results['rs'] == 8)):
                        LOG.warning("Virtual device %s does not exist on "
                                    "the active guest system", vdev)
                    else:
                        LOG.error("Failed to undedicate nic %s for %s on "
                                  "the active guest system, error: %s" %
                                  (vdev, userid, emsg))
                        self._undedicate_nic_active_exception(err, userid,
                                                              vdev)
                def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
        msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
def _request_with_error_ignored(self, rd):
"""Send smt request, log and ignore any errors."""
try:
return self._request(rd)
except Exception as err:
# log as warning and ignore namelist operation failures
LOG.warning(six.text_type(err))
def namelist_add(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_remove(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_query(self, namelist):
rd = "SMAPI %s API Name_List_Query" % namelist
resp = self._request_with_error_ignored(rd)
if resp is not None:
return resp['response']
else:
return []
def namelist_destroy(self, namelist):
rd = "SMAPI %s API Name_List_Destroy" % namelist
self._request_with_error_ignored(rd)
def _get_defined_cpu_addrs(self, userid):
user_direct = self.get_user_direct(userid)
defined_addrs = []
max_cpus = 0
for ent in user_direct:
if ent.startswith("CPU"):
cpu_addr = ent.split()[1].strip().upper()
defined_addrs.append(cpu_addr)
if ent.startswith("MACHINE ESA"):
max_cpus = int(ent.split()[2].strip())
return (max_cpus, defined_addrs)
def _get_available_cpu_addrs(self, used_addrs, max_cpus):
# Get available CPU addresses that are not defined in user entry
used_set = set(used_addrs)
available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
for i in range(0, max_cpus)])
available_addrs.difference_update(used_set)
return list(available_addrs)
def get_active_cpu_addrs(self, userid):
# Get the active cpu addrs in two-digit hex string in upper case
# Sample output for 'lscpu --parse=ADDRESS':
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # Address
# 0
# 1
active_addrs = []
active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
for c in active_cpus:
# Skip the comment lines at beginning
if c.startswith("# "):
continue
addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
active_addrs.append(addr)
return active_addrs
    def resize_cpus(self, userid, count):
        """Statically resize the guest's defined CPUs to *count* by adding
        or deleting CPU statements in the user directory.

        :param userid: guest userid
        :param count: requested number of defined CPUs
        :returns: tuple (action, cpu_addrs, max_cpus) for revert usage;
            action is 0 no change, 1 cpus added, 2 cpus deleted, and
            cpu_addrs lists the affected CPU addresses
        :raises SDKConflictError: rs=3 when no MACHINE ESA maximum is
            defined, rs=4 when count exceeds that maximum
        :raises SDKGuestOperationError: rs=6 on SMAPI update failures
        """
        # Check defined cpus in user entry. If greater than requested, then
        # delete cpus. Otherwise, add new cpus.
        # Return value: for revert usage, a tuple of
        # action: The action taken for this resize, possible values:
        #   0: no action, 1: add cpu, 2: delete cpu
        # cpu_addrs: list of influenced cpu addrs
        action = 0
        updated_addrs = []
        (max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid)
        defined_count = len(defined_addrs)
        # Check maximum cpu count defined
        if max_cpus == 0:
            LOG.error("Resize for guest '%s' cann't be done. The maximum "
                      "number of cpus is not defined in user directory." %
                      userid)
            raise exception.SDKConflictError(modID='guest', rs=3,
                                             userid=userid)
        # Check requested count is less than the maximum cpus
        if count > max_cpus:
            LOG.error("Resize for guest '%s' cann't be done. The "
                      "requested number of cpus: '%i' exceeds the maximum "
                      "number of cpus allowed: '%i'." %
                      (userid, count, max_cpus))
            raise exception.SDKConflictError(modID='guest', rs=4,
                                             userid=userid,
                                             req=count, max=max_cpus)
        # Check count and take action
        if defined_count == count:
            LOG.info("The number of current defined CPUs in user '%s' equals "
                     "to requested count: %i, no action for static resize"
                     "needed." % (userid, count))
            return (action, updated_addrs, max_cpus)
        elif defined_count < count:
            action = 1
            # add more CPUs
            available_addrs = self._get_available_cpu_addrs(defined_addrs,
                                                            max_cpus)
            # sort the list and get the first few addrs to use
            available_addrs.sort()
            # Define new cpus in user directory
            rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
                          "--operands"))
            updated_addrs = available_addrs[0:count - defined_count]
            for addr in updated_addrs:
                rd += (" -k CPU=CPUADDR=%s" % addr)
            # Add resize support for share of CPU
            if CONF.zvm.user_default_share_unit > 0:
                total = CONF.zvm.user_default_share_unit * count
                rd += (" -k SHARE=RELATIVE=%s" % total)
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as e:
                msg = ("Define new cpus in user directory for '%s' failed with"
                       " SMT error: %s" % (userid, e.format_message()))
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=6, userid=userid,
                                                       err=e.format_message())
            LOG.info("New CPUs defined in user directory for '%s' "
                     "successfully" % userid)
            return (action, updated_addrs, max_cpus)
        else:
            action = 2
            # Delete CPUs
            defined_addrs.sort()
            # the last (defined_count - count) addresses are removed
            updated_addrs = defined_addrs[-(defined_count - count):]
            # Delete the last few cpus in user directory
            rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid,
                          "--operands"))
            for addr in updated_addrs:
                rd += (" -k CPU=CPUADDR=%s" % addr)
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as e:
                msg = ("Delete CPUs in user directory for '%s' failed with"
                       " SMT error: %s" % (userid, e.format_message()))
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=6, userid=userid,
                                                       err=e.format_message())
            LOG.info("CPUs '%s' deleted from user directory for '%s' "
                     "successfully" % (str(updated_addrs), userid))
            # Add resize support for share of CPU
            if CONF.zvm.user_default_share_unit > 0:
                total = CONF.zvm.user_default_share_unit * count
                rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
                              "--operands -k SHARE=RELATIVE=%s" % total))
                try:
                    self._request(rd)
                except exception.SDKSMTRequestFailed as e:
                    msg = ("Update share statement in user directory for '%s' failed with"
                           " SMT error: %s" % (userid, e.format_message()))
                    LOG.error(msg)
                    raise exception.SDKGuestOperationError(rs=6, userid=userid,
                                                           err=e.format_message())
                LOG.info("Update share statment in user directory for '%s' "
                         "successfully" % userid)
            return (action, updated_addrs, max_cpus)
    def live_resize_cpus(self, userid, count):
        """Live-resize the CPU count of a running guest to *count*.

        The user directory is updated first (static resize via
        resize_cpus); then the new CPUs are defined in the active
        configuration with 'vmcp def cpu' and hot-plugged with
        'chcpu -r'. If the online step fails, the directory change is
        rolled back so directory and active configuration stay in sync.

        :param userid: the guest to resize
        :param count: requested total number of CPUs
        :raises SDKConflictError: if the active CPU count already exceeds
            the requested count
        :raises SDKGuestOperationError: if defining, rescanning or
            enabling the CPUs fails
        """
        # Get active cpu count and compare with requested count
        # If request count is smaller than the current count, then report
        # error and exit immediately.
        active_addrs = self.get_active_cpu_addrs(userid)
        active_count = len(active_addrs)
        if active_count > count:
            LOG.error("Failed to live resize cpus of guest: %(uid)s, "
                      "current active cpu count: %(cur)i is greater than "
                      "the requested count: %(req)i." %
                      {'uid': userid, 'cur': active_count,
                       'req': count})
            raise exception.SDKConflictError(modID='guest', rs=2,
                                             userid=userid,
                                             active=active_count,
                                             req=count)
        # Static resize CPUs. (add or delete CPUs from user directory)
        # action: 0 = no directory change, 1 = CPUs added, 2 = CPUs deleted
        (action, updated_addrs, max_cpus) = self.resize_cpus(userid, count)
        if active_count == count:
            # active count equals to requested
            LOG.info("Current active cpu count of guest: '%s' equals to the "
                     "requested count: '%i', no more actions needed for "
                     "live resize." % (userid, count))
            LOG.info("Live resize cpus for guest: '%s' finished successfully."
                     % userid)
            return
        else:
            # Get the number of cpus to add to active and check address
            active_free = self._get_available_cpu_addrs(active_addrs,
                                                        max_cpus)
            active_free.sort()
            active_new = active_free[0:count - active_count]
            # Do live resize
            # Define new cpus
            cmd_str = "vmcp def cpu " + ' '.join(active_new)
            try:
                self.execute_cmd(userid, cmd_str)
            except exception.SDKSMTRequestFailed as err1:
                # rollback and return
                msg1 = ("Define cpu of guest: '%s' to active failed with . "
                        "error: %s." % (userid, err1.format_message()))
                # Start to do rollback
                if action == 0:
                    # Directory was not touched by the static resize:
                    # nothing to revert, just log and raise below.
                    LOG.error(msg1)
                else:
                    LOG.error(msg1 + (" Will revert the user directory "
                                      "change."))
                    # Combine influenced cpu addrs
                    cpu_entries = ""
                    for addr in updated_addrs:
                        cpu_entries += (" -k CPU=CPUADDR=%s" % addr)
                    rd = ''
                    if action == 1:
                        # Delete added CPUs
                        rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM"
                                      % userid, " --operands"))
                    else:
                        # Add deleted CPUs
                        rd = ''.join(("SMAPI %s API Image_Definition_Create_DM"
                                      % userid, " --operands"))
                    rd += cpu_entries
                    try:
                        self._request(rd)
                    except exception.SDKSMTRequestFailed as err2:
                        msg = ("Failed to revert user directory change for '"
                               "%s', SMT error: %s" % (userid,
                                                       err2.format_message()))
                        LOG.error(msg)
                    else:
                        # revert change for share statement
                        if CONF.zvm.user_default_share_unit > 0:
                            old = CONF.zvm.user_default_share_unit * active_count
                            rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
                                          "--operands -k SHARE=RELATIVE=%s" % old))
                            try:
                                self._request(rd)
                            except exception.SDKSMTRequestFailed as e:
                                msg = ("Failed to revert user directory change of share for '"
                                       "%s', SMT error: %s" % (userid, e.format_message()))
                                LOG.error(msg)
                            else:
                                LOG.info("Revert user directory change for '%s' "
                                         "successfully." % userid)
                        else:
                            LOG.info("Revert user directory change for '%s' "
                                     "successfully." % userid)
                # Finally raise the exception
                raise exception.SDKGuestOperationError(
                    rs=7, userid=userid, err=err1.format_message())
            # Activate successfully, rescan in Linux layer to hot-plug new cpus
            LOG.info("Added new CPUs to active configuration of guest '%s'" %
                     userid)
            try:
                self.execute_cmd(userid, "chcpu -r")
            except exception.SDKSMTRequestFailed as err:
                msg = err.format_message()
                LOG.error("Rescan cpus to hot-plug new defined cpus for guest: "
                          "'%s' failed with error: %s. No rollback is done and you"
                          "may need to check the status and restart the guest to "
                          "make the defined cpus online." % (userid, msg))
                raise exception.SDKGuestOperationError(rs=8, userid=userid,
                                                       err=msg)
            uname_out = self.execute_cmd(userid, "uname -a")
            if uname_out and len(uname_out) >= 1:
                distro = uname_out[0]
            else:
                distro = ''
            if 'ubuntu' in distro or 'Ubuntu' in distro \
                    or 'UBUNTU' in distro:
                try:
                    # need use chcpu -e <cpu-list> to make cpu online for Ubuntu
                    online_cmd = "chcpu -e " + ','.join(active_new)
                    self.execute_cmd(userid, online_cmd)
                except exception.SDKSMTRequestFailed as err:
                    msg = err.format_message()
                    LOG.error("Enable cpus for guest: '%s' failed with error: %s. "
                              "No rollback is done and you may need to check the "
                              "status and restart the guest to make the defined "
                              "cpus online." % (userid, msg))
                    raise exception.SDKGuestOperationError(rs=15, userid=userid,
                                                           err=msg)
            LOG.info("Live resize cpus for guest: '%s' finished successfully."
                     % userid)
def _get_defined_memory(self, userid):
user_direct = self.get_user_direct(userid)
defined_mem = max_mem = reserved_mem = -1
for ent in user_direct:
# u'USER userid password storage max privclass'
if ent.startswith("USER "):
fields = ent.split(' ')
if len(fields) < 6:
# This case should not exist if the target user
# is created by zcc and not updated manually by user
break
defined_mem = int(zvmutils.convert_to_mb(fields[3]))
max_mem = int(zvmutils.convert_to_mb(fields[4]))
# For legacy guests, the reserved memory may not be defined
if ent.startswith("COMMAND DEF STOR RESERVED"):
reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4]))
return (defined_mem, max_mem, reserved_mem, user_direct)
    def _replace_user_direct(self, userid, user_entry):
        """Replace the guest's user directory entry via Image_Replace_DM.

        :param user_entry: new definition, either a list of lines or a
            single string (empty list entries are skipped)
        :raises SDKSMTRequestFailed: if the replace request fails; an
            unlock is attempted first so the entry is not left locked
        """
        # user_entry can be a list or a string
        entry_str = ""
        if isinstance(user_entry, list):
            for ent in user_entry:
                if ent == "":
                    # skip empty line
                    continue
                else:
                    entry_str += (ent + '\n')
        else:
            entry_str = user_entry
        # Image_Replace_DM reads the new definition from a file, so write
        # the content to a temporary file first.
        tmp_folder = tempfile.mkdtemp()
        tmp_user_direct = os.path.join(tmp_folder, userid)
        with open(tmp_user_direct, 'w') as f:
            f.write(entry_str)
        rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid,
                      "--operands ",
                      "-f %s" % tmp_user_direct))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err1:
            msg = ("Replace definition of guest '%s' failed with "
                   "SMT error: %s." % (userid, err1.format_message()))
            LOG.error(msg)
            LOG.debug("Unlocking the user directory.")
            rd = ("SMAPI %s API Image_Unlock_DM " % userid)
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err2:
                # ignore 'not locked' error
                if ((err2.results['rc'] == 400) and (
                    err2.results['rs'] == 24)):
                    LOG.debug("Guest '%s' unlocked successfully." % userid)
                    pass
                else:
                    # just print error and ignore this unlock error
                    msg = ("Unlock definition of guest '%s' failed "
                           "with SMT error: %s" %
                           (userid, err2.format_message()))
                    LOG.error(msg)
            else:
                LOG.debug("Guest '%s' unlocked successfully." % userid)
            # at the end, raise the replace error for upper layer to handle
            raise err1
        finally:
            # Always remove the temporary definition file.
            self._pathutils.clean_temp_folder(tmp_folder)
def _lock_user_direct(self, userid):
rd = ("SMAPI %s API Image_Lock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
# ignore the "already locked" error
if ((e.results['rc'] == 400) and (e.results['rs'] == 12)):
LOG.debug("Image is already unlocked.")
else:
msg = ("Lock definition of guest '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise e
    def resize_memory(self, userid, memory):
        """Statically resize the guest's memory in the user directory.

        Rewrites the USER statement's defined storage field and the
        'COMMAND DEF STOR RESERVED' statement so defined + reserved
        stays consistent with the maximum memory size.

        :param memory: requested size in a unit accepted by
            zvmutils.convert_to_mb (e.g. '4096m', '4g')
        :returns: tuple (action, defined_mem, max_mem, user_direct);
            action is 1 when the directory was changed, 0 when the
            requested size was already defined
        :raises SDKConflictError: when sizes cannot be determined or the
            request exceeds the maximum
        :raises SDKGuestOperationError: when locking or replacing the
            directory entry fails
        """
        # Check defined storage in user entry.
        # Update STORAGE and RESERVED accordingly.
        size = int(zvmutils.convert_to_mb(memory))
        (defined_mem, max_mem, reserved_mem,
         user_direct) = self._get_defined_memory(userid)
        # Check max memory is properly defined
        if max_mem == -1 or reserved_mem == -1:
            LOG.error("Memory resize for guest '%s' cann't be done."
                      "Failed to get the defined/max/reserved memory size "
                      "from user directory." % userid)
            raise exception.SDKConflictError(modID='guest', rs=19,
                                             userid=userid)
        action = 0
        # Make sure requested size is less than the maximum memory size
        if size > max_mem:
            LOG.error("Memory resize for guest '%s' cann't be done. The "
                      "requested memory size: '%im' exceeds the maximum "
                      "size allowed: '%im'." %
                      (userid, size, max_mem))
            raise exception.SDKConflictError(modID='guest', rs=20,
                                             userid=userid,
                                             req=size, max=max_mem)
        # check if already satisfy request
        if defined_mem == size:
            LOG.info("The current defined memory size in user '%s' equals "
                     "to requested size: %im, no action for memory resize "
                     "needed." % (userid, size))
            return (action, defined_mem, max_mem, user_direct)
        else:
            # set action to 1 to represent that revert need to be done when
            # live resize failed.
            action = 1
            # get the new reserved memory size
            new_reserved = max_mem - size
            # get maximum reserved memory value
            MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
                CONF.zvm.user_default_max_reserved_memory))
            # when new reserved memory value > the MAX_STOR_RESERVED,
            # make is as the MAX_STOR_RESERVED value
            if new_reserved > MAX_STOR_RESERVED:
                new_reserved = MAX_STOR_RESERVED
            # prepare the new user entry content
            entry_str = ""
            for ent in user_direct:
                if ent == '':
                    # Avoid adding an empty line in the entry file
                    # otherwise Image_Replace_DM would return syntax error.
                    continue
                new_ent = ""
                if ent.startswith("USER "):
                    # Rewrite only field index 3 (defined storage) of the
                    # USER statement, keeping every other field as-is.
                    fields = ent.split(' ')
                    for i in range(len(fields)):
                        # update fields[3] to new defined size
                        if i != 3:
                            new_ent += (fields[i] + ' ')
                        else:
                            new_ent += (str(size) + 'M ')
                    # remove the last space
                    new_ent = new_ent.strip()
                elif ent.startswith("COMMAND DEF STOR RESERVED"):
                    new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved)
                else:
                    new_ent = ent
                # append this new entry
                entry_str += (new_ent + '\n')
            # Lock and replace user definition with the new_entry content
            try:
                self._lock_user_direct(userid)
            except exception.SDKSMTRequestFailed as e:
                raise exception.SDKGuestOperationError(rs=9, userid=userid,
                                                       err=e.format_message())
            LOG.debug("User directory Locked successfully for guest '%s' " %
                      userid)
            # Replace user directory
            try:
                self._replace_user_direct(userid, entry_str)
            except exception.SDKSMTRequestFailed as e:
                raise exception.SDKGuestOperationError(rs=10,
                                                       userid=userid,
                                                       err=e.format_message())
            # Finally return useful info
            return (action, defined_mem, max_mem, user_direct)
def _revert_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed:
# print revert error and return
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory Locked successfully for guest '%s'." %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, user_entry)
except exception.SDKSMTRequestFailed:
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory reverted successfully for guest '%s'." %
userid)
def _get_active_memory(self, userid):
# Return an integer value representing the active memory size in mb
output = self.execute_cmd(userid, "lsmem")
active_mem = 0
for e in output:
# cmd output contains line starts with "Total online memory",
# its format can be like:
# "Total online memory : 8192 MB"
# or
# "Total online memory: 8G"
# need handle both formats
if e.startswith("Total online memory"):
try:
# sample mem_info_str: "8192MB" or "8G"
mem_info_str = e.split(':')[1].replace(' ', '').upper()
# make mem_info as "8192M" or "8G"
if mem_info_str.endswith('B'):
mem_info = mem_info_str[:-1]
else:
mem_info = mem_info_str
active_mem = int(zvmutils.convert_to_mb(mem_info))
except (IndexError, ValueError, KeyError, TypeError) as e:
errmsg = ("Failed to get active storage size for guest: %s"
% userid)
LOG.error(errmsg + " with error: " + six.text_type(e))
raise exception.SDKInternalError(msg=errmsg)
break
return active_mem
    def live_resize_memory(self, userid, memory):
        """Live-resize the memory of a running guest to *memory*.

        Performs the static resize first, then defines the size delta as
        standby storage ('vmcp def storage standby') and brings it online
        with 'chmem -e'. On failure both the standby definition and the
        user directory change (if any) are reverted.

        :raises SDKConflictError: if the active size already exceeds the
            request, or the increase exceeds the maximum reserved memory
        :raises SDKGuestOperationError: on failure of any resize step
        """
        # Get active memory size and compare with requested size
        # If request size is smaller than the current size, then report
        # error and exit immediately.
        size = int(zvmutils.convert_to_mb(memory))
        active_size = self._get_active_memory(userid)
        if active_size > size:
            LOG.error("Failed to live resize memory of guest: %(uid)s, "
                      "current active memory size: %(cur)im is greater than "
                      "the requested size: %(req)im." %
                      {'uid': userid, 'cur': active_size,
                       'req': size})
            raise exception.SDKConflictError(modID='guest', rs=18,
                                             userid=userid,
                                             active=active_size,
                                             req=size)
        # get maximum reserved memory value
        MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
            CONF.zvm.user_default_max_reserved_memory))
        # The maximum increased memory size in one live resizing can't
        # exceed MAX_STOR_RESERVED
        increase_size = size - active_size
        if increase_size > MAX_STOR_RESERVED:
            LOG.error("Live memory resize for guest '%s' cann't be done. "
                      "The memory size to be increased: '%im' is greater "
                      " than the maximum reserved memory size: '%im'." %
                      (userid, increase_size, MAX_STOR_RESERVED))
            raise exception.SDKConflictError(modID='guest', rs=21,
                                             userid=userid,
                                             inc=increase_size,
                                             max=MAX_STOR_RESERVED)
        # Static resize memory. (increase/decrease memory from user directory)
        # action == 1 means the directory was changed and a revert is
        # required if the live steps below fail.
        (action, defined_mem, max_mem,
         user_direct) = self.resize_memory(userid, memory)
        # Compare active size and requested size, then update accordingly
        if active_size == size:
            # online memory already satisfied
            LOG.info("Current active memory size of guest: '%s' equals to the "
                     "requested size: '%iM', no more actions needed for "
                     "live resize." % (userid, size))
            LOG.info("Live resize memory for guest: '%s' finished "
                     "successfully." % userid)
            return
        else:
            # Do live resize. update memory size
            # Step1: Define new standby storage
            cmd_str = ("vmcp def storage standby %sM" % increase_size)
            try:
                self.execute_cmd(userid, cmd_str)
            except exception.SDKSMTRequestFailed as e:
                # rollback and return
                msg = ("Define standby memory of guest: '%s' failed with "
                       "error: %s." % (userid, e.format_message()))
                LOG.error(msg)
                # Start to do rollback
                if action == 1:
                    LOG.debug("Start to revert user definition of guest '%s'."
                              % userid)
                    self._revert_user_direct(userid, user_direct)
                # Finally, raise the error and exit
                raise exception.SDKGuestOperationError(rs=11,
                                                       userid=userid,
                                                       err=e.format_message())
            # Step 2: Online new memory
            cmd_str = ("chmem -e %sM" % increase_size)
            try:
                self.execute_cmd(userid, cmd_str)
            except exception.SDKSMTRequestFailed as err1:
                # rollback and return
                msg1 = ("Online memory of guest: '%s' failed with "
                        "error: %s." % (userid, err1.format_message()))
                LOG.error(msg1)
                # Start to do rollback
                LOG.info("Start to do revert.")
                LOG.debug("Reverting the standby memory.")
                try:
                    self.execute_cmd(userid, "vmcp def storage standby 0M")
                except exception.SDKSMTRequestFailed as err2:
                    # print revert error info and continue
                    msg2 = ("Revert standby memory of guest: '%s' failed with "
                            "error: %s." % (userid, err2.format_message()))
                    LOG.error(msg2)
                # Continue to do the user directory change.
                if action == 1:
                    LOG.debug("Reverting the user directory change of guest "
                              "'%s'." % userid)
                    self._revert_user_direct(userid, user_direct)
                # Finally raise the exception
                raise exception.SDKGuestOperationError(
                    rs=7, userid=userid, err=err1.format_message())
        LOG.info("Live resize memory for guest: '%s' finished successfully."
                 % userid)
def is_rhcos(self, os_version):
return os_version.lower().startswith('rhcos')
def _get_wwpn_lun(self, userid):
user_direct = self.get_user_direct(userid)
wwpn = None
lun = None
for ent in user_direct:
if ent.upper().startswith("LOADDEV PORT"):
wwpn = ent.split()[2].strip()
elif ent.upper().startswith("LOADDEV LUN"):
lun = ent.split()[2].strip()
return (wwpn, lun)
def host_get_ssi_info(self):
msg = ('Start SSI_Query')
LOG.info(msg)
rd = 'SMAPI HYPERVISOR API SSI_Query'
try:
results = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if err.results['rc'] == 4 and err.results['rs'] == 3008:
# System is not a member of an SSI cluster
LOG.debug("Host is not a member of an SSI cluster.")
return []
msg = "SMT error: %s" % err.format_message()
raise exception.SDKSMTRequestFailed(err.results, msg)
LOG.error("Failed to query SSI information.")
if results['rc'] == 0 and results['rs'] == 0 \
and results.get('response'):
return results.get('response')
return []
def guest_get_kernel_info(self, userid):
# Get the kernel info of 'uname -srm'
try:
kernel_info = self.execute_cmd(userid, "uname -srm")
return kernel_info[0]
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Get kernel info from the guest %s failed: %s"
% (userid, msg))
return ''
class FilesystemBackend(object):
    """Image import/export backed by local or remote (scp) file systems."""

    @classmethod
    def image_import(cls, image_name, url, target, **kwargs):
        """Import image from remote host to local image repository using scp.
        If remote_host not specified, it means the source file exist in local
        file system, just copy the image to image repository

        :param url: source URL; only its path component is used
        :param target: destination path in the local image repository
        :param kwargs: must contain 'remote_host' -- 'user@host' for an
            scp copy, or a false value for a local copy
        """
        source = urlparse.urlparse(url).path
        if kwargs['remote_host']:
            if '@' in kwargs['remote_host']:
                # remote_host looks like 'user@host': copy via scp
                source_path = ':'.join([kwargs['remote_host'], source])
                command = ' '.join(['/usr/bin/scp',
                                    "-P", CONF.zvm.remotehost_sshd_port,
                                    "-o StrictHostKeyChecking=no",
                                    '-r ', source_path, target])
                (rc, output) = zvmutils.execute(command)
                if rc:
                    msg = ("Copying image file from remote filesystem failed"
                           " with reason: %s" % output)
                    LOG.error(msg)
                    raise exception.SDKImageOperationError(rs=10, err=output)
            else:
                # A remote_host without '@' is rejected as malformed.
                msg = ("The specified remote_host %s format invalid" %
                       kwargs['remote_host'])
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=11,
                                                       rh=kwargs['remote_host'])
        else:
            LOG.debug("Remote_host not specified, will copy from local")
            try:
                shutil.copyfile(source, target)
            except Exception as err:
                msg = ("Import image from local file system failed"
                       " with reason %s" % six.text_type(err))
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=12,
                                                       err=six.text_type(err))

    @classmethod
    def image_export(cls, source_path, dest_url, **kwargs):
        """Export the specific image to remote host or local file system """
        dest_path = urlparse.urlparse(dest_url).path
        if kwargs['remote_host']:
            # Copy to the remote host over scp.
            target_path = ':'.join([kwargs['remote_host'], dest_path])
            command = ' '.join(['/usr/bin/scp',
                                "-P", CONF.zvm.remotehost_sshd_port,
                                "-o StrictHostKeyChecking=no",
                                '-r ', source_path, target_path])
            (rc, output) = zvmutils.execute(command)
            if rc:
                msg = ("Error happened when copying image file to remote "
                       "host with reason: %s" % output)
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=21, msg=output)
        else:
            # Copy to local file system
            LOG.debug("Remote_host not specified, will copy to local server")
            try:
                shutil.copyfile(source_path, dest_path)
            except Exception as err:
                msg = ("Export image from %(src)s to local file system"
                       " %(dest)s failed: %(err)s" %
                       {'src': source_path,
                        'dest': dest_path,
                        'err': six.text_type(err)})
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=22,
                                                       err=six.text_type(err))
class HTTPBackend(object):
    """Image import backend that downloads the image over HTTP."""

    @classmethod
    def image_import(cls, image_name, url, target, **kwargs):
        """Download *url* into *target* with the multi-threaded downloader."""
        downloader = MultiThreadDownloader(image_name, url, target)
        downloader.run()
class MultiThreadDownloader(threading.Thread):
    """Download a file over HTTP using multiple range-request threads.

    The total size is taken from a HEAD request, split into one byte
    range per thread, and each thread writes its chunk into the shared
    target file under a lock.
    """

    def __init__(self, image_name, url, target):
        super(MultiThreadDownloader, self).__init__()
        self.url = url
        # Set thread number
        self.threadnum = 8
        r = requests.head(self.url)
        # Get the size of the download resource
        self.totalsize = int(r.headers['Content-Length'])
        self.target = target

    def handle_download_errors(func):
        """Decorator: close the target file and re-raise any failure as
        SDKImageOperationError."""
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as err:
                self.fd.close()
                msg = ("Download image from http server failed: %s" %
                       six.text_type(err))
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=9,
                                                       err=six.text_type(err))
        return wrapper

    def get_range(self):
        """Split [0, totalsize) into per-thread (start, end) ranges.

        The last range is open-ended ('') so it also covers the
        remainder when totalsize is not divisible by threadnum.
        """
        ranges = []
        offset = int(self.totalsize / self.threadnum)
        for i in range(self.threadnum):
            if i == self.threadnum - 1:
                ranges.append((i * offset, ''))
            else:
                # Get the process range for each thread
                ranges.append((i * offset, (i + 1) * offset))
        return ranges

    def download(self, start, end):
        """Fetch one byte range and write it at its offset in the file."""
        # NOTE(review): the HTTP range unit is conventionally lower-case
        # 'bytes' (RFC 7233); most servers also accept this capitalized
        # form -- confirm against the target server.
        headers = {'Range': 'Bytes=%s-%s' % (start, end),
                   'Accept-Encoding': '*'}
        # Get the data
        res = requests.get(self.url, headers=headers)
        LOG.debug("Downloading file range %s:%s success" % (start, end))
        # seek+write must be atomic per chunk since all threads share fd
        with _LOCK:
            self.fd.seek(start)
            self.fd.write(res.content)

    @handle_download_errors
    def run(self):
        # Bug fix: open in binary mode -- res.content is bytes, and
        # writing bytes to a text-mode ('w') file raises TypeError.
        self.fd = open(self.target, 'wb')
        thread_list = []
        n = 0
        for ran in self.get_range():
            start, end = ran
            LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
            n += 1
            # Open thread
            thread = threading.Thread(target=self.download,
                                      args=(start, end))
            thread.start()
            thread_list.append(thread)
        for t in thread_list:
            t.join()
        LOG.info('Download %s success' % (self.name))
        self.fd.close()
import threading
import time
from zvmsdk import config
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import utils as zvmutils
_MONITOR = None
CONF = config.CONF
LOG = log.LOG
def get_monitor():
    """Return the process-wide ZVMMonitor singleton, creating it lazily."""
    global _MONITOR
    if _MONITOR is not None:
        return _MONITOR
    _MONITOR = ZVMMonitor()
    return _MONITOR
class ZVMMonitor(object):
    """Monitor support for ZVM"""
    # Supported metering data types; one cache slot exists per type.
    _TYPES = ('cpumem', 'vnics')

    def __init__(self):
        # Per-type metering cache, SMT client, and the z/VM namelist that
        # groups the monitored guests.
        self._cache = MeteringCache(self._TYPES)
        self._smtclient = smtclient.get_smtclient()
        self._namelist = zvmutils.get_namelist()

    def inspect_stats(self, uid_list):
        """Return CPU/memory stats keyed by userid.

        Userids without data (e.g. powered off) are omitted from the
        result.
        """
        cpumem_data = self._get_inspect_data('cpumem', uid_list)
        # construct and return final result
        stats_data = {}
        for uid in uid_list:
            if uid in cpumem_data:
                with zvmutils.expect_invalid_resp_data():
                    user_data = cpumem_data[uid]
                    guest_cpus = int(user_data['guest_cpus'])
                    # raw values look like '<number> <unit>'; keep the number
                    used_cpu_time = user_data['used_cpu_time']
                    used_cpu_time = int(used_cpu_time.partition(' ')[0])
                    elapsed_cpu_time = int(
                        user_data['elapsed_cpu_time'].partition(' ')[0])
                    used_mem = int(user_data['used_memory'].partition(' ')[0])
                    max_mem = int(user_data['max_memory'].partition(' ')[0])
                    min_mem = int(user_data['min_memory'].partition(' ')[0])
                    shared_mem = int(
                        user_data['shared_memory'].partition(' ')[0])
                    stats_data[uid] = {
                        'guest_cpus': guest_cpus,
                        'used_cpu_time_us': used_cpu_time,
                        'elapsed_cpu_time_us': elapsed_cpu_time,
                        'min_cpu_count': int(user_data['min_cpu_count']),
                        'max_cpu_limit': int(user_data['max_cpu_limit']),
                        'samples_cpu_in_use': int(user_data['samples_cpu_in_use']),
                        'samples_cpu_delay': int(user_data['samples_cpu_delay']),
                        'used_mem_kb': used_mem,
                        'max_mem_kb': max_mem,
                        'min_mem_kb': min_mem,
                        'shared_mem_kb': shared_mem
                        }
        return stats_data

    def inspect_vnics(self, uid_list):
        """Return per-NIC vswitch statistics keyed by userid."""
        vnics = self._get_inspect_data('vnics', uid_list)
        # construct and return final result
        target_vnics = {}
        for uid in uid_list:
            if uid in vnics:
                with zvmutils.expect_invalid_resp_data():
                    target_vnics[uid] = vnics[uid]
        return target_vnics

    def _cache_enabled(self):
        # Caching is on only when a positive interval is configured.
        return CONF.monitor.cache_interval > 0

    def _get_inspect_data(self, type, uid_list):
        """Return data of *type* for uid_list, refreshing on a cache miss.

        A refresh is triggered only when a cache-missing guest is
        powered on; in that case all guests are re-queried at once.
        """
        inspect_data = {}
        update_needed = False
        for uid in uid_list:
            if not zvmutils.valid_userid(uid):
                continue
            cache_data = self._cache.get(type, uid)
            if cache_data is not None:
                inspect_data[uid] = cache_data
            else:
                if self._smtclient.get_power_state(uid) == 'on':
                    update_needed = True
                    inspect_data = {}
                    break
        # If all data are found in cache, just return
        if not update_needed:
            return inspect_data
        # Call client to query latest data
        rdata = {}
        if type == 'cpumem':
            rdata = self._update_cpumem_data(uid_list)
        elif type == 'vnics':
            rdata = self._update_nic_data()
        return rdata

    def _update_cpumem_data(self, uid_list):
        """Query performance data for the namelist, first adding any
        SDK-managed userids from uid_list that are missing from it."""
        namelist_uids = self._smtclient.namelist_query(self._namelist)
        sdk_managed_uids = self._smtclient.get_vm_list()
        mis_uids = list((set(uid_list) -
                         set(namelist_uids)).intersection(set(sdk_managed_uids)))
        for muid in mis_uids:
            self._smtclient.namelist_add(self._namelist, muid)
        rdata = {}
        if self._cache_enabled():
            rdata = self._smtclient.system_image_performance_query(
                self._namelist)
            self._cache.refresh('cpumem', rdata)
        else:
            rdata = self._smtclient.system_image_performance_query(
                self._namelist)
        return rdata

    def _update_nic_data(self):
        """Query vswitch byte statistics and group NIC entries by userid."""
        nics = {}
        vsw_dict = self._smtclient.virtual_network_vswitch_query_byte_stats()
        with zvmutils.expect_invalid_resp_data():
            for vsw in vsw_dict['vswitches']:
                for nic in vsw['nics']:
                    userid = nic['userid']
                    nic_entry = {
                        'vswitch_name': vsw['vswitch_name'],
                        'nic_vdev': nic['vdev'],
                        'nic_fr_rx': int(nic['nic_fr_rx']),
                        'nic_fr_tx': int(nic['nic_fr_tx']),
                        'nic_fr_rx_dsc': int(nic['nic_fr_rx_dsc']),
                        'nic_fr_tx_dsc': int(nic['nic_fr_tx_dsc']),
                        'nic_fr_rx_err': int(nic['nic_fr_rx_err']),
                        'nic_fr_tx_err': int(nic['nic_fr_tx_err']),
                        'nic_rx': int(nic['nic_rx']),
                        'nic_tx': int(nic['nic_tx'])}
                    if nics.get(userid, None) is None:
                        nics[userid] = [nic_entry]
                    else:
                        nics[userid].append(nic_entry)
        # Update cache if enabled
        if self._cache_enabled():
            self._cache.refresh('vnics', nics)
        return nics
class MeteringCache(object):
    """Cache for metering data.

    One slot per cache type (e.g. 'cpumem', 'vnics'), each holding an
    expiration timestamp and a data dict. All access is serialized with
    a reentrant lock so nested calls (e.g. refresh -> clear -> set) and
    concurrent callers are safe.
    """

    def __init__(self, types):
        self._cache = {}
        self._types = types
        self._lock = threading.RLock()
        self._reset(types)

    def _reset(self, types):
        """Re-create empty, already-expired slots for every type in *types*."""
        with zvmutils.acquire_lock(self._lock):
            for ctype in types:
                self._cache[ctype] = {'expiration': time.time(),
                                      'data': {},
                                      }

    def _get_ctype_cache(self, ctype):
        return self._cache[ctype]

    def set(self, ctype, key, data):
        """Set or update cache content.

        :param ctype: cache type
        :param key: the key to be set value
        :param data: cache data
        """
        with zvmutils.acquire_lock(self._lock):
            target_cache = self._get_ctype_cache(ctype)
            target_cache['data'][key] = data

    def get(self, ctype, key):
        """Return cached data for *key*, or None when absent or expired."""
        with zvmutils.acquire_lock(self._lock):
            target_cache = self._get_ctype_cache(ctype)
            if (time.time() > target_cache['expiration']):
                return None
            else:
                return target_cache['data'].get(key, None)

    def delete(self, ctype, key):
        """Remove *key* from the given cache type if present."""
        with zvmutils.acquire_lock(self._lock):
            target_cache = self._get_ctype_cache(ctype)
            if key in target_cache['data']:
                del target_cache['data'][key]

    def clear(self, ctype='all'):
        """Empty one cache type, or every type when ctype == 'all'."""
        with zvmutils.acquire_lock(self._lock):
            if ctype == 'all':
                # Bug fix: _reset() requires the list of types; the
                # original called self._reset() with no argument, which
                # raised TypeError whenever clear('all') ran.
                self._reset(self._types)
            else:
                target_cache = self._get_ctype_cache(ctype)
                target_cache['data'] = {}

    def refresh(self, ctype, data):
        """Replace the content of *ctype* and renew its expiration time."""
        with zvmutils.acquire_lock(self._lock):
            self.clear(ctype)
            target_cache = self._get_ctype_cache(ctype)
            target_cache['expiration'] = (time.time() +
                                          float(CONF.monitor.cache_interval))
            for (k, v) in data.items():
                self.set(ctype, k, v)
import os
import dist
import tarfile
import shutil
import stat
from zvmsdk import config
CONF = config.CONF
_DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
def _generate_vdev(base, offset):
    """Generate virtual device number based on base vdev

    :param base: base virtual device number, string of 4 bit hex.
    :param offset: offset to base, integer.
    """
    raw = hex(int(base, 16) + offset)
    # strip the '0x' prefix and left-pad to four hex digits
    return raw[2:].rjust(4, '0')
def get_cfg_str(network_interface_info, os_version):
    """Build the ifcfg-style network configuration file content.

    Produces a RHEL network-scripts style file with qeth layer2 options
    and the three subchannel addresses derived from the NIC base vdev.
    """
    ip_v4 = network_interface_info['ip_addr']
    address_read = network_interface_info['nic_vdev']
    broadcast_v4 = network_interface_info['broadcast_v4']
    gateway_v4 = network_interface_info['gateway_v4']
    netmask_v4 = network_interface_info['netmask_v4']
    nic_vdev = network_interface_info['nic_vdev']
    # read/write/data subchannels are three consecutive device numbers
    subchannels = ','.join(('0.0.' + nic_vdev,
                            '0.0.' + _generate_vdev(nic_vdev, 1),
                            '0.0.' + _generate_vdev(nic_vdev, 2)))
    linuxdist = dist.LinuxDistManager().get_linux_dist(os_version)()
    device_name = linuxdist.get_device_name(0)
    lines = ['DEVICE=' + device_name,
             'BOOTPROTO=static',
             'BROADCAST=' + broadcast_v4,
             'GATEWAY=' + gateway_v4,
             'IPADDR=' + ip_v4,
             'NETMASK=' + netmask_v4,
             'NETTYPE=qeth',
             'ONBOOT=yes',
             'PORTNAME=PORT' + address_read,
             'OPTIONS="layer2=1"',
             'SUBCHANNELS=' + subchannels]
    return '\n'.join(lines) + '\n'
def generate_net_file(network_interface_info, net_file_path, os_version):
    """Render the guest network config and write it to *net_file_path*."""
    content = get_cfg_str(network_interface_info, os_version)
    generate_file(content, net_file_path)
def get_znetconfig_str(os_version):
    """Build the content of the znetconfig.sh activation script."""
    handler = dist.LinuxDistManager().get_linux_dist(os_version)()
    settle_cmds = handler.get_znetconfig_contents()
    # The script removes itself once it has run.
    script = '# !/bin/sh\n' + settle_cmds
    script += '\nrm -rf /tmp/znetconfig.sh\n'
    return script
def generate_znetconfig_file(znetconfig_path, os_version):
    """Render the znetconfig script and write it to *znetconfig_path*."""
    content = get_znetconfig_str(os_version)
    generate_file(content, znetconfig_path)
def get_meta_data_str():
    """Return the static meta_data.json content for the config drive.

    The content maps the two generated files (network ifcfg and
    znetconfig.sh) to their config-drive content paths.
    """
    parts = [
        '{"files":[{"path":'
        '"/etc/sysconfig/network-scripts/ifcfg-enccw0.0.1000", ',
        '"content_path": "/content/0000"},'
        '{"path": "/tmp/znetconfig.sh", "content_path":'
        ' "/content/0001"}], ',
        '"uuid": "4ec7a80d-201a-4c17-afbc-b0a93b66133b", ',
        '"availability_zone": "nova", ',
        '"hostname": "eckdrh72.5.novalocal", ',
        '"launch_index": 0, ',
        '"project_id": "94f8dc6644f24785a1383959dbba3f9e", ',
        '"name": "eckdrh72.5"}',
    ]
    return ''.join(parts)
def generate_meta_data(meta_data_path):
    """Write the static meta_data.json content to *meta_data_path*."""
    generate_file(get_meta_data_str(), meta_data_path)
def generate_file(file_content, path):
    """Write *file_content* to *path*, creating or truncating the file.

    :param file_content: text to write
    :param path: destination file path
    """
    # Use a context manager so the file handle is closed even when
    # write() raises (the original leaked the handle on error).
    with open(path, 'w') as f:
        f.write(file_content)
def create_config_drive(network_interface_info, os_version):
    """Generate config driver for zVM guest vm.

    :param dict network_interface_info: Required keys:
        ip_addr - (str) IP address
        nic_vdev - (str) VDEV of the nic
        gateway_v4 - IPV4 gateway
        broadcast_v4 - IPV4 broadcast address
        netmask_v4 - IPV4 netmask
    :param str os_version: operating system version of the guest
    :returns: path of the generated cfgdrive.tgz tarball
    """
    temp_path = CONF.guest.temp_path
    if not os.path.exists(temp_path):
        os.mkdir(temp_path)
    cfg_dir = os.path.join(temp_path, 'openstack')
    # Start from a clean directory tree on every invocation.
    if os.path.exists(cfg_dir):
        shutil.rmtree(cfg_dir)
    content_dir = os.path.join(cfg_dir, 'content')
    latest_dir = os.path.join(cfg_dir, 'latest')
    os.mkdir(cfg_dir)
    os.mkdir(content_dir)
    os.mkdir(latest_dir)
    net_file = os.path.join(content_dir, '0000')
    generate_net_file(network_interface_info, net_file, os_version)
    znetconfig_file = os.path.join(content_dir, '0001')
    generate_znetconfig_file(znetconfig_file, os_version)
    meta_data_path = os.path.join(latest_dir, 'meta_data.json')
    generate_meta_data(meta_data_path)
    network_data_path = os.path.join(latest_dir, 'network_data.json')
    generate_file('{}', network_data_path)
    vendor_data_path = os.path.join(latest_dir, 'vendor_data.json')
    generate_file('{}', vendor_data_path)
    tar_path = os.path.join(temp_path, 'cfgdrive.tgz')
    # Fixes: use a context manager so the tarball is closed even on
    # error, and archive via 'arcname' instead of os.chdir() -- the
    # original mutated the process-wide working directory as a side
    # effect and never restored it.
    with tarfile.open(tar_path, "w:gz") as tar:
        tar.add(cfg_dir, arcname='openstack')
    return tar_path
import abc
import netaddr
import os
import six
from jinja2 import Environment, FileSystemLoader
from zvmsdk import config
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
CONF = config.CONF
LOG = log.LOG
@six.add_metaclass(abc.ABCMeta)
class LinuxDist(object):
"""Linux distribution base class
Due to we need to interact with linux dist and inject different files
according to the dist version. Currently RHEL6, RHEL7, SLES11, SLES12
, UBUNTU16 and RHCOS4 are supported.
"""
    def __init__(self):
        # Shared SMT client used by subclasses for guest interaction.
        self._smtclient = smtclient.get_smtclient()
    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm
        :param list guest_networks:  a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address,
               'dns_addr': (list) dns addresses,
               'gateway_addr': (str) gateway address,
               'cidr': (str) cidr format
               'nic_vdev': (str) VDEV of the nic}

               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003}]
        :returns cfg_files: the network interface configuration file name
                            and file content
                 cmd_strings: shell command, helps to enable the network
                              interface, will be put into znetconfig file
                 clean_cmd: if first is true, it is used to erase the previous
                            network interface configuration, will be put into
                            invokeScript file
                 net_enable_cmd: 'ip addr' and 'ip link' command to enable the
                                 new network interface
        """
        cfg_files = []
        cmd_strings = ''
        udev_cfg_str = ''
        dns_cfg_str = ''
        route_cfg_str = ''
        net_enable_cmd = ''
        cmd_str = None
        # NOTE(review): the file_path parameter is immediately overwritten
        # by the distro-specific path -- confirm callers do not rely on
        # the value they pass in.
        file_path = self._get_network_file_path()
        file_name_route = file_path + 'routes'
        if first:
            # Only the first configuration run erases previous settings.
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
        file_name_dns = self._get_dns_filename()
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            file_name = self._get_device_filename(base_vdev)
            (cfg_str, cmd_str, dns_str,
             route_str, net_cmd) = self._generate_network_configuration(
                                                network,
                                                base_vdev, active=active)
            LOG.debug('Network configure file content is: %s', cfg_str)
            target_net_conf_file_name = file_path + file_name
            cfg_files.append((target_net_conf_file_name, cfg_str))
            udev_cfg_str += self._get_udev_configuration(base_vdev,
                                '0.0.' + str(base_vdev).zfill(4))
            self._append_udev_rules_file(cfg_files, base_vdev)
            if cmd_str is not None:
                cmd_strings += cmd_str
            if net_cmd is not None:
                net_enable_cmd += net_cmd
            if len(dns_str) > 0:
                dns_cfg_str += dns_str
            if len(route_str) > 0:
                route_cfg_str += route_str
        if len(dns_cfg_str) > 0:
            cfg_files.append((file_name_dns, dns_cfg_str))
        cmd_strings = self._append_udev_info(cmd_strings, cfg_files,
                                             file_name_route,
                                             route_cfg_str,
                                             udev_cfg_str, first)
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd
    def _generate_network_configuration(self, network, vdev, active=False):
        """Build all configuration artifacts for one network interface.

        :param dict network: network info; may contain 'ip_addr',
            'gateway_addr', 'dns_addr' (list), 'cidr' and 'mtu' keys
        :param str vdev: base virtual device number (hex string); this is
            the read channel, write/data channels are vdev+1 and vdev+2
        :param bool active: when True also build the 'ip addr'/'ip link'
            commands that bring the interface up immediately
        :returns: tuple (cfg_str, cmd_str, dns_str, route_str, net_cmd)
        """
        ip_v4 = dns_str = gateway_v4 = ''
        ip_cidr = netmask_v4 = broadcast_v4 = ''
        net_cmd = mtu = ''
        dns_v4 = []
        if (('ip_addr' in network.keys()) and
            (network['ip_addr'] is not None)):
            ip_v4 = network['ip_addr']
        if (('gateway_addr' in network.keys()) and
            (network['gateway_addr'] is not None)):
            gateway_v4 = network['gateway_addr']
        if (('dns_addr' in network.keys()) and
            (network['dns_addr'] is not None) and
            (len(network['dns_addr']) > 0)):
            for dns in network['dns_addr']:
                dns_str += 'nameserver ' + dns + '\n'
                dns_v4.append(dns)
        if (('cidr' in network.keys()) and
            (network['cidr'] is not None)):
            ip_cidr = network['cidr']
            netmask_v4 = str(netaddr.IPNetwork(ip_cidr).netmask)
            broadcast_v4 = str(netaddr.IPNetwork(ip_cidr).broadcast)
            # netaddr yields None (stringified to 'None') for /31 and /32
            # networks; emit an empty broadcast address in that case
            if broadcast_v4 == 'None':
                broadcast_v4 = ''
        if (('mtu' in network.keys()) and
            (network['mtu'] is not None)):
            mtu = str(network['mtu'])
        device = self._get_device_name(vdev)
        # qeth devices use three consecutive subchannels: read, write, data
        address_read = str(vdev).zfill(4)
        address_write = str(hex(int(vdev, 16) + 1))[2:].zfill(4)
        address_data = str(hex(int(vdev, 16) + 2))[2:].zfill(4)
        subchannels = '0.0.%s' % address_read.lower()
        subchannels += ',0.0.%s' % address_write.lower()
        subchannels += ',0.0.%s' % address_data.lower()
        cfg_str = self._get_cfg_str(device, broadcast_v4, gateway_v4,
                                    ip_v4, netmask_v4, address_read,
                                    subchannels, dns_v4, mtu)
        cmd_str = self._get_cmd_str(address_read, address_write,
                                    address_data)
        route_str = self._get_route_str(gateway_v4)
        if active and ip_v4 != '':
            if ip_cidr != '':
                mask = ip_cidr.rpartition('/')[2]
            else:
                # no CIDR known: fall back to a /32 host route
                mask = '32'
            full_ip = '%s/%s' % (ip_v4, mask)
            net_cmd = self._enable_network_interface(device, full_ip,
                                                     broadcast_v4)
        return cfg_str, cmd_str, dns_str, route_str, net_cmd
def get_simple_znetconfig_contents(self):
return '\n'.join(('cio_ignore -R',
'znetconf -A',
'cio_ignore -u'))
def get_device_name(self, vdev):
return self._get_device_name(vdev)
def get_network_configuration_files(self, vdev):
vdev = vdev.lower()
file_path = self._get_network_file_path()
device = self._get_device_filename(vdev)
target_net_conf_file_name = os.path.join(file_path, device)
return target_net_conf_file_name
def delete_vdev_info(self, vdev):
cmd = self._delete_vdev_info(vdev)
return cmd
    @abc.abstractmethod
    def _get_network_file_path(self):
        """Get network file configuration path.

        :returns: directory (str) containing interface config files
        """
        pass
def get_change_passwd_command(self, admin_password):
"""construct change password command
:admin_password: the password to be changed to
"""
return "echo 'root:%s' | chpasswd" % admin_password
    @abc.abstractmethod
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """Generate the punch script for volume attachment configuration."""
        pass
    @abc.abstractmethod
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """Generate the punch script for volume detachment configuration.

        :param connections: remaining connection count; implementations
            treat a value <= 0 as "this was the last attached volume"
        """
        pass
    @abc.abstractmethod
    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels):
        """construct configuration file of network device.

        NOTE(review): concrete subclasses declare additional parameters
        (dns_v4/mtu on rhel and sles, a reduced set on ubuntu); this
        abstract signature is not authoritative — confirm before relying
        on it.
        """
        pass
    @abc.abstractmethod
    def _get_device_filename(self, vdev):
        """construct the name of a network device file."""
        pass
    @abc.abstractmethod
    def _get_route_str(self, gateway_v4):
        """construct a router string."""
        pass
    @abc.abstractmethod
    def _enable_network_interface(self, device, ip, broadcast):
        """construct the commands that activate a network interface.

        (The previous docstring "construct a router string" was a
        copy-paste error.)
        """
        pass
    @abc.abstractmethod
    def _get_clean_command(self):
        """construct a clean command to remove."""
        pass
    @abc.abstractmethod
    def _get_cmd_str(self, address_read, address_write, address_data):
        """construct network startup command string."""
        pass
    @abc.abstractmethod
    def _get_dns_filename(self):
        """construct the name of dns file."""
        pass
    @abc.abstractmethod
    def get_znetconfig_contents(self):
        """construct znetconfig file will be called during first boot."""
        pass
    @abc.abstractmethod
    def _get_device_name(self, vdev):
        """construct the name of a network device."""
        pass
    @abc.abstractmethod
    def _get_udev_configuration(self, device, dev_channel):
        """construct udev configuration info."""
        pass
    @abc.abstractmethod
    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info."""
        pass
    @abc.abstractmethod
    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        # base behaviour: nothing to append, pass the command string through
        return cmd_str
    @abc.abstractmethod
    def _append_udev_rules_file(self, cfg_files, base_vdev):
        """append per-device udev rules entries to cfg_files."""
        pass
    @abc.abstractmethod
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        pass
    @abc.abstractmethod
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """construct the lines composing the script to generate
        the /etc/zipl.conf file
        """
        pass
    @abc.abstractmethod
    def create_active_net_interf_cmd(self):
        """construct active command which will initialize and configure vm."""
        pass
    @abc.abstractmethod
    def _delete_vdev_info(self, vdev):
        """delete udev rules file."""
        pass
def generate_set_hostname_script(self, hostname):
lines = ['#!/bin/bash\n',
'echo -n %s > /etc/hostname\n' % hostname,
'/bin/hostname %s\n' % hostname]
return lines
def get_template(self, module, template_name):
relative_path = module + "/templates"
base_path = os.path.dirname(os.path.abspath(__file__))
template_file_path = os.path.join(base_path, relative_path,
template_name)
template_file_directory = os.path.dirname(template_file_path)
template_loader = FileSystemLoader(searchpath=template_file_directory)
env = Environment(loader=template_loader)
template = env.get_template(template_name)
return template
def get_extend_partition_cmds(self):
template = self.get_template("vmactions", "grow_root_volume.j2")
content = template.render()
return content
class rhel(LinuxDist):
    """Shared logic for Red Hat Enterprise Linux guests (sysconfig style)."""
    def _get_network_file_path(self):
        """Directory holding the ifcfg-* files on RHEL."""
        return '/etc/sysconfig/network-scripts/'
    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels, dns_v4, mtu):
        """Build an ifcfg-* file body for a qeth layer2 device."""
        cfg_str = 'DEVICE=\"' + device + '\"\n'
        cfg_str += 'BOOTPROTO=\"static\"\n'
        cfg_str += 'BROADCAST=\"' + broadcast_v4 + '\"\n'
        cfg_str += 'GATEWAY=\"' + gateway_v4 + '\"\n'
        cfg_str += 'IPADDR=\"' + ip_v4 + '\"\n'
        cfg_str += 'NETMASK=\"' + netmask_v4 + '\"\n'
        cfg_str += 'NETTYPE=\"qeth\"\n'
        cfg_str += 'ONBOOT=\"yes\"\n'
        cfg_str += 'PORTNAME=\"PORT' + address_read + '\"\n'
        cfg_str += 'OPTIONS=\"layer2=1\"\n'
        cfg_str += 'SUBCHANNELS=\"' + subchannels + '\"\n'
        cfg_str += 'MTU=\"' + mtu + '\"\n'
        if (dns_v4 is not None) and (len(dns_v4) > 0):
            # name servers are numbered DNS1, DNS2, ... as sysconfig expects
            i = 1
            for dns in dns_v4:
                cfg_str += 'DNS' + str(i) + '=\"' + dns + '\"\n'
                i += 1
        return cfg_str
    def _get_route_str(self, gateway_v4):
        # routing is handled via the GATEWAY line in the ifcfg file
        return ''
    def _get_cmd_str(self, address_read, address_write, address_data):
        # no extra channel-group setup commands are needed on RHEL
        return ''
    def _get_dns_filename(self):
        return '/etc/resolv.conf'
    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)
    def _get_udev_configuration(self, device, dev_channel):
        return ''
    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        # nothing udev-specific to add; pass the command string through
        return cmd_str
    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info."""
        return ''
    def _append_udev_rules_file(self, cfg_files, base_vdev):
        pass
    def _enable_network_interface(self, device, ip, broadcast):
        return ''
    def _delete_vdev_info(self, vdev):
        return ''
class rhel6(rhel):
    """RHEL 6: ethNNNN device names, sysvinit service activation."""
    def get_znetconfig_contents(self):
        """znetconfig script body executed on first boot."""
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'service network restart',
                          'cio_ignore -u'))
    def _get_device_filename(self, vdev):
        return 'ifcfg-eth' + str(vdev).zfill(4)
    def _get_all_device_filename(self):
        # glob matching every generated interface config file
        return 'ifcfg-eth*'
    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        return ("=root=%(root)s selinux=0 "
                "rd_ZFCP=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """Script lines that write /etc/zipl_volume.conf and run zipl."""
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'timeout=5\\n'
                 'default=boot-from-volume\\n'
                 'target=/boot/\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'rd_ZFCP=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s selinux=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]
    def create_active_net_interf_cmd(self):
        """Start zvmguestconfigure via sysvinit."""
        return 'service zvmguestconfigure start'
    def _get_clean_command(self):
        """Shell snippet removing every generated ifcfg-eth* file."""
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files
    def generate_set_hostname_script(self, hostname):
        """RHEL6 stores the hostname in /etc/sysconfig/network."""
        lines = ['#!/bin/bash\n',
                 'sed -i "s/^HOSTNAME=.*/HOSTNAME=%s/" '
                 '/etc/sysconfig/network\n' % hostname,
                 '/bin/hostname %s\n' % hostname]
        return lines
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        "volume attachment is not implemented for RHEL6"
        func_name = 'get_volume_attach_configuration_cmds'
        raise exception.SDKFunctionNotImplementError(func=func_name,
                                                     modID='volume')
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        "volume detachment is not implemented for RHEL6"
        func_name = 'get_volume_detach_configuration_cmds'
        raise exception.SDKFunctionNotImplementError(func=func_name,
                                                     modID='volume')
class rhel7(rhel):
    """RHEL 7: enccw0.0.NNNN device names, systemd activation."""
    def get_znetconfig_contents(self):
        """znetconfig script body executed on first boot."""
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'cio_ignore -u'))
    def _get_device_filename(self, vdev):
        # Construct a device like ifcfg-enccw0.0.1000, ifcfg-enccw0.0.1003
        return 'ifcfg-enccw0.0.' + str(vdev).zfill(4)
    def _get_all_device_filename(self):
        return 'ifcfg-enccw0.0.*'
    def _get_device_name(self, vdev):
        # Construct a device like enccw0.0.1000, enccw0.0.1003
        return 'enccw0.0.' + str(vdev).zfill(4)
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        return ("=root=%(root)s selinux=0 zfcp.allow_lun_scan=0 "
                "rd.zfcp=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """Script lines that write /etc/zipl_volume.conf and run zipl."""
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'timeout=5\\n'
                 'default=boot-from-volume\\n'
                 'target=/boot/\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'rd.zfcp=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s '
                 'zfcp.allow_lun_scan=0 selinux=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]
    def _enable_network_interface(self, device, ip, broadcast):
        """'ip addr'/'ip link' commands bringing *device* up with *ip*."""
        if len(broadcast) > 0:
            activeIP_str = 'ip addr add %s broadcast %s dev %s\n' % (ip,
                            broadcast, device)
        else:
            activeIP_str = 'ip addr add %s dev %s\n' % (ip, device)
        activeIP_str += 'ip link set dev %s up\n' % device
        return activeIP_str
    def create_active_net_interf_cmd(self):
        """Start the zvmguestconfigure systemd service."""
        return 'systemctl start zvmguestconfigure.service'
    def _get_clean_command(self):
        """Shell snippet removing every generated ifcfg file."""
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """rhel7 attach script generation (rhel7_attach_volume.j2)"""
        template = self.get_template("volumeops", "rhel7_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """rhel7 detach script generation (rhel7_detach_volume.j2)"""
        if connections > 0:
            # other attachments remain: this is NOT the last volume, so
            # the FCP devices must stay online
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "rhel7_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content
class rhel8(rhel7):
    """RHEL 8: device names drop the 'ccw0.0.' infix (enc1000)."""
    def _get_device_filename(self, vdev):
        return 'ifcfg-enc' + str(vdev).zfill(4)
    def _get_all_device_filename(self):
        return 'ifcfg-enc*'
    def _get_device_name(self, vdev):
        # Construct a device like enc1000
        return 'enc' + str(vdev).zfill(4)
    def _get_clean_command(self):
        """Shell snippet removing every generated ifcfg-enc* file."""
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """rhel8 attach script generation (rhel8_attach_volume.j2)"""
        template = self.get_template("volumeops", "rhel8_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """rhel8 detach script generation (rhel8_detach_volume.j2)"""
        if connections > 0:
            # other attachments remain: this is NOT the last volume, so
            # the FCP devices must stay online
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "rhel8_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content
class rhel9(rhel8):
    """RHEL 9: no deviations from the RHEL 8 behaviour."""
    pass
class rhcos(LinuxDist):
    """Red Hat CoreOS: network config is passed via a kernel-style
    ip= parameter string instead of generated configuration files."""
    def create_coreos_parameter(self, network_info, userid=''):
        """Build the fixed-ip parameter string for the first NIC.

        :param list network_info: list of vif dicts; only the first entry
            is used (see TODO below)
        :param str userid: guest userid, used as default hostname
        :returns str: "<ip>::<gw>:<mask>:<hostname>:<nic>:none:<dns1>:<dns2>;<mtu>"
        :raises: re-raises any error after logging it
        """
        try:
            # TODO: fix the limitation that assuming the first nic configured
            vif = network_info[0]
            ip_addr = vif['ip_addr']
            gateway_addr = vif['gateway_addr']
            netmask = vif['cidr'].split("/")[-1]
            nic_name = "enc" + vif.get('nic_vdev', CONF.zvm.default_nic_vdev)
            hostname = vif.get('hostname', userid) or "localhost"
            # update dns name server info if they're defined in subnet
            _dns = ["", ""]
            if 'dns_addr' in vif.keys():
                if ((vif['dns_addr'] is not None) and
                    (len(vif['dns_addr']) > 0)):
                    # BUGFIX: the parameter only has room for two name
                    # servers; iterating the full list used to raise
                    # IndexError when more than two were defined
                    for _index, dns in enumerate(vif['dns_addr'][:2]):
                        _dns[_index] = dns
            mtu = vif['mtu']
            # transform network info and hostname into the form of
            # ip=<client-IP>:[<peer>]:<gateway-IP>:<netmask>:<client_hostname>
            # :<interface>:none[:[<dns1>][:<dns2>]];<mtu>
            result = "%s::%s:%s:%s:%s:none:%s:%s;%s" % (ip_addr, gateway_addr,
                     netmask, hostname, nic_name, _dns[0], _dns[1], mtu)
            return result
        except Exception as err:
            LOG.error("Failed to create coreos parameter for userid '%s',"
                      "error: %s" % (userid, err))
            raise
    def create_coreos_parameter_temp_file(self, network_info, userid):
        """Write the coreos fixed-ip parameter to <guest tmp>/fixed_ip_param.

        :returns bool: True on success, False on any failure (logged)
        """
        # Create the coreos parameters for ZCC, includes ignitionUrl, diskType,
        # nicID and ipConfig, then save them in a temp file
        try:
            result = self.create_coreos_parameter(network_info, userid)
            tmp_path = self._smtclient.get_guest_path(userid.upper())
            LOG.debug("Created coreos fixed ip parameter: %(result)s, "
                      "writing them to tempfile: %(tmp_path)s/fixed_ip_param"
                      % {'result': result, 'tmp_path': tmp_path})
            with open('%s/fixed_ip_param' % tmp_path, 'w') as f:
                f.write(result)
                f.write('\n')
            return True
        except Exception as err:
            LOG.error("Failed to create coreos parameter for userid '%s',"
                      "error: %s" % (userid, err))
            return False
    def read_coreos_parameter(self, userid):
        """Read back the fixed-ip parameter and clean up the temp folder."""
        # read coreos fixed ip parameters from tempfile by matching userid
        tmp_path = self._smtclient.get_guest_path(userid.upper())
        tmp_file_path = ('%s/fixed_ip_param' % tmp_path)
        with open(tmp_file_path, 'r') as f:
            fixed_ip_parameter = f.read().replace('\n', '')
            LOG.debug('Read coreos fixed ip parameter: %(parameter)s '
                      'from tempfile: %(filename)s'
                      % {'parameter': fixed_ip_parameter,
                         'filename': tmp_file_path})
        # Clean up tempfile
        self._smtclient.clean_temp_folder(tmp_path)
        return fixed_ip_parameter
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """volume attachment is not implemented for RHCOS"""
        func_name = 'get_volume_attach_configuration_cmds'
        raise exception.SDKFunctionNotImplementError(func=func_name,
                                                     modID='volume')
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """volume detachment is not implemented for RHCOS"""
        func_name = 'get_volume_detach_configuration_cmds'
        raise exception.SDKFunctionNotImplementError(func=func_name,
                                                     modID='volume')
    # The remaining LinuxDist hooks do not apply to RHCOS (no config files
    # are generated); they are intentionally implemented as no-ops.
    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        pass
    def _append_udev_rules_file(self, cfg_files, base_vdev):
        pass
    def _delete_vdev_info(self, vdev):
        pass
    def _enable_network_interface(self, device, ip, broadcast):
        pass
    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels):
        pass
    def _get_clean_command(self):
        pass
    def _get_cmd_str(self, address_read, address_write, address_data):
        pass
    def _get_device_filename(self, vdev):
        pass
    def _get_device_name(self, vdev):
        pass
    def _get_dns_filename(self):
        pass
    def _get_network_file_path(self):
        pass
    def _get_route_str(self, gateway_v4):
        pass
    def _get_udev_configuration(self, device, dev_channel):
        pass
    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        pass
    def create_active_net_interf_cmd(self):
        pass
    def get_scp_string(self, root, fcp, wwpn, lun):
        pass
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        pass
    def get_znetconfig_contents(self):
        pass
class rhcos4(rhcos):
    """RHCOS 4.x: no deviations from the generic rhcos implementation."""
    pass
class sles(LinuxDist):
    """Shared logic for SUSE Linux Enterprise Server guests."""
    def _get_network_file_path(self):
        """Directory holding the ifcfg-* files on SLES."""
        return '/etc/sysconfig/network/'
    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels, dns_v4, mtu):
        """Build an ifcfg-* file body; also remembers dns_v4 on self."""
        cfg_str = "BOOTPROTO=\'static\'\n"
        cfg_str += "IPADDR=\'%s\'\n" % ip_v4
        cfg_str += "NETMASK=\'%s\'\n" % netmask_v4
        cfg_str += "BROADCAST=\'%s\'\n" % broadcast_v4
        cfg_str += "STARTMODE=\'onboot\'\n"
        cfg_str += ("NAME=\'OSA Express Network card (%s)\'\n" %
                    address_read)
        cfg_str += "MTU=\'%s\'\n" % mtu
        # remember the DNS servers as instance state;
        # sles15.get_znetconfig_contents() consumes self.dns_v4 later on
        if (dns_v4 is not None) and (len(dns_v4) > 0):
            self.dns_v4 = dns_v4
        else:
            self.dns_v4 = None
        return cfg_str
    def _get_route_str(self, gateway_v4):
        """Default route line for the ifroute file."""
        route_str = 'default %s - -\n' % gateway_v4
        return route_str
    def _get_cmd_str(self, address_read, address_write, address_data):
        """qeth_configure command plus persisting the channel triplet."""
        cmd_str = 'qeth_configure -l 0.0.%s ' % address_read.lower()
        cmd_str += '0.0.%(write)s 0.0.%(data)s 1\n' % {
            'write': address_write.lower(), 'data': address_data.lower()}
        # record the triplet so it survives reboots
        cmd_str += ('echo "0.0.%(read)s,0.0.%(write)s,0.0.%(data)s #`date`"'
                    ' >>/boot/zipl/active_devices.txt\n' % {
                        'read': address_read.lower(),
                        'write': address_write.lower(),
                        'data': address_data.lower()})
        return cmd_str
    def _get_dns_filename(self):
        return '/etc/resolv.conf'
    def _get_device_filename(self, vdev):
        return 'ifcfg-eth' + str(vdev).zfill(4)
    def _get_all_device_filename(self):
        return 'ifcfg-eth*'
    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)
    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        """Attach udev/route config as files on first boot, otherwise as
        echo commands appended to the znetconfig command string."""
        udev_file_name = '/etc/udev/rules.d/70-persistent-net.rules'
        if first:
            cfg_files.append((udev_file_name, udev_cfg_str))
            if len(route_cfg_str) > 0:
                cfg_files.append((file_name_route, route_cfg_str))
        else:
            cmd_str += ("echo '%s'"
                        ' >>%s\n' % (udev_cfg_str, udev_file_name))
            if len(route_cfg_str) > 0:
                cmd_str += ('echo "%s"'
                            ' >>%s\n' % (route_cfg_str, file_name_route))
        return cmd_str
    def _get_udev_configuration(self, device, dev_channel):
        """Persistent-net rule pinning ethN to its read channel."""
        cfg_str = 'SUBSYSTEM==\"net\", ACTION==\"add\", DRIVERS==\"qeth\",'
        cfg_str += ' KERNELS==\"%s\", ATTR{type}==\"1\",' % dev_channel
        cfg_str += ' KERNEL==\"eth*\", NAME=\"eth%s\"\n' % device
        return cfg_str
    def _append_udev_rules_file(self, cfg_files, base_vdev):
        """Add the 51-qeth-0.0.<vdev>.rules file for the channel triplet."""
        rules_file_name = '/etc/udev/rules.d/51-qeth-0.0.%s.rules' % base_vdev
        read_ch = '0.0.' + base_vdev
        write_ch = '0.0.' + str(hex(int(base_vdev, 16) + 1))[2:]
        data_ch = '0.0.' + str(hex(int(base_vdev, 16) + 2))[2:]
        udev_rules_str = self._get_udev_rules(read_ch, write_ch, data_ch)
        cfg_files.append((rules_file_name, udev_rules_str))
    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info for a qeth channel triplet.

        BUGFIX: the dict literals below used to repeat the 'read' key with
        the same value; the duplicates are dropped (no behavior change —
        a dict literal keeps only the last occurrence anyway).
        """
        sub_str = '%(read)s %%k %(read)s %(write)s %(data)s qeth' % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str = '# Configure qeth device at'
        rules_str += ' %(read)s/%(write)s/%(data)s\n' % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL=='
                      '\"qeth\", IMPORT{program}=\"collect %s\"\n') % sub_str
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(read)s\", IMPORT{program}="collect %(channel)s\"\n'
                      ) % {'read': channel_read, 'channel': sub_str}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(write)s\", IMPORT{program}=\"collect %(channel)s\"\n'
                      ) % {'write': channel_write, 'channel': sub_str}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(data)s\", IMPORT{program}=\"collect %(channel)s\"\n'
                      ) % {'data': channel_data, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"drivers\", KERNEL==\"'
                      'qeth\", IMPORT{program}=\"collect --remove %s\"\n'
                      ) % sub_str
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(read)s\", IMPORT{program}=\"collect --remove'
                      ' %(channel)s\"\n'
                      ) % {'read': channel_read, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(write)s\", IMPORT{program}=\"collect --remove'
                      ' %(channel)s\"\n'
                      ) % {'write': channel_write, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(data)s\", IMPORT{program}=\"collect --remove'
                      ' %(channel)s\"\n'
                      ) % {'data': channel_data, 'channel': sub_str}
        rules_str += ('TEST==\"[ccwgroup/%(read)s]\", GOTO=\"qeth-%(read)s'
                      '-end\"\n') % {'read': channel_read}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", ENV{COLLECT_'
                      '%(read)s}==\"0\", ATTR{[drivers/ccwgroup:qeth]group}=\"'
                      '%(read)s,%(write)s,%(data)s\"\n') % {
            'read': channel_read, 'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"qeth'
                      '\", ENV{COLLECT_%(read)s}==\"0\", ATTR{[drivers/'
                      'ccwgroup:qeth]group}=\"%(read)s,%(write)s,%(data)s\"\n'
                      'LABEL=\"qeth-%(read)s-end\"\n') % {
            'read': channel_read, 'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
                      '\"%s\", ATTR{layer2}=\"1\"\n') % channel_read
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
                      '\"%s\", ATTR{online}=\"1\"\n') % channel_read
        return rules_str
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        return ("=root=%(root)s "
                "zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """Script lines that write /etc/zipl_volume.conf and run zipl."""
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'default=boot-from-volume\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'target = /boot/zipl\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s\\""'
                 '>/etc/zipl_volume.conf\n'
                 'mkinitrd\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]
    def _enable_network_interface(self, device, ip, broadcast):
        return ''
    def _get_clean_command(self):
        """Remove previously generated config, rules and device records."""
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        cmd = '\nrm -f %s\n' % files
        all_udev_rules_files = '/etc/udev/rules.d/51-qeth-0.0.*'
        cmd += 'rm -f %s\n' % all_udev_rules_files
        cmd += '> /boot/zipl/active_devices.txt\n'
        return cmd
    def _delete_vdev_info(self, vdev):
        """handle udev rules file."""
        vdev = vdev.lower()
        rules_file_name = '/etc/udev/rules.d/51-qeth-0.0.%s.rules' % vdev
        cmd = 'rm -f %s\n' % rules_file_name
        address = '0.0.%s' % str(vdev).zfill(4)
        udev_file_name = '/etc/udev/rules.d/70-persistent-net.rules'
        cmd += "sed -i '/%s/d' %s\n" % (address, udev_file_name)
        cmd += "sed -i '/%s/d' %s\n" % (address,
                                        '/boot/zipl/active_devices.txt')
        return cmd
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """sles attach script generation (sles_attach_volume.j2)"""
        template = self.get_template("volumeops", "sles_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        # TODO(bill): also consider is first attach or not
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """sles detach script generation (sles_detach_volume.j2)"""
        if connections > 0:
            # other attachments remain: this is NOT the last volume, so
            # the FCP devices must stay online
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "sles_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content
class sles11(sles):
    """SLES 11: sysvinit ('service') based activation."""
    def get_znetconfig_contents(self):
        """znetconfig script body executed on first boot."""
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'sleep 2',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'service network restart',
                          'cio_ignore -u'))
    def create_active_net_interf_cmd(self):
        """Start zvmguestconfigure via sysvinit."""
        return 'service zvmguestconfigure start'
class sles12(sles):
    """SLES 12: wicked-based networking, systemd service activation."""
    def get_znetconfig_contents(self):
        """znetconfig: rescan devices, drop stale routes, reload wicked."""
        remove_route = 'rm -f %s/ifroute-eth*' % self._get_network_file_path()
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'sleep 2',
                          remove_route,
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'cio_ignore -u',
                          'wicked ifreload all'))
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        return ("=root=%(root)s zfcp.allow_lun_scan=0 "
                "zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """Script lines that write /etc/zipl_volume.conf and run zipl."""
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'default=boot-from-volume\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'target = /boot/zipl\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s '
                 'zfcp.allow_lun_scan=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'mkinitrd\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]
    def create_active_net_interf_cmd(self):
        """Start the zvmguestconfigure systemd service."""
        return 'systemctl start zvmguestconfigure.service'
    def _enable_network_interface(self, device, ip, broadcast):
        """'ip addr'/'ip link' commands bringing *device* up with *ip*."""
        if len(broadcast) > 0:
            activeIP_str = 'ip addr add %s broadcast %s dev %s\n' % (ip,
                            broadcast, device)
        else:
            activeIP_str = 'ip addr add %s dev %s\n' % (ip, device)
        activeIP_str += 'ip link set dev %s up\n' % device
        return activeIP_str
class sles15(sles12):
    """SLES 15: DNS servers move from the ifcfg files into netconfig's
    NETCONFIG_DNS_STATIC_SERVERS variable in /etc/sysconfig/network/config.
    """
    def get_znetconfig_contents(self):
        """Build the znetconfig script, refreshing the static DNS list.

        NOTE(review): relies on self.dns_v4 having been set by
        sles._get_cfg_str() earlier in the flow — confirm callers always
        generate the cfg files before requesting znetconfig contents.
        """
        remove_route = 'rm -f %s/ifroute-eth*' % self._get_network_file_path()
        replace_var = 'NETCONFIG_DNS_STATIC_SERVERS'
        replace_file = '/etc/sysconfig/network/config'
        # BUGFIX: '\\s' yields a literal backslash-s for sed; the previous
        # bare '\s' was an invalid Python escape sequence (SyntaxWarning on
        # newer interpreters). The resulting runtime string is unchanged.
        remove_dns_cfg = "sed -i '/^\\s*%s=\"/d' %s" % (replace_var,
                                                        replace_file)
        if self.dns_v4:
            dns_addrs = ' '.join(self.dns_v4)
            netconfig_dns = '%s="%s"' % (replace_var, dns_addrs)
            set_dns = "echo '%s' >> %s" % (netconfig_dns, replace_file)
            return '\n'.join(('cio_ignore -R',
                              'znetconf -R -n',
                              'sleep 2',
                              remove_route,
                              remove_dns_cfg,
                              set_dns,
                              'udevadm trigger',
                              'udevadm settle',
                              'sleep 2',
                              'znetconf -A',
                              'cio_ignore -u',
                              'wicked ifreload all'))
        else:
            return '\n'.join(('cio_ignore -R',
                              'znetconf -R -n',
                              'sleep 2',
                              remove_route,
                              remove_dns_cfg,
                              'udevadm trigger',
                              'udevadm settle',
                              'sleep 2',
                              'znetconf -A',
                              'cio_ignore -u',
                              'wicked ifreload all'))
class ubuntu(LinuxDist):
    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm

        :param str file_path: unused here; ubuntu derives its own paths
        :param list guest_networks:  a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address,
               'dns_addr': (list) dns addresses,
               'gateway_addr': (str) gateway address,
               'cidr': (str) cidr format
               'nic_vdev': (str) VDEV of the nic}
               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003'}]
        :param bool first: True on first boot (write files), False later
               (append via echo commands)
        :returns: tuple (cfg_files, cmd_strings, clean_cmd, net_enable_cmd)
        """
        cfg_files = []
        cmd_strings = ''
        network_config_file_name = self._get_network_file()
        network_cfg_str = 'auto lo\n'
        network_cfg_str += 'iface lo inet loopback\n'
        net_enable_cmd = ''
        if first:
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
            # not the first boot: the lo stanza already exists in the
            # interfaces file, only the new NIC stanzas are appended
            network_cfg_str = ''
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            network_hw_config_fname = self._get_device_filename(base_vdev)
            network_hw_config_str = self._get_network_hw_config_str(base_vdev)
            cfg_files.append((network_hw_config_fname, network_hw_config_str))
            (cfg_str, dns_str) = self._generate_network_configuration(network,
                                                                base_vdev)
            LOG.debug('Network configure file content is: %s', cfg_str)
            network_cfg_str += cfg_str
            if len(dns_str) > 0:
                network_cfg_str += dns_str
        if first:
            cfg_files.append((network_config_file_name, network_cfg_str))
        else:
            cmd_strings = ('echo "%s" >>%s\n' % (network_cfg_str,
                           network_config_file_name))
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd
def get_network_configuration_files(self, vdev):
vdev = vdev.lower()
network_hw_config_fname = self._get_device_filename(vdev)
return network_hw_config_fname
def delete_vdev_info(self, vdev):
cmd = self._delete_vdev_info(vdev)
return cmd
    def _delete_vdev_info(self, vdev):
        """handle vdev related info.

        Builds a shell snippet that removes this device's stanza from
        /etc/network/interfaces: it locates the 'auto <device>' line,
        checks whether a dns-nameservers line follows six lines below,
        and deletes either 7 or 6 lines accordingly.
        """
        vdev = vdev.lower()
        network_config_file_name = self._get_network_file()
        device = self._get_device_name(vdev)
        # num: line number of the stanza start; dns: non-empty if the
        # stanza carries a dns-nameservers line (one extra line to drop)
        cmd = '\n'.join(("num=$(sed -n '/auto %s/=' %s)" % (device,
                         network_config_file_name),
                         "dns=$(awk 'NR==(\"\'$num\'\"+6)&&"
                         "/dns-nameservers/' %s)" %
                         network_config_file_name,
                         "if [[ -n $dns ]]; then",
                         "  sed -i '/auto %s/,+6d' %s" % (device,
                            network_config_file_name),
                         "else",
                         "  sed -i '/auto %s/,+5d' %s" % (device,
                            network_config_file_name),
                         "fi"))
        return cmd
def _get_network_file(self):
return '/etc/network/interfaces'
def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
netmask_v4, mtu):
cfg_str = 'auto ' + device + '\n'
cfg_str += 'iface ' + device + ' inet static\n'
cfg_str += 'address ' + ip_v4 + '\n'
cfg_str += 'netmask ' + netmask_v4 + '\n'
cfg_str += 'broadcast ' + broadcast_v4 + '\n'
cfg_str += 'gateway ' + gateway_v4 + '\n'
cfg_str += 'mtu ' + mtu + '\n'
return cfg_str
def _generate_network_configuration(self, network, vdev):
ip_v4 = dns_str = gateway_v4 = ''
netmask_v4 = broadcast_v4 = ''
if (('ip_addr' in network.keys()) and
(network['ip_addr'] is not None)):
ip_v4 = network['ip_addr']
if (('gateway_addr' in network.keys()) and
(network['gateway_addr'] is not None)):
gateway_v4 = network['gateway_addr']
if (('dns_addr' in network.keys()) and
(network['dns_addr'] is not None) and
(len(network['dns_addr']) > 0)):
for dns in network['dns_addr']:
dns_str += 'dns-nameservers ' + dns + '\n'
if (('cidr' in network.keys()) and
(network['cidr'] is not None)):
ip_cidr = network['cidr']
netmask_v4 = str(netaddr.IPNetwork(ip_cidr).netmask)
broadcast_v4 = str(netaddr.IPNetwork(ip_cidr).broadcast)
if broadcast_v4 == 'None':
broadcast_v4 = ''
if (('mtu' in network.keys()) and
(network['mtu'] is not None)):
mtu = str(network['mtu'])
device = self._get_device_name(vdev)
cfg_str = self._get_cfg_str(device, broadcast_v4, gateway_v4,
ip_v4, netmask_v4, mtu)
return cfg_str, dns_str
def _get_route_str(self, gateway_v4):
return ''
def _get_cmd_str(self, address_read, address_write, address_data):
return ''
def _enable_network_interface(self, device, ip):
return ''
def _get_device_name(self, device_num):
return 'enc' + str(device_num)
def _get_dns_filename(self):
return ''
def _get_device_filename(self, device_num):
return '/etc/sysconfig/hardware/config-ccw-0.0.' + str(device_num)
def _get_network_hw_config_str(self, base_vdev):
ccwgroup_chans_str = ' '.join((
'0.0.' + str(hex(int(base_vdev, 16)))[2:],
'0.0.' + str(hex(int(base_vdev, 16) + 1))[2:],
'0.0.' + str(hex(int(base_vdev, 16) + 2))[2:]))
return '\n'.join(('CCWGROUP_CHANS=(' + ccwgroup_chans_str + ')',
'QETH_OPTIONS=layer2'))
def _get_network_file_path(self):
pass
def get_znetconfig_contents(self):
return '\n'.join(('cio_ignore -R',
'znetconf -R -n',
'sleep 2',
'udevadm trigger',
'udevadm settle',
'sleep 2',
'znetconf -A',
'/etc/init.d/networking restart',
'cio_ignore -u'))
def _get_udev_configuration(self, device, dev_channel):
return ''
def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
route_cfg_str, udev_cfg_str, first=False):
return cmd_str
def get_scp_string(self, root, fcp, wwpn, lun):
pass
def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
pass
def _get_udev_rules(self, channel_read, channel_write, channel_data):
"""construct udev rules info."""
return ''
def _append_udev_rules_file(self, cfg_files, base_vdev):
pass
def create_active_net_interf_cmd(self):
return "systemctl start zvmguestconfigure.service"
def _get_clean_command(self):
files = self._get_device_filename('*')
cmd = '\nrm -f %s\n' % files
return cmd
def _check_multipath_tools(self):
multipath = 'multipath'
return multipath
def _format_lun(self, lun):
"""ubuntu"""
target_lun = int(lun[2:6], 16)
return target_lun
def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
target_lun, multipath,
mount_point):
"""ubuntu attach script generation"""
template = self.get_template("volumeops", "ubuntu_attach_volume.j2")
target_filename = mount_point.replace('/dev/', '')
# the parameter 'target_lun' is hex for either v7k or ds8k:
# for v7k, target_lun[2] == '0' and target_lun[6:] == '0'
# for ds8k, target_lun[2] == '4'
# in the future, we add support to other storage provider whose lun
# id may use bits in target_lun[6:], such as, 0x0003040200000000
# when attach v7k volume:
# 1. if the lun id less than 256,
# the file under /dev/disk/by-path/ will as below,
# take 'lun id = 0' as example:
# ccw-0.0.5c03-fc-0x5005076802400c1a-lun-0,the the lun id is decimal.
# 2. if the lun id is equal or more than 256,
# the file under /dev/disk/by-path/ will as below,
# take 'lun id = 256' as example:
# ccw-0.0.1a0d-fc-0x500507680b26bac7-lun-0x0100000000000000,
# the lun id is hex.
# when attach ds8k volume:
# the file under /dev/disk/by-path/ will as below,
# take "volume id 140c" as example:
# ccw-0.0.1a0d-fc-0x5005076306035388-lun-0x4014400c00000000,
# the lun id is always hex.
lun = self._format_lun(target_lun)
if all([x == '0' for x in target_lun[6:]]) and lun < 256:
lun_id = lun
else:
lun_id = target_lun
# TODO(bill): also consider is first attach or not
content = template.render(fcp_list=fcp_list,
wwpns=target_wwpns,
lun=target_lun,
lun_id=lun_id,
target_filename=target_filename)
return content
def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
target_lun, multipath,
mount_point, connections):
"""ubuntu detach script generation"""
if connections > 0:
# if this volume is the last volume
# we need to know it and offline the FCP devices
is_last_volume = 0
else:
is_last_volume = 1
template = self.get_template("volumeops", "ubuntu_detach_volume.j2")
target_filename = mount_point.replace('/dev/', '')
lun = self._format_lun(target_lun)
if all([x == '0' for x in target_lun[6:]]) and lun < 256:
lun_id = lun
else:
lun_id = target_lun
content = template.render(fcp_list=fcp_list,
wwpns=target_wwpns,
lun=target_lun,
lun_id=lun_id,
target_filename=target_filename,
is_last_volume=is_last_volume)
return content
class ubuntu16(ubuntu):
    """Ubuntu 16.x: no behaviour differences from the ubuntu base class."""
    pass
class ubuntu20(ubuntu):
    """Ubuntu 20.x handler: uses netplan instead of ifupdown."""

    def _get_device_filename(self, device_num):
        """Per-device netplan yaml file path."""
        return '/etc/netplan/' + str(device_num) + '.yaml'

    def _get_network_file(self):
        """Netplan config file written by zvmguestconfigure."""
        return '/etc/netplan/00-zvmguestconfigure-config.yaml'

    def _get_network_file_path(self):
        """Directory holding netplan configuration."""
        return '/etc/netplan/'

    def get_znetconfig_contents(self):
        """Boot-time commands; apply config with netplan, not ifupdown."""
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'sleep 2',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'netplan apply',
                          'cio_ignore -u'))

    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm

        :param list guest_networks: a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address,
               'dns_addr': (list) dns addresses,
               'gateway_addr': (str) gateway address,
               'cidr': (str) cidr format
               'nic_vdev': (str) VDEV of the nic}

               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003'}]
        """
        cfg_files = []
        cmd_strings = ''
        network_config_file_name = self._get_network_file()
        net_enable_cmd = ''
        # wipe stale per-device config on the first deploy only
        if first:
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            cfg_str = self._generate_network_configuration(network,
                                                           base_vdev)
            LOG.debug('Network configure file content is: %s', cfg_str)
            if first:
                cfg_files.append((network_config_file_name, cfg_str))
            else:
                # TODO: create interface with cmd_strings after VM deployed
                raise Exception('Ubuntu20 is not supported to create interface'
                                'after VM deployed.')
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd

    def _generate_network_configuration(self, network, vdev):
        """Build the netplan config dict for one NIC.

        Returns a python dict mirroring the netplan yaml layout; the
        'nameservers' section is present only when dns addresses exist.
        (The old unused 'nameserver ...' dns_str accumulation was dead
        code and has been removed.)
        """
        ip_v4 = gateway_v4 = ''
        cidr = mtu = ''
        dns_v4 = []
        if (('ip_addr' in network.keys()) and
            (network['ip_addr'] is not None)):
            ip_v4 = network['ip_addr']
        if (('gateway_addr' in network.keys()) and
            (network['gateway_addr'] is not None)):
            gateway_v4 = network['gateway_addr']
        if (('dns_addr' in network.keys()) and
            (network['dns_addr'] is not None) and
                (len(network['dns_addr']) > 0)):
            dns_v4 = list(network['dns_addr'])
        if (('cidr' in network.keys()) and
            (network['cidr'] is not None)):
            # keep only the prefix length, e.g. '24' from '192.168.95.0/24'
            cidr = network['cidr'].split('/')[1]
        if (('mtu' in network.keys()) and
            (network['mtu'] is not None)):
            mtu = str(network['mtu'])
        device = self._get_device_name(vdev)
        iface = {'addresses': [ip_v4 + '/' + cidr],
                 'gateway4': gateway_v4,
                 'mtu': mtu}
        if dns_v4:
            iface['nameservers'] = {'addresses': dns_v4}
        cfg_str = {'network': {'ethernets': {device: iface},
                               'version': 2}}
        return cfg_str
class ubuntu22(ubuntu20):
    """Ubuntu 22.x behaves exactly like Ubuntu 20.x."""
    pass
class LinuxDistManager(object):
    """Factory mapping an os_version string to a distro handler class."""

    def get_linux_dist(self, os_version):
        """Return the handler class (e.g. rhel7, ubuntu20) for os_version."""
        distro, release = self.parse_dist(os_version)
        return globals()[distro + release]

    def _parse_release(self, os_version, distro, remain):
        """Return the supported major release that *remain* starts with.

        :raises ZVMException: when the version part is unsupported.
        """
        supported = {'rhel': ['6', '7', '8', '9'],
                     'sles': ['11', '12', '15'],
                     'ubuntu': ['16', '20', '22'],
                     'rhcos': ['4']}
        for release in supported[distro]:
            if remain.startswith(release):
                return release
        msg = 'Can not handle os: %s' % os_version
        raise exception.ZVMException(msg=msg)

    def parse_dist(self, os_version):
        """Separate distro and major release from os_version.

        Possible return values are ('rhel', x), ('sles', x),
        ('ubuntu', x) and ('rhcos', x) where x is a supported major
        release string (the version part is not guaranteed numeric).

        :raises ZVMException: when the distro or release is unsupported.
        """
        supported = {'rhel': ['rhel', 'redhat', 'red hat'],
                     'sles': ['suse', 'sles'],
                     'ubuntu': ['ubuntu'],
                     'rhcos': ['rhcos', 'coreos', 'red hat coreos']}
        os_version = os_version.lower()
        # Bug fix: try longer patterns first so that e.g. 'red hat coreos4'
        # matches rhcos instead of being swallowed by rhel's 'red hat'
        # prefix (which then failed on the release parse).
        candidates = [(pattern, distro)
                      for distro, patterns in supported.items()
                      for pattern in patterns]
        candidates.sort(key=lambda pd: len(pd[0]), reverse=True)
        for pattern, distro in candidates:
            if os_version.startswith(pattern):
                remain = os_version.split(pattern, 1)[1]
                release = self._parse_release(os_version, distro, remain)
                return distro, release
        msg = 'Can not handle os: %s' % os_version
        raise exception.ZVMException(msg=msg)
import contextlib
import errno
import functools
import netaddr
import os
import pwd
import re
import shlex
import shutil
import six
import subprocess
import sys
import tempfile
import time
import traceback
import string
from zvmsdk import config
from zvmsdk import constants
from zvmsdk import exception
from zvmsdk import log
CONF = config.CONF
LOG = log.LOG
def execute(cmd, timeout=None):
    """Run *cmd* and return a (rc, output-string) tuple.

    *cmd* may be a string (split with shlex) or an argv list,
    e.g. ['/usr/bin/cp', '-r', 'src', 'dst'].  A non-zero exit code is
    returned in rc with the captured stdout+stderr; timeouts and
    permission errors propagate, anything else becomes
    SDKInternalError.
    """
    argv = cmd if isinstance(cmd, list) else shlex.split(cmd)
    rc = 0
    output = ""
    try:
        output = subprocess.check_output(argv, close_fds=True,
                                         stderr=subprocess.STDOUT,
                                         timeout=timeout)
    except subprocess.CalledProcessError as err:
        rc = err.returncode
        output = err.output
    except (subprocess.TimeoutExpired, PermissionError):
        raise
    except Exception as err:
        err_msg = ('Command "%s" Error: %s' % (' '.join(argv), str(err)))
        raise exception.SDKInternalError(msg=err_msg)
    return (rc, bytes.decode(output))
def get_host():
    """Return '<effective-user>@<my_ip>' identifying this SDK server."""
    user_name = pwd.getpwuid(os.geteuid()).pw_name
    return '%s@%s' % (user_name, CONF.network.my_ip)
def looping_call(f, sleep=5, inc_sleep=0, max_sleep=60, timeout=600,
                 exceptions=(), *args, **kwargs):
    """Repeatedly call *f* until it succeeds or the timeout expires.

    Each call of ``f(*args, **kwargs)`` that raises one of *exceptions*
    is retried after sleeping; any other exception propagates, and a
    call that returns normally ends the loop.

    :param f: the looping call function or method.
    :param sleep: initial interval of the looping calls.
    :param inc_sleep: sleep time increment, default as 0.
    :param max_sleep: max sleep time.
    :param timeout: looping call timeout in seconds, 0 means no timeout.
    :param exceptions: exceptions that trigger re-try.

    NOTE(review): when the deadline passes the loop simply stops retrying;
    it does not raise — the caller must re-check the condition itself.
    """
    time_start = time.time()
    expiration = time_start + timeout
    retry = True
    while retry:
        # 'expired' is falsy when timeout == 0 (i.e. retry forever)
        expired = timeout and (time.time() > expiration)
        LOG.debug(
            "timeout is %(timeout)s, expiration is %(expiration)s, \
                time_start is %(time_start)s" %
            {"timeout": timeout, "expiration": expiration,
             "time_start": time_start})
        try:
            f(*args, **kwargs)
        except exceptions:
            # retry only while the deadline has not passed
            retry = not expired
            if retry:
                LOG.debug("Will re-try %(fname)s in %(itv)d seconds" %
                          {'fname': f.__name__, 'itv': sleep})
                time.sleep(sleep)
                sleep = min(sleep + inc_sleep, max_sleep)
            else:
                LOG.debug("Looping call %s timeout" % f.__name__)
            continue
        # f() succeeded — leave the loop
        retry = False
def convert_to_mb(s):
    """Convert a memory size string with G/T/M suffix to megabytes.

    The original docstring claimed "GB to MB", but 'G' and 'T' suffixes
    are scaled while any other input is treated as an MB value with its
    suffix character dropped (expected form e.g. '2048M').

    :raises SDKInternalError: when the value cannot be parsed.
    """
    s = s.upper()
    try:
        if s.endswith('G'):
            return float(s[:-1].strip()) * 1024
        elif s.endswith('T'):
            return float(s[:-1].strip()) * 1024 * 1024
        else:
            # NOTE(review): this strips the trailing character even when
            # there is no 'M' suffix (a bare '1024' would lose a digit) —
            # callers always pass suffixed sizes; confirm before relying
            # on unsuffixed input.
            return float(s[:-1].strip())
    except (IndexError, ValueError, KeyError, TypeError):
        errmsg = ("Invalid memory format: %s") % s
        raise exception.SDKInternalError(msg=errmsg)
class PathUtils(object):
    """Helpers for the on-disk directory layout used by the SDK."""

    def clean_temp_folder(self, tmp_folder):
        """Recursively delete *tmp_folder* when it exists."""
        if os.path.isdir(tmp_folder):
            LOG.debug('Removing existing folder %s ', tmp_folder)
            shutil.rmtree(tmp_folder)

    def _get_guest_path(self):
        """Root folder holding per-guest persistent data."""
        return os.path.join(constants.SDK_DATA_PATH, 'guests')

    def mkdir_if_not_exist(self, folder):
        """Create *folder* (and parents) when missing."""
        if not os.path.exists(folder):
            LOG.debug("Creating the guest path %s", folder)
            os.makedirs(folder)

    # Persistent guest info lives under /var/lib/zvmsdk/guests/<userid>
    # by default.
    def remove_guest_path(self, userid):
        """Best-effort removal of a guest's persistent folder."""
        target = os.path.join(self._get_guest_path(), userid)
        try:
            shutil.rmtree(target)
        except Exception:
            # Ignore any exception for delete temp folder
            pass

    def get_guest_temp_path(self, userid):
        """Create and return a fresh /tmp working directory for *userid*."""
        return tempfile.mkdtemp(prefix=userid, dir='/tmp')

    def get_guest_path(self, userid):
        """Return (creating if needed) the guest's persistent folder."""
        target = os.path.join(self._get_guest_path(), userid)
        self.mkdir_if_not_exist(target)
        return target

    def get_console_log_path(self, userid):
        """Path of the guest's console log file."""
        return os.path.join(self.get_guest_path(userid), "console.log")

    def create_import_image_repository(self, image_osdistro, type,
                                       image_name):
        """Return (creating if needed) the repo dir for an imported image."""
        repo = os.path.join(CONF.image.sdk_image_repository,
                            type,
                            image_osdistro,
                            image_name)
        if not os.path.exists(repo):
            LOG.debug('Creating image repository %s for image import',
                      repo)
            os.makedirs(repo)
        return repo

    def create_file_repository(self, file_type):
        """Return (creating if needed) the repo dir for file transfer."""
        repo = os.path.join(CONF.file.file_repository, file_type)
        if not os.path.exists(repo):
            LOG.debug('Creating file repository %s for file transfer',
                      repo)
            os.makedirs(repo)
        return repo
def to_utf8(text):
    """Return *text* as UTF-8 encoded bytes; bytes pass through unchanged.

    :raises TypeError: when text is neither bytes nor str.
    """
    if isinstance(text, bytes):
        return text
    elif isinstance(text, str):
        # six.text_type is just str on py3; this codebase is py3-only
        # (it relies on PermissionError and bytes.decode elsewhere), so
        # use the builtin directly.
        return text.encode()
    else:
        raise TypeError("bytes or Unicode expected, got %s"
                        % type(text).__name__)
def valid_userid(userid):
    """Return True when *userid* is a syntactically valid z/VM user id.

    Valid means: a non-empty string of at most 8 characters with no
    embedded blanks.  (six.string_types replaced by str — py3-only code.)
    """
    if not isinstance(userid, str):
        return False
    if ((userid == '') or
        (userid.find(' ') != -1)):
        return False
    if len(userid) > 8:
        return False
    return True
def valid_mac_addr(addr):
    """Validate a colon-separated MAC address such as '02:00:0a:0b:0c:0d'.

    (six.string_types replaced by str — py3-only code.)
    """
    if not isinstance(addr, str):
        return False
    valid = re.compile(r'''
                      (^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$)
                      ''',
                      re.VERBOSE | re.IGNORECASE)
    return valid.match(addr) is not None
def valid_cidr(cidr):
    """Return True when *cidr* is a well-formed 'a.b.c.d/len' string.

    Rejects non-strings, values netaddr cannot parse, values without a
    '/' and values containing whitespace.
    """
    if not isinstance(cidr, str):
        return False
    try:
        netaddr.IPNetwork(cidr)
    except netaddr.AddrFormatError:
        return False
    if '/' not in cidr:
        return False
    # raw string: '\s' in a plain literal is an invalid escape sequence
    if re.search(r'\s', cidr):
        return False
    return True
def last_bytes(file_like_object, num):
    """Return (tail, remaining): the final *num* bytes of the file and
    how many bytes precede them.

    When *num* exceeds the file size the whole content is returned and
    remaining is 0.
    """
    try:
        file_like_object.seek(-num, os.SEEK_END)
    except IOError as exc:
        # seek() fails with EINVAL when moving before the start of the
        # file, i.e. num is larger than the file — read from the top.
        if exc.errno != errno.EINVAL:
            raise
        file_like_object.seek(0, os.SEEK_SET)
    remaining = file_like_object.tell()
    return (file_like_object.read(), remaining)
def check_input_types(*types, **validkeys):
    """Function decorator checking that all positional inputs of the
    decorated method are of the expected types.

    The checks can be skipped by setting _skip_input_check=True on the
    object the decorated method is bound to.

    :param tuple types: expected types of input parameters to the decorated
                        function; each entry may itself be a tuple of
                        acceptable types, and the sentinel
                        constants._TUSERID requests z/VM userid validation
    :param validkeys: valid keywords(str) in a list,
                      e.g. validkeys=['key1', 'key2']
    :raises SDKInvalidInputNumber: too many positional arguments
    :raises SDKInvalidInputTypes: an argument has an unexpected type
    :raises SDKInvalidInputFormat: invalid userid or unexpected keyword
    """
    def decorator(function):
        @functools.wraps(function)
        def wrap_func(*args, **kwargs):
            if args[0]._skip_input_check:
                # skip input check
                return function(*args, **kwargs)
            # drop class object self
            inputs = args[1:]
            if (len(inputs) > len(types)):
                msg = ("Too many parameters provided: %(specified)d specified,"
                       "%(expected)d expected." %
                       {'specified': len(inputs), 'expected': len(types)})
                LOG.info(msg)
                raise exception.SDKInvalidInputNumber(function.__name__,
                                                      len(types), len(inputs))
            argtypes = tuple(map(type, inputs))
            match_types = types[0:len(argtypes)]
            invalid_type = False
            invalid_userid_idx = -1
            for idx in range(len(argtypes)):
                _mtypes = match_types[idx]
                # normalize a single expected type into a 1-tuple
                if not isinstance(_mtypes, tuple):
                    _mtypes = (_mtypes,)
                argtype = argtypes[idx]
                if constants._TUSERID in _mtypes:
                    # userid validation applies unless the argument already
                    # matches one of the non-userid alternatives
                    userid_type = True
                    for _tmtype in _mtypes:
                        if ((argtype == _tmtype) and
                            (_tmtype != constants._TUSERID)):
                            userid_type = False
                    if (userid_type and
                        (not valid_userid(inputs[idx]))):
                        invalid_userid_idx = idx
                        break
                elif argtype not in _mtypes:
                    invalid_type = True
                    break
            if invalid_userid_idx != -1:
                msg = ("Invalid string value found at the #%d parameter, "
                       "length should be less or equal to 8 and should not be "
                       "null or contain spaces." % (invalid_userid_idx + 1))
                LOG.info(msg)
                raise exception.SDKInvalidInputFormat(msg=msg)
            if invalid_type:
                msg = ("Invalid input types: %(argtypes)s; "
                       "Expected types: %(types)s" %
                       {'argtypes': str(argtypes), 'types': str(types)})
                LOG.info(msg)
                raise exception.SDKInvalidInputTypes(function.__name__,
                                                     str(types), str(argtypes))
            # keyword arguments must come from the declared valid_keys list
            valid_keys = validkeys.get('valid_keys')
            if valid_keys:
                for k in kwargs.keys():
                    if k not in valid_keys:
                        msg = ("Invalid keyword: %(key)s; "
                               "Expected keywords are: %(keys)s" %
                               {'key': k, 'keys': str(valid_keys)})
                        LOG.info(msg)
                        raise exception.SDKInvalidInputFormat(msg=msg)
            return function(*args, **kwargs)
        return wrap_func
    return decorator
def import_class(import_str):
    """Return the class object named by a dotted 'pkg.module.Class' path."""
    module_name, _, class_name = import_str.rpartition('.')
    __import__(module_name)
    module = sys.modules[module_name]
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_name,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import a class by dotted path and instantiate it with the args."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
@contextlib.contextmanager
def expect_invalid_resp_data(data=''):
    """Convert malformed-zvm-response errors into SDKInternalError."""
    try:
        yield
    except (ValueError, TypeError, IndexError, AttributeError,
            KeyError) as err:
        msg = ('Invalid smt response data: %s. Error: %s' %
               (data, six.text_type(err)))
        LOG.error(msg)
        raise exception.SDKInternalError(msg=msg)
def wrap_invalid_resp_data_error(function):
    """Decorator converting malformed-response errors to SDKInternalError."""
    @functools.wraps(function)
    def _guarded(*arg, **kwargs):
        try:
            return function(*arg, **kwargs)
        except (ValueError, TypeError, IndexError, AttributeError,
                KeyError) as err:
            msg = ('Invalid smt response data. Error: %s' %
                   six.text_type(err))
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg)
    return _guarded
@contextlib.contextmanager
def expect_and_reraise_internal_error(modID='SDK'):
    """Re-raise any SDKInternalError tagged with the given module id.

    modID: the moduleID that the internal error happens in.
    """
    try:
        yield
    except exception.SDKInternalError as err:
        raise exception.SDKInternalError(err.format_message(), modID=modID)
@contextlib.contextmanager
def log_and_reraise_sdkbase_error(action):
    """Log 'Failed to <action>.' when an SDK base error escapes, then
    re-raise it unchanged."""
    try:
        yield
    except exception.SDKBaseException:
        LOG.error("Failed to " + action + ".")
        raise
@contextlib.contextmanager
def log_and_reraise_smt_request_failed(action=None):
    """Log SMT request failures (optionally prefixed with the attempted
    action) and re-raise them carrying the combined message."""
    try:
        yield
    except exception.SDKSMTRequestFailed as err:
        prefix = '' if action is None else "Failed to %s. " % action
        msg = prefix + "SMT error: %s" % err.format_message()
        LOG.error(msg)
        raise exception.SDKSMTRequestFailed(err.results, msg)
@contextlib.contextmanager
def ignore_errors():
    """Run the with-block and swallow (but log) any exception it raises."""
    try:
        yield
    except Exception as err:
        LOG.error('ignore an error: ' + str(err))
def get_smt_userid():
    """Return the z/VM userid of the smt server (first token of
    'vmcp query userid')."""
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    try:
        raw = subprocess.check_output(cmd,
                                      close_fds=True,
                                      stderr=subprocess.STDOUT)
        return bytes.decode(raw).split()[0]
    except Exception as err:
        msg = ("Could not find the userid of the smt server: %s") % err
        raise exception.SDKInternalError(msg=msg)
def get_lpar_name():
    """Return the LPAR name this vm runs on (last token of
    'vmcp query userid')."""
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    try:
        raw = subprocess.check_output(cmd,
                                      close_fds=True,
                                      stderr=subprocess.STDOUT)
        return bytes.decode(raw).split()[-1]
    except Exception as err:
        msg = ("Failed to get the LPAR name for the smt server: %s") % err
        raise exception.SDKInternalError(msg=msg)
def get_namelist():
    """Return the namelist name.

    Uses CONF.zvm.namelist when configured (and short enough), otherwise
    derives one from the smt server userid.
    """
    configured = CONF.zvm.namelist
    # namelist length limit should be 64, but there's a bug limiting it
    # to 8; relax this check once the bug is fixed
    if configured is not None and len(configured) <= 8:
        return configured
    # py3-compatible derivation: 'NL' + last 6 chars of the smt userid
    return 'NL' + get_smt_userid().rjust(6, '0')[-6:]
def generate_iucv_authfile(fn, client):
    """Write the script that records *client* as the authorized iucv userid."""
    script = ('#!/bin/bash\n'
              'echo -n %s > /etc/iucv_authorized_userid\n' % client)
    with open(fn, 'w') as f:
        f.write(script)
def translate_response_data_to_expect_dict(results, step):
    """
    Translate SMT response to a python dictionary
    ['volume name: IASFBA', 'volume_type:9336-ET', 'volume_size:564718',
     'volume_name: IAS1CM', 'volume_type:3390-09', 'volume_size:60102']
    translate to:
    {'IASFBA': {'volume_type': '9336-ET', 'volume_size': '564718'},
     'IAS1CM': {'volume_type': '3390-09', 'volume_size': '60102'}}
    :results: the SMT response in list format
    :step: count of list members converted to one member of the dictionary
    """
    data = {}
    for i in range(0, len(results), step):
        volume_name = results[i].split(':')[1].strip()
        data[volume_name] = {}
        for k in range(1, step):
            # Split on the first ':' only — the old unbounded split raised
            # ValueError whenever a value contained ':'.  Strip both sides
            # to match the documented example.
            key, value = results[i + k].split(':', 1)
            data[volume_name][key.strip()] = value.strip()
    return data
@wrap_invalid_resp_data_error
def translate_response_to_dict(rawdata, dirt):
    """Translate SMT response text to a python dictionary.

    Each line of *rawdata* is matched against the marker strings in
    *dirt* ({result_key: marker}); the text following the first matching
    marker on a line becomes the value for result_key.

    SMT response example:
      keyword1: value1\n
      keyword2: value2\n
    yields {keyword1: value1, keyword2: value2}.

    :raises SDKInternalError: when no marker matched any line.
    """
    data = {}
    for line in rawdata.split("\n"):
        for key in list(dirt.keys()):
            marker = dirt[key]
            if marker in line:
                data[key] = line[(line.find(marker) + len(marker)):].strip()
                break
    if data == {}:
        msg = ("Invalid smt response data. Error: No value matched with "
               "keywords. Raw Data: %(raw)s; Keywords: %(kws)s" %
               {'raw': rawdata, 'kws': str(dirt)})
        raise exception.SDKInternalError(msg=msg)
    return data
def make_dummy_image(image_path, d_type='CKD'):
    """Write a minimal placeholder z/VM disk-image header to *image_path*.

    d_type is 'CKD' (cylinders) or 'FBA' (blocks); anything else falls
    back to 'CKD'.
    """
    if d_type not in ('CKD', 'FBA'):
        d_type = 'CKD'
    d_unit = 'BLK' if d_type == 'FBA' else 'CYL'
    header = ("z/VM %(type)s Disk Image: 0 %(unit)s" %
              {'type': d_type, 'unit': d_unit})
    header = (' '.join((header, 'HLen: 0055', 'GZIP: 0')))
    with open(image_path, 'wb') as f:
        f.write(header.encode())
@contextlib.contextmanager
def acquire_lock(lock):
    """Hold *lock* for the duration of the with-block, releasing on exit."""
    lock.acquire()
    try:
        yield
    finally:
        lock.release()
def check_userid_exist(userid, needLogon=False):
    """Return True when the z/VM userid exists (optionally: is logged on).

    The successful output is: FBA0004 - DSC
    The successful output for device is (vmcp q 0100):
    DASD 0100 3390 IAS106 R/W 29128 CYL ON DASD 1356 SUBCHANNEL = 0003
    Errors are:
    HCPCQV003E Invalid option - XXXXX
    HCPQVD040E Device XXXX does not exist
    HCPCFC026E Operand missing or invalid
    HCPCQU045E XXXXX not logged on
    Success msgs:
    HCPCQU361E LOGOFF/FORCE pending for user xxxxxx
    """
    cmd = 'sudo vmcp q %s' % userid
    rc, output = execute(cmd)
    # Bug fix: the patterns are now raw strings — '\w' in a plain literal
    # is an invalid escape sequence (DeprecationWarning on py3.6+,
    # an error in future interpreter versions).
    if needLogon:
        strfail = (r'(^HCP\w\w\w003E|^HCP\w\w\w040E|'
                   r'^HCP\w\w\w026E|^HCP\w\w\w045E)')
        strok = r'(^%s)' % userid
    else:
        strfail = r'(^HCP\w\w\w003E|^HCP\w\w\w040E|^HCP\w\w\w026E)'
        strok = r'(^%s|^HCP\w\w\w045E|^HCP\w\w\w361E)' % userid
    if re.search(strfail, output):
        # userid not exist
        return False
    if re.search(strok, output):
        # userid exist
        return True
    # When reaching here most likely the userid represents a device
    # and anyway it's not a guest.
    return False
def check_userid_on_others(userid):
    """Return True when *userid* is logged on elsewhere in the SSI cluster."""
    try:
        check_userid_exist(userid)
        rc, output = execute('sudo vmcp q %s' % userid)
        return re.search(' - SSI', output) is not None
    except Exception as err:
        msg = ("Could not find the userid: %s") % err
        raise exception.SDKInternalError(msg=msg)
def expand_fcp_list(fcp_list):
    """Expand an FCP list string into {path_no: set-of-addresses}.

    An fcp list is composed of fcp device addresses, range indicator '-',
    and split indicator ';' (paths may additionally comma-separate items).

    Example 1: "0011-0013;0015;0017-0018" returns
        {0: {'0011', '0012', '0013'},
         1: {'0015'},
         2: {'0017', '0018'}}
    Example 2: an empty string returns an empty dict.

    ATTENTION: to support multipath, fcp_list is expected to look like
    "0011-0014;0021-0024": each ';'-separated group must sit on its own
    physical WWPN (path) and the groups should hold the same number of
    FCP devices; attach picks one device from each path's group to build
    a multipath pair.

    :raises SDKInternalError: when fcp_list does not match the grammar.
    """
    LOG.debug("Expand FCP list %s" % fcp_list)
    if not fcp_list:
        return dict()
    fcp_list = fcp_list.strip()
    fcp_list = fcp_list.replace(' ', '')
    # a single address or 'min-max' range of 1-4 hex digits
    range_pattern = '[0-9a-fA-F]{1,4}(-[0-9a-fA-F]{1,4})?'
    match_pattern = "^(%(range)s)(;%(range)s;?)*$" % \
        {'range': range_pattern}
    # comma-joined ranges inside one path group
    item_pattern = "(%(range)s)(,%(range)s?)*" % \
        {'range': range_pattern}
    multi_match_pattern = "^(%(range)s)(;%(range)s;?)*$" % \
        {'range': item_pattern}
    if not re.match(match_pattern, fcp_list) and \
       not re.match(multi_match_pattern, fcp_list):
        errmsg = ("Invalid FCP address %s") % fcp_list
        raise exception.SDKInternalError(msg=errmsg)
    fcp_devices = {}
    path_no = 0
    # each ';' group is one path; each ','-separated item adds devices
    for _range in fcp_list.split(';'):
        for item in _range.split(','):
            # remove duplicate entries
            devices = set()
            if item != '':
                if '-' not in item:
                    # single device
                    fcp_addr = int(item, 16)
                    devices.add("%04x" % fcp_addr)
                else:
                    # a range of address
                    (_min, _max) = item.split('-')
                    _min = int(_min, 16)
                    _max = int(_max, 16)
                    for fcp_addr in range(_min, _max + 1):
                        devices.add("%04x" % fcp_addr)
                if fcp_devices.get(path_no):
                    fcp_devices[path_no].update(devices)
                else:
                    fcp_devices[path_no] = devices
        path_no = path_no + 1
    return fcp_devices
def shrink_fcp_list(fcp_list):
    """ Transform a FCP list to a compact, upper-cased string.

    :param fcp_list: (list) a list object contains FCPs.
    Case 1: only one FCP in the list.
        e.g. fcp_list = ['1A01']
    Case 2: all the FCPs are continuous.
        e.g. fcp_list =['1A01', '1A02', '1A03']
    Case 3: not all the FCPs are continuous.
        e.g. fcp_list = ['1A01', '1A02', '1A03',
                         '1A05',
                         '1AFF', '1B00', '1B01',
                         '1B04']
    Case 4: an empty list.
        e.g. fcp_list = []

    :return fcp_str: (str)
    Case 1: fcp_str = '1A01'
    Case 2: fcp_str = '1A01 - 1A03'
    Case 3: fcp_str = '1A01 - 1A03, 1A05,
                       1AFF - 1B01, 1B04'
    Case 4: fcp_str = ''

    NOTE: fcp_list is sorted and consumed in place, and entries are
    assumed to be same-width hex strings of uniform case so that the
    string sort is equivalent to a numeric sort.
    """
    def __transform_fcp_list_into_str(local_fcp_list):
        """ Transform the FCP list into a string
        by recursively do the transformation
        against the first continuous range of the list,
        which is being shortened by list.pop(0) on the fly

        :param local_fcp_list:
        (list) a list object contains FCPs.
        In Python, hex is stored in the form of strings.
        Because incrementing is done on integers,
        we need to convert hex to an integer for doing math.

        Appends the rendered section(s) to the enclosing fcp_section
        accumulator rather than returning a value.
        """
        # Case 1: only one FCP in the list.
        if len(local_fcp_list) == 1:
            fcp_section.append(local_fcp_list[0])
        else:
            start_fcp = int(local_fcp_list[0], 16)
            end_fcp = int(local_fcp_list[-1], 16)
            count = len(local_fcp_list) - 1
            # Case 2: all the FCPs are continuous.
            if start_fcp + count == end_fcp:
                # e.g. hex(int('1A01',16)) is '0x1a01'
                section_str = '{} - {}'.format(
                    hex(start_fcp)[2:], hex(end_fcp)[2:])
                fcp_section.append(section_str)
            # Case 3: not all the FCPs are continuous.
            else:
                start_fcp = int(local_fcp_list.pop(0), 16)
                for idx, fcp in enumerate(local_fcp_list.copy()):
                    next_fcp = int(fcp, 16)
                    # pop the fcp if it is continuous with the last
                    # e.g.
                    # when start_fcp is '1A01',
                    # pop '1A02' and '1A03'
                    if start_fcp + idx + 1 == next_fcp:
                        local_fcp_list.pop(0)
                        continue
                    # e.g.
                    # when start_fcp is '1A01',
                    # next_fcp '1A05' is NOT continuous with the last
                    else:
                        end_fcp = start_fcp + idx
                        # e.g.
                        # when start_fcp is '1A01',
                        # end_fcp is '1A03'
                        if start_fcp != end_fcp:
                            # e.g. hex(int('1A01',16)) is '0x1a01'
                            section_str = '{} - {}'.format(
                                hex(start_fcp)[2:], hex(end_fcp)[2:])
                        # e.g.
                        # when start_fcp is '1A05',
                        # end_fcp is '1A05'
                        else:
                            section_str = hex(start_fcp)[2:]
                        fcp_section.append(section_str)
                        break
        # recursively transform if FCP list still not empty
        if local_fcp_list:
            __transform_fcp_list_into_str(local_fcp_list)
    fcp_section = list()
    fcp_str = ''
    if fcp_list:
        # sort fcp_list in hex order, e.g.
        # before sort: ['1E01', '1A02', '1D03']
        # after sort: ['1A02', '1D03', '1E01']
        fcp_list.sort()
        __transform_fcp_list_into_str(fcp_list)
        # return a string contains all FCP
        fcp_str = ', '.join(fcp_section).upper()
    return fcp_str
def verify_fcp_list_in_hex_format(fcp_list):
    """Verify every FCP in *fcp_list* is a 4-digit hexadecimal string.

    :param fcp_list: (list) a list object containing FCPs.
    :raises SDKInvalidInputFormat: when fcp_list is not a list or any
        entry is not a 4-character hex string.
    """
    if not isinstance(fcp_list, list):
        errmsg = ('fcp_list ({}) is not a list object.'
                  '').format(fcp_list)
        raise exception.SDKInvalidInputFormat(msg=errmsg)
    # Verify each FCP should be a 4-digit hex
    for fcp in fcp_list:
        # Bug fix: guard against non-string entries — len()/iteration on
        # e.g. an int previously raised an unhelpful TypeError instead of
        # the documented SDKInvalidInputFormat.
        if not (isinstance(fcp, str) and len(fcp) == 4 and
                all(char in string.hexdigits for char in fcp)):
            errmsg = ('FCP list {} contains non-hex value.'
                      '').format(fcp_list)
            raise exception.SDKInvalidInputFormat(msg=errmsg)
import os
import six
import shutil
from zvmsdk import config
from zvmsdk import dist
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import database
from zvmsdk import utils as zvmutils
_VMOPS = None
CONF = config.CONF
LOG = log.LOG
def get_vmops():
    """Lazily create and return the shared module-level VMOps instance."""
    global _VMOPS
    if _VMOPS is None:
        _VMOPS = VMOps()
    return _VMOPS
class VMOps(object):
    def __init__(self):
        # client used for all requests to the z/VM SMAPI layer
        self._smtclient = smtclient.get_smtclient()
        # maps an os_version string to the matching distro handler
        self._dist_manager = dist.LinuxDistManager()
        # filesystem layout helper (guest folders, image/file repos)
        self._pathutils = zvmutils.PathUtils()
        # namelist grouping the guests managed by this SDK server
        self._namelist = zvmutils.get_namelist()
        # database accessors for guest and image records
        self._GuestDbOperator = database.GuestDbOperator()
        self._ImageDbOperator = database.ImageDbOperator()
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
return self._smtclient.get_power_state(userid)
def _get_cpu_num_from_user_dict(self, dict_info):
cpu_num = 0
for inf in dict_info:
if 'CPU ' in inf:
cpu_num += 1
return cpu_num
def _get_max_memory_from_user_dict(self, dict_info):
with zvmutils.expect_invalid_resp_data():
mem = dict_info[0].split(' ')[4]
return zvmutils.convert_to_mb(mem) * 1024
    def get_info(self, userid):
        """Aggregate power state, resource usage and OS info for a guest.

        :param str userid: the guest to query
        :returns: dict with power_state, max_mem_kb, mem_kb, num_cpu,
                  cpu_time_us, online_cpu_num, os_distro and kernel_info;
                  usage figures fall back to the user directory (with
                  zeroed counters) when no performance data is available.
        :raises SDKInternalError: when performance data cannot be parsed.
        """
        power_stat = self.get_power_state(userid)
        perf_info = self._smtclient.get_image_performance_info(userid)
        # Get the online CPU number, OS distro and kernel version
        try:
            act_cpus = self._smtclient.get_active_cpu_addrs(userid)
            act_cpus_num = len(act_cpus)
            LOG.debug('Online cpu info: %s, %d' % (act_cpus, act_cpus_num))
        except exception.SDKSMTRequestFailed as err:
            # query failure is non-fatal: report zero online CPUs
            msg = ('Failed to execute command on capture source vm %(vm)s '
                   'to get online cpu number with error %(err)s'
                   % {'vm': userid, 'err': err.results['response'][0]})
            LOG.error(msg)
            act_cpus_num = 0
        try:
            os_distro = self._smtclient.guest_get_os_version(userid)
            kernel_info = self._smtclient.guest_get_kernel_info(userid)
            LOG.debug('OS and kernel info: %s, %s' % (os_distro, kernel_info))
        except exception.SDKSMTRequestFailed as err:
            # likewise non-fatal: report empty distro/kernel strings
            msg = ('Failed to execute command on capture source vm %(vm)s '
                   'to get OS distro with error %(err)s'
                   % {'vm': userid, 'err': err.results['response'][0]})
            LOG.error(msg)
            os_distro = ''
            kernel_info = ''
        if perf_info:
            try:
                max_mem_kb = int(perf_info['max_memory'].split()[0])
                mem_kb = int(perf_info['used_memory'].split()[0])
                num_cpu = int(perf_info['guest_cpus'])
                cpu_time_us = int(perf_info['used_cpu_time'].split()[0])
            except (ValueError, TypeError, IndexError, AttributeError,
                    KeyError) as err:
                LOG.error('Parse performance_info encounter error: %s',
                          str(perf_info))
                raise exception.SDKInternalError(msg=str(err),
                                                 modID='guest')
            return {'power_state': power_stat,
                    'max_mem_kb': max_mem_kb,
                    'mem_kb': mem_kb,
                    'num_cpu': num_cpu,
                    'cpu_time_us': cpu_time_us,
                    'online_cpu_num': act_cpus_num,
                    'os_distro': os_distro,
                    'kernel_info': kernel_info}
        else:
            # virtual machine in shutdown state or not exists
            dict_info = self._smtclient.get_user_direct(userid)
            return {
                'power_state': power_stat,
                'max_mem_kb': self._get_max_memory_from_user_dict(dict_info),
                'mem_kb': 0,
                'num_cpu': self._get_cpu_num_from_user_dict(dict_info),
                'cpu_time_us': 0,
                'online_cpu_num': act_cpus_num,
                'os_distro': os_distro,
                'kernel_info': kernel_info}
    def get_adapters_info(self, userid):
        """Return the network adapter information of a guest.

        :param str userid: the user id of the guest
        :returns: dict in the form {'adapters': <info from smtclient>}
        :raises SDKInternalError: if no adapter info could be retrieved
        """
        adapters_info = self._smtclient.get_adapters_info(userid)
        if not adapters_info:
            msg = 'Get network information failed on: %s' % userid
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg, modID='guest')
        return {'adapters': adapters_info}
    def instance_metadata(self, instance, content, extra_md):
        # Intentional no-op placeholder; metadata handling not implemented.
        pass
    def add_instance_metadata(self):
        # Intentional no-op placeholder; metadata handling not implemented.
        pass
    def is_reachable(self, userid):
        """Reachable through IUCV communication channel.

        :param str userid: the guest to check
        :returns: the connection status reported by smtclient
        """
        return self._smtclient.get_guest_connection_status(userid)
    def wait_for_reachable(self, userid, timeout=CONF.guest.reachable_timeout):
        """Return until guest reachable or timeout.

        :param str userid: the guest to wait for
        :param int timeout: maximum seconds to keep polling
        """
        def _check_reachable():
            # Raising SDKRetryException makes looping_call poll again.
            if not self.is_reachable(userid):
                raise exception.SDKRetryException()
        # Poll every 5 seconds until reachable or timeout expires.
        zvmutils.looping_call(_check_reachable, 5, 0, 5, timeout,
                              exception.SDKRetryException)
def guest_start(self, userid, timeout=0):
""""Power on z/VM instance."""
LOG.info("Begin to power on vm %s", userid)
self._smtclient.guest_start(userid)
if timeout > 0:
self.wait_for_reachable(userid, timeout)
if not self.is_reachable(userid):
msg = ("compute node is not able to connect to the virtual "
"machine in %d seconds" % timeout)
raise exception.SDKGuestOperationError(rs=16, userid=userid,
msg=msg)
LOG.info("Complete power on vm %s", userid)
    def guest_stop(self, userid, **kwargs):
        """Power off a z/VM instance.

        :param str userid: the guest to power off
        :param dict kwargs: optional options passed through to smtclient
        """
        LOG.info("Begin to power off vm %s", userid)
        self._smtclient.guest_stop(userid, **kwargs)
        LOG.info("Complete power off vm %s", userid)
    def guest_softstop(self, userid, **kwargs):
        """Shut down the OS in the guest, then log the guest off.

        :param str userid: the guest to soft power off
        :param dict kwargs: optional options passed through to smtclient
        """
        LOG.info("Begin to soft power off vm %s", userid)
        self._smtclient.guest_softstop(userid, **kwargs)
        LOG.info("Complete soft power off vm %s", userid)
    def guest_pause(self, userid):
        """Pause a z/VM instance.

        :param str userid: the guest to pause
        """
        LOG.info("Begin to pause vm %s", userid)
        self._smtclient.guest_pause(userid)
        LOG.info("Complete pause vm %s", userid)
    def guest_unpause(self, userid):
        """Unpause a z/VM instance.

        :param str userid: the guest to unpause
        """
        LOG.info("Begin to unpause vm %s", userid)
        self._smtclient.guest_unpause(userid)
        LOG.info("Complete unpause vm %s", userid)
    def guest_reboot(self, userid):
        """Reboot a guest vm.

        :param str userid: the guest to reboot
        """
        LOG.info("Begin to reboot vm %s", userid)
        self._smtclient.guest_reboot(userid)
        LOG.info("Complete reboot vm %s", userid)
    def guest_reset(self, userid):
        """Reset z/VM instance.

        :param str userid: the guest to reset
        """
        LOG.info("Begin to reset vm %s", userid)
        self._smtclient.guest_reset(userid)
        LOG.info("Complete reset vm %s", userid)
def live_migrate_vm(self, userid, destination, parms, action):
"""Move an eligible, running z/VM(R) virtual machine transparently
from one z/VM system to another within an SSI cluster."""
# Check guest state is 'on'
state = self.get_power_state(userid)
if state != 'on':
LOG.error("Failed to live migrate guest %s, error: "
"guest is inactive, cann't perform live migrate." %
userid)
raise exception.SDKConflictError(modID='guest', rs=1,
userid=userid)
# Do live migrate
if action.lower() == 'move':
LOG.info("Moving the specific vm %s", userid)
self._smtclient.live_migrate_move(userid, destination, parms)
LOG.info("Complete move vm %s", userid)
if action.lower() == 'test':
LOG.info("Testing the eligiblity of specific vm %s", userid)
self._smtclient.live_migrate_test(userid, destination)
    def create_vm(self, userid, cpu, memory, disk_list,
                  user_profile, max_cpu, max_mem, ipl_from,
                  ipl_param, ipl_loadparam, dedicate_vdevs, loaddev, account,
                  comment_list, cschedule, cshare, rdomain, pcif):
        """Create z/VM userid into user directory for a z/VM instance.

        All directory options are passed straight through to smtclient;
        the new userid is then registered in the SMAPI namelist.
        """
        LOG.info("Creating the user directory for vm %s", userid)
        info = self._smtclient.create_vm(userid, cpu, memory,
                                         disk_list, user_profile,
                                         max_cpu, max_mem, ipl_from,
                                         ipl_param, ipl_loadparam,
                                         dedicate_vdevs, loaddev, account,
                                         comment_list, cschedule, cshare, rdomain,
                                         pcif)
        # add userid into smapi namelist
        self._smtclient.namelist_add(self._namelist, userid)
        return info
def create_disks(self, userid, disk_list):
LOG.info("Beging to create disks for vm: %(userid)s, list: %(list)s",
{'userid': userid, 'list': disk_list})
user_direct = self._smtclient.get_user_direct(userid)
exist_disks = []
for ent in user_direct:
if ent.strip().startswith('MDISK'):
md_vdev = ent.split()[1].strip()
exist_disks.append(md_vdev)
if exist_disks:
start_vdev = hex(int(max(exist_disks), 16) + 1)[2:].rjust(4, '0')
else:
start_vdev = None
info = self._smtclient.add_mdisks(userid, disk_list, start_vdev)
LOG.info("Complete create disks for vm: %s", userid)
return info
def delete_disks(self, userid, vdev_list):
LOG.info("Begin to delete disk on vm: %(userid), vdev list: %(list)s",
{'userid': userid, 'list': vdev_list})
# not support delete disks when guest is active
if self._smtclient.get_power_state(userid) == 'on':
func = 'delete disks when guest is active'
raise exception.SDKFunctionNotImplementError(func)
self._smtclient.remove_mdisks(userid, vdev_list)
LOG.info("Complete delete disks for vm: %s", userid)
def guest_config_minidisks(self, userid, disk_info):
LOG.info("Begin to configure disks on vm: %(userid), info: %(info)s",
{'userid': userid, 'info': disk_info})
if disk_info != []:
self._smtclient.process_additional_minidisks(userid, disk_info)
LOG.info("Complete configure disks for vm: %s", userid)
else:
LOG.info("No disk to handle on %s." % userid)
def guest_grow_root_volume(self, userid, os_version):
""" Punch the grow partition script to the target guest. """
# firstly check if user wants to extend the volume
if CONF.guest.extend_partition_fs.lower() != 'true':
return
LOG.debug('Begin to punch grow partition commands to guest: %s',
userid)
linuxdist = self._dist_manager.get_linux_dist(os_version)()
# get configuration commands
config_cmds = linuxdist.get_extend_partition_cmds()
# Creating tmp file with these cmds
temp_folder = self._pathutils.get_guest_temp_path(userid)
file_path = os.path.join(temp_folder, 'gpartvol.sh')
LOG.debug('Creating file %s to contain root partition extension '
'commands' % file_path)
with open(file_path, "w") as f:
f.write(config_cmds)
try:
self._smtclient.punch_file(userid, file_path, "X")
finally:
LOG.debug('Removing the folder %s ', temp_folder)
shutil.rmtree(temp_folder)
    def is_powered_off(self, instance_name):
        """Return True if the instance is powered off."""
        # Power state comes from smtclient; 'off' is the only off value.
        return self._smtclient.get_power_state(instance_name) == 'off'
    def delete_vm(self, userid):
        """Delete z/VM userid for the instance.

        :param str userid: the guest to delete
        """
        LOG.info("Begin to delete vm %s", userid)
        self._smtclient.delete_vm(userid)
        LOG.info("Complete delete vm %s", userid)
    def execute_cmd(self, userid, cmdStr):
        """Execute commands on the guest vm.

        :param str userid: the guest to run the command on
        :param str cmdStr: the command string to execute
        :returns: the command output from smtclient
        """
        LOG.debug("executing cmd: %s", cmdStr)
        return self._smtclient.execute_cmd(userid, cmdStr)
def set_hostname(self, userid, hostname, os_version):
"""Punch a script that used to set the hostname of the guest.
:param str guest: the user id of the guest
:param str hostname: the hostname of the guest
:param str os_version: version of guest operation system
"""
tmp_path = self._pathutils.get_guest_temp_path(userid)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
tmp_file = tmp_path + '/hostname.sh'
lnxdist = self._dist_manager.get_linux_dist(os_version)()
lines = lnxdist.generate_set_hostname_script(hostname)
with open(tmp_file, 'w') as f:
f.writelines(lines)
requestData = "ChangeVM " + userid + " punchfile " + \
tmp_file + " --class x"
LOG.debug("Punch script to guest %s to set hostname" % userid)
try:
self._smtclient._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch set_hostname script to userid '%s'. SMT "
"error: %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(tmp_path)
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, hostname=None,
skipdiskcopy=False):
LOG.info("Begin to deploy image on vm %s", userid)
if not skipdiskcopy:
os_version = self._smtclient.image_get_os_distro(image_name)
else:
os_version = image_name
if not self._smtclient.is_rhcos(os_version):
self._smtclient.guest_deploy(userid, image_name, transportfiles,
remotehost, vdev, skipdiskcopy)
# punch scripts to set hostname
if (transportfiles is None) and hostname:
self.set_hostname(userid, hostname, os_version)
else:
self._smtclient.guest_deploy_rhcos(userid, image_name,
transportfiles, remotehost, vdev, hostname,
skipdiskcopy)
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6):
LOG.info("Begin to capture vm %(userid), image name is %(name)s",
{'userid': userid, 'name': image_name})
self._smtclient.guest_capture(userid, image_name,
capture_type=capture_type,
compress_level=compress_level)
LOG.info("Complete capture image on vm %s", userid)
    def guest_list(self):
        """Return the list of guests recorded in the SDK database."""
        return self._smtclient.get_vm_list()
def get_definition_info(self, userid, **kwargs):
check_command = ["nic_coupled"]
direct_info = self._smtclient.get_user_direct(userid)
info = {}
info['user_direct'] = direct_info
for k, v in kwargs.items():
if k in check_command:
if (k == 'nic_coupled'):
info['nic_coupled'] = False
nstr = "NICDEF %s TYPE QDIO LAN SYSTEM" % v
for inf in direct_info:
if nstr in inf:
info['nic_coupled'] = True
break
else:
raise exception.SDKInvalidInputFormat(
msg=("invalid check option for user direct: %s") % k)
return info
def get_console_output(self, userid):
def append_to_log(log_data, log_path):
LOG.debug('log_data: %(log_data)r, log_path: %(log_path)r',
{'log_data': log_data, 'log_path': log_path})
with open(log_path, 'a+') as fp:
fp.write(log_data)
return log_path
LOG.info("Begin to capture console log on vm %s", userid)
log_size = CONF.guest.console_log_size * 1024
console_log = self._smtclient.get_user_console_output(userid)
log_path = self._pathutils.get_console_log_path(userid)
# TODO: need consider shrink log file size
append_to_log(console_log, log_path)
log_fp = open(log_path, 'rb')
try:
log_data, remaining = zvmutils.last_bytes(log_fp, log_size)
log_data = bytes.decode(log_data)
except Exception as err:
msg = ("Failed to truncate console log, error: %s" %
six.text_type(err))
LOG.error(msg)
raise exception.SDKInternalError(msg)
if remaining > 0:
LOG.info('Truncated console log returned, %d bytes ignored' %
remaining)
LOG.info("Complete get console output on vm %s", userid)
return log_data
    def check_guests_exist_in_db(self, userids, raise_exc=True):
        """Verify the given userid(s) are managed by this SDK instance.

        :param userids: a userid string or a list of userids
        :param bool raise_exc: when True, raise SDKObjectNotExistError on
            a missing or migrated guest; when False, return False instead
        :returns: True when all guests are in the DB and still on this
            host; False otherwise (when raise_exc is False, or when a
            guest is detected running on another host)
        """
        if not isinstance(userids, list):
            # convert userid string to list
            userids = [userids]

        all_userids = self.guest_list()
        userids_not_in_db = list(set(userids) - set(all_userids))
        if userids_not_in_db:
            if raise_exc:
                # log and raise exception
                userids_not_in_db = ' '.join(userids_not_in_db)
                LOG.error("Guest '%s' does not exist in guests database" %
                          userids_not_in_db)
                raise exception.SDKObjectNotExistError(
                    obj_desc=("Guest '%s'" % userids_not_in_db), modID='guest')
            else:
                return False
        else:
            userids_migrated = self._GuestDbOperator.get_migrated_guest_list()
            userids_in_migrated = list(set(userids) & set(userids_migrated))

            # case1 userid has been migrated.
            if userids_in_migrated:
                if raise_exc:
                    migrated_userids = ' '.join(userids_in_migrated)
                    LOG.error("Guest(s) '%s' has been migrated." %
                              migrated_userids)
                    raise exception.SDKObjectNotExistError(
                        obj_desc=("Guest(s) '%s'" % migrated_userids),
                        modID='guest')
                else:
                    return False

            flag = True
            for uid in userids:
                # case2 userid has been shudown and started on other host.
                if zvmutils.check_userid_on_others(uid):
                    flag = False
                    # Record the migration in the guest's DB comments so
                    # subsequent calls hit case1 above.
                    comment = self._GuestDbOperator.get_comments_by_userid(uid)
                    comment['migrated'] = 1
                    action = "update guest '%s' in database" % uid
                    with zvmutils.log_and_reraise_sdkbase_error(action):
                        self._GuestDbOperator.update_guest_by_userid(
                            uid, comments=comment)
            return flag
def live_resize_cpus(self, userid, count):
# Check power state is 'on'
state = self.get_power_state(userid)
if state != 'on':
LOG.error("Failed to live resize cpus of guest %s, error: "
"guest is inactive, cann't perform live resize." %
userid)
raise exception.SDKConflictError(modID='guest', rs=1,
userid=userid)
# Do live resize
self._smtclient.live_resize_cpus(userid, count)
LOG.info("Complete live resize cpu on vm %s", userid)
    def resize_cpus(self, userid, count):
        """Resize (offline) the CPU count defined for a guest.

        :param str userid: the guest to resize
        :param int count: the target number of CPUs
        """
        LOG.info("Begin to resize cpu on vm %s", userid)
        # Do resize
        self._smtclient.resize_cpus(userid, count)
        LOG.info("Complete resize cpu on vm %s", userid)
def live_resize_memory(self, userid, memory):
# Check power state is 'on'
state = self.get_power_state(userid)
if state != 'on':
LOG.error("Failed to live resize memory of guest %s, error: "
"guest is inactive, cann't perform live resize." %
userid)
raise exception.SDKConflictError(modID='guest', rs=1,
userid=userid)
# Do live resize
self._smtclient.live_resize_memory(userid, memory)
LOG.info("Complete live resize memory on vm %s", userid)
    def resize_memory(self, userid, memory):
        """Resize (offline) the memory defined for a guest.

        :param str userid: the guest to resize
        :param memory: the target memory size
        """
        LOG.info("Begin to resize memory on vm %s", userid)
        # Do resize
        self._smtclient.resize_memory(userid, memory)
        LOG.info("Complete resize memory on vm %s", userid)
import time
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import utils as zvmutils
_HOSTOPS = None
CONF = config.CONF
LOG = log.LOG
def get_hostops():
    """Return the module-level HOSTOps singleton, creating it on first use."""
    global _HOSTOPS
    if _HOSTOPS is None:
        _HOSTOPS = HOSTOps()
    return _HOSTOPS
class HOSTOps(object):
    """Operations against the z/VM host: capacity info, guest list,
    disk pool and volume queries (with optional caching)."""

    def __init__(self):
        self._smtclient = smtclient.get_smtclient()
        # Cache: volume name -> volume info dict, filled lazily.
        self._volume_infos = {}
        # Cached diskpool volumes plus expiry time and the pool they
        # were fetched for.
        self._volumes = None
        self.cache_expiration = time.time()
        self.disk_pool = None

    def get_info(self):
        """Return host resource information (cpu, memory, disk pool).

        :returns: dict with vcpus, memory, hypervisor and disk pool data
        """
        inv_info = self._smtclient.get_host_info()
        host_info = {}

        with zvmutils.expect_invalid_resp_data(inv_info):
            host_info['zcc_userid'] = inv_info['zcc_userid']
            host_info['zvm_host'] = inv_info['zvm_host']
            host_info['vcpus'] = int(inv_info['lpar_cpu_total'])
            host_info['vcpus_used'] = int(inv_info['lpar_cpu_used'])
            host_info['cpu_info'] = {}
            host_info['cpu_info'] = {'architecture': const.ARCHITECTURE,
                                     'cec_model': inv_info['cec_model'], }
            mem_mb = zvmutils.convert_to_mb(inv_info['lpar_memory_total'])
            host_info['memory_mb'] = mem_mb
            mem_mb_used = zvmutils.convert_to_mb(inv_info['lpar_memory_used'])
            host_info['memory_mb_used'] = mem_mb_used
            host_info['hypervisor_type'] = const.HYPERVISOR_TYPE
            # hypervisor_os is split on whitespace and the second token
            # on '.'; presumably "<name> <a.b.c>" -> version int abc.
            # TODO(review): confirm the exact hypervisor_os format.
            verl = inv_info['hypervisor_os'].split()[1].split('.')
            version = int(''.join(verl))
            host_info['hypervisor_version'] = version
            host_info['hypervisor_hostname'] = inv_info['hypervisor_name']
            host_info['ipl_time'] = inv_info['ipl_time']

        # Disk pool figures default to zero when no pool is configured.
        disk_pool = CONF.zvm.disk_pool
        if disk_pool is None:
            dp_info = {'disk_total': 0, 'disk_used': 0, 'disk_available': 0}
        else:
            diskpool_name = disk_pool.split(':')[1]
            dp_info = self.diskpool_get_info(diskpool_name)
        host_info.update(dp_info)
        return host_info

    def guest_list(self):
        """Return userids defined on this host (SSI-foreign guests removed)."""
        guest_list = self._smtclient.get_all_user_direct()
        with zvmutils.expect_invalid_resp_data(guest_list):
            # If the z/VM is an SSI cluster member, it could get
            # guests on other z/VMs in the same SSI cluster, need
            # get rid of these guests.
            if self._smtclient.host_get_ssi_info():
                new_guest_list = []
                for userid in guest_list:
                    if not zvmutils.check_userid_on_others(userid):
                        new_guest_list.append(userid)
                guest_list = new_guest_list
            return guest_list

    def _cache_enabled(self):
        # Caching is on when a positive interval is configured.
        return CONF.monitor.cache_interval > 0

    def diskpool_get_volumes(self, disk_pool):
        """Return the volumes of a disk pool, using the cache if valid.

        :param str disk_pool: "TYPE:NAME" disk pool specification
        """
        pool_name = disk_pool.split(':')[1].upper()
        if self._cache_enabled():
            # Invalidate the cache once it has expired.
            if (time.time() > self.cache_expiration):
                self._volumes = None

            # Reuse the cache only when it was built for the same pool.
            if self._volumes:
                if disk_pool == self.disk_pool:
                    return self._volumes
            self._volumes = self._smtclient.get_diskpool_volumes(pool_name)

            self.cache_expiration = time.time() + \
                float(CONF.monitor.cache_interval * 10)
            self.disk_pool = disk_pool
            return self._volumes
        else:
            self._volumes = self._smtclient. \
                get_diskpool_volumes(pool_name)
            self.disk_pool = disk_pool
            return self._volumes

    def get_volume_info(self, volume_name):
        """Return the info of a single volume, refreshing the cache on miss.

        :param str volume_name: the DASD volume name
        :raises ZVMNotFound: if the volume is unknown even after refresh
        """
        update_needed = False
        with zvmutils.expect_invalid_resp_data():
            if self._volume_infos is not None:
                volume_info = self._volume_infos.get(volume_name)
                if not volume_info:
                    update_needed = True
                else:
                    return volume_info
            else:
                update_needed = True
            if update_needed:
                # results of get_volume_info() is the format like:
                # {'IAS100': { 'volume_type': '3390-54',
                #             'volume_size': '60102'},
                #  'IAS101': { 'volume_type': '3390-09',
                #             'volume_size': '60102'}}
                self._volume_infos = self._smtclient.get_volume_info()
                volume_info = self._volume_infos.get(volume_name)
                if not volume_info:
                    msg = ("Not found the volume info for the"
                           " volume %(volume)s: make sure the volume"
                           " is in the disk_pool configured for sdkserver.") \
                           % {'volume': volume_name}
                    raise exception.ZVMNotFound(msg=msg)
                else:
                    return volume_info

    def diskpool_get_info(self, pool):
        """Return disk pool usage with sizes normalized to whole GiB.

        :param str pool: the disk pool name
        :raises SDKInternalError: on an unrecognized size format
        """
        dp_info = self._smtclient.get_diskpool_info(pool)
        with zvmutils.expect_invalid_resp_data(dp_info):
            for k in list(dp_info.keys()):
                s = dp_info[k].strip().upper()
                if s.endswith('G'):
                    # "<int>.<frac>G" -> round to nearest whole GiB.
                    sl = s[:-1].split('.')
                    n1, n2 = int(sl[0]), int(sl[1])
                    if n2 >= 5:
                        n1 += 1
                    dp_info[k] = n1
                elif s.endswith('M'):
                    # Drops the last three characters before parsing;
                    # assumes the value looks like "<int>.<d>M" -- TODO
                    # confirm the SMT size format always has one decimal.
                    n_mb = int(s[:-3])
                    n_gb, n_ad = n_mb // 1024, n_mb % 1024
                    if n_ad >= 512:
                        n_gb += 1
                    dp_info[k] = n_gb
                else:
                    exp = "ending with a 'G' or 'M'"
                    errmsg = ("Invalid diskpool size format: %(invalid)s; "
                              "Expected: %(exp)s") % {'invalid': s, 'exp': exp}
                    LOG.error(errmsg)
                    raise exception.SDKInternalError(msg=errmsg)

        return dp_info

    def host_get_ssi_info(self):
        """Return the SSI cluster info of this host (empty if not SSI)."""
        return self._smtclient.host_get_ssi_info()
import netaddr
import six
import ast
from zvmsdk import config
from zvmsdk import constants
from zvmsdk import exception
from zvmsdk import hostops
from zvmsdk import imageops
from zvmsdk import log
from zvmsdk import monitor
from zvmsdk import networkops
from zvmsdk import vmops
from zvmsdk import smtclient
from zvmsdk import volumeop
from zvmsdk import database
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG
def check_guest_exist(check_index=0):
    """Decorator verifying the userid argument(s) exist in the database.

    The userid (or list of userids) found at position ``check_index`` of
    the wrapped method's arguments is upper-cased before the call.

    :param check_index: The parameter index of userid(s), default as 0
    """
    def outer(f):
        @six.wraps(f)
        def inner(self, *args, **kw):
            target = args[check_index]
            if isinstance(target, list):
                # convert all userids to upper case
                userids = [uid.upper() for uid in target]
                replacement = userids
            else:
                # convert the userid to upper case
                replacement = target.upper()
                userids = [replacement]
            # Rebuild the positional args with the upper-cased value(s).
            new_args = (args[:check_index] + (replacement,) +
                        args[check_index + 1:])
            self._vmops.check_guests_exist_in_db(userids)
            return f(self, *new_args, **kw)
        return inner
    return outer
def check_fcp_exist(check_index=0):
    """Decorator verifying the FCP argument exists in the database.

    :param check_index: The parameter index of fcp, default as 0
    """
    def outer(f):
        @six.wraps(f)
        def inner(self, *args, **kw):
            # Validate the FCP before invoking the wrapped API.
            self._volumeop.check_fcp_exist_in_db(args[check_index])
            return f(self, *args, **kw)
        return inner
    return outer
class SDKAPI(object):
"""Compute action interfaces."""
    def __init__(self, **kwargs):
        # Wire up all operation backends; kwargs are currently unused.
        self._vmops = vmops.get_vmops()
        self._smtclient = smtclient.get_smtclient()
        self._hostops = hostops.get_hostops()
        self._networkops = networkops.get_networkops()
        self._imageops = imageops.get_imageops()
        self._monitor = monitor.get_monitor()
        self._volumeop = volumeop.get_volumeop()
        self._GuestDbOperator = database.GuestDbOperator()
        self._NetworkDbOperator = database.NetworkDbOperator()
    @check_guest_exist()
    def guest_start(self, userid, timeout=0):
        """Power on a virtual machine.

        :param str userid: the id of the virtual machine to be power on
        :param int timeout: the timeout of waiting virtual machine reachable
                            default as 0, which mean not wait for virtual
                            machine reachable status

        :returns: None
        """
        action = "start guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_start(userid, timeout)
    @check_guest_exist()
    def guest_stop(self, userid, **kwargs):
        """Power off a virtual machine.

        :param str userid: the id of the virtual machine to be power off
        :param dict kwargs:
               - timeout=<value>:
                 Integer, time to wait for vm to be deactivate, the
                 recommended value is 300
               - poll_interval=<value>
                 Integer, how often to signal guest while waiting for it
                 to be deactivate, the recommended value is 20

        :returns: None
        """
        action = "stop guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_stop(userid, **kwargs)
    @check_guest_exist()
    def guest_softstop(self, userid, **kwargs):
        """Issue a shutdown command to shutdown the OS in a virtual
        machine and then log the virtual machine off z/VM..

        :param str userid: the id of the virtual machine to be power off
        :param dict kwargs:
               - timeout=<value>:
                 Integer, time to wait for vm to be deactivate, the
                 recommended value is 300
               - poll_interval=<value>
                 Integer, how often to signal guest while waiting for it
                 to be deactivate, the recommended value is 20

        :returns: None
        """
        action = "soft stop guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_softstop(userid, **kwargs)
    @check_guest_exist()
    def guest_reboot(self, userid):
        """Reboot a virtual machine
        :param str userid: the id of the virtual machine to be reboot
        :returns: None
        """
        action = "reboot guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_reboot(userid)
    @check_guest_exist()
    def guest_reset(self, userid):
        """reset a virtual machine
        :param str userid: the id of the virtual machine to be reset
        :returns: None
        """
        action = "reset guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_reset(userid)
    @check_guest_exist()
    def guest_pause(self, userid):
        """Pause a virtual machine.

        :param str userid: the id of the virtual machine to be paused
        :returns: None
        """
        action = "pause guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_pause(userid)
    @check_guest_exist()
    def guest_unpause(self, userid):
        """Unpause a virtual machine.

        :param str userid: the id of the virtual machine to be unpaused
        :returns: None
        """
        action = "unpause guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_unpause(userid)
    @check_guest_exist()
    def guest_get_power_state(self, userid):
        """Returns power state.

        :param str userid: the id of the virtual machine
        :raises SDKObjectNotExistError: if the guest is in the DB but its
            user directory no longer exists on z/VM
        """
        # The DB entry may outlive the actual z/VM directory entry.
        if not zvmutils.check_userid_exist(userid.upper()):
            LOG.error("User directory of '%s' does not exist "
                      "although it is in DB. The guest could have been "
                      "deleted out of z/VM Cloud Connector." % userid)
            raise exception.SDKObjectNotExistError(
                    obj_desc=("Guest '%s'" % userid), modID='guest', rs=3)

        action = "get power state of guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.get_power_state(userid)
    @check_guest_exist()
    def guest_get_info(self, userid):
        """Get the status of a virtual machine.

        :param str userid: the id of the virtual machine

        :returns: Dictionary contains:
                  power_state: (str) the running state, one of on | off
                  max_mem_kb: (int) the maximum memory in KBytes allowed
                  mem_kb: (int) the memory in KBytes used by the instance
                  num_cpu: (int) the number of virtual CPUs for the instance
                  cpu_time_us: (int) the CPU time used in microseconds
        """
        action = "get info of guest '%s'" % userid
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.get_info(userid)
    def guest_get_power_state_real(self, userid):
        """Returns power state of a virtual machine from hypervisor.

        Unlike guest_get_power_state, this does not require the guest to
        exist in the SDK database.
        """
        action = "get power state of guest '%s' from hypervisor" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.get_power_state(userid)
    def guest_get_adapters_info(self, userid):
        """Get the network information of a virtual machine.
        this userid may not in zCC.

        :param str userid: the id of the virtual machine

        :returns: Dictionary with an 'adapters' entry holding the
            adapter info from vmops (presumably including ip and mac
            addresses -- see vmops.get_adapters_info).
        """
        action = "get network info of guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.get_adapters_info(userid)
def guest_get_user_direct(self, userid):
"""Get user direct of the specified guest vm
:param str userid: the user id of the guest vm
:returns: Dictionary describing user direct and check info result
:rtype: dict
"""
action = "get the user direct of guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
inst_info = self._vmops.get_definition_info(userid)
user_direct = inst_info['user_direct']
item = -1
new_info = ""
for info in user_direct:
item += 1
# replace password with ******
if info.startswith('USER') or info.startswith('IDENTITY'):
fields = info.split()
for i in range(len(fields)):
if i != 2:
new_info += (fields[i] + ' ')
else:
new_info += ('******' + ' ')
user_direct[item] = new_info
break
inst_info['user_direct'] = user_direct
return inst_info
    def guest_list(self):
        """list names of all the VMs on this host.

        :returns: names of the vm on this host, in a list.
        """
        action = "list guests on host"
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.guest_list()
    def host_get_info(self):
        """ Retrieve host information including host, memory, disk etc.

        :returns: Dictionary describing resources
        """
        action = "get host information"
        # Delegate to hostops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._hostops.get_info()
def host_get_diskpool_volumes(self, disk_pool=None):
""" Retrieve diskpool volumes.
:param str disk_pool: the disk pool info. It use ':' to separate
disk pool type and pool name, eg "ECKD:eckdpool" or "FBA:fbapool"
:returns: Dictionary describing disk pool usage info
"""
# disk_pool is optional. disk_pool default to None because
# it is more convenient for users to just type function name when
# they want to get the disk pool info of CONF.zvm.disk_pool.
# The default value of CONF.zvm.disk_pool is None, if it's configured,
# the format must be "ECKD:eckdpool" or "FBA:fbapool".
disk_pool = disk_pool or CONF.zvm.disk_pool
if disk_pool is None:
# Support disk_pool not configured, return empty list
return {}
if ':' not in disk_pool:
msg = ('Invalid input parameter disk_pool, expect ":" in'
'disk_pool, eg. ECKD:eckdpool')
LOG.error(msg)
raise exception.SDKInvalidInputFormat(msg)
diskpool_type = disk_pool.split(':')[0].upper()
if diskpool_type not in ('ECKD', 'FBA'):
msg = ('Invalid disk pool type found in disk_pool, expect'
'disk_pool like ECKD:eckdpool or FBA:fbapool')
LOG.error(msg)
raise exception.SDKInvalidInputFormat(msg)
action = "get the volumes of disk pool: '%s'" % disk_pool
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._hostops.diskpool_get_volumes(disk_pool)
def host_get_volume_info(self, volume=None):
""" Retrieve volume information.
:param str volume: the volume name to identify the DASD device.
It's 1 to 6 hexadecimal characters.
:returns: Dictionary describing the volume description info
"""
volume_name = volume
if volume_name is None:
errmsg = ("Invalid volume input None, volume must be specified.")
LOG.error(errmsg)
raise exception.SDKInvalidInputFormat(msg=errmsg)
action = "get information of the volume: '%s'" % volume_name
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._hostops.get_volume_info(volume_name.upper())
    def host_get_guest_list(self):
        """list names of all the VMs on the host.

        :returns: names of the vm on this hypervisor, in a list.
        """
        action = "list guests on the host"
        # Delegate to hostops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._hostops.guest_list()
def host_diskpool_get_info(self, disk_pool=None):
""" Retrieve diskpool information.
:param str disk_pool: the disk pool info. It use ':' to separate
disk pool type and pool name, eg "ECKD:eckdpool" or "FBA:fbapool"
:returns: Dictionary describing disk pool usage info
"""
# disk_pool is optional. disk_pool default to None because
# it is more convenient for users to just type function name when
# they want to get the disk pool info of CONF.zvm.disk_pool.
# The default value of CONF.zvm.disk_pool is None, if it's configured,
# the format must be "ECKD:eckdpool" or "FBA:fbapool".
disk_pool = disk_pool or CONF.zvm.disk_pool
if disk_pool is None:
# Return 0 directly if disk_pool not configured
return {'disk_total': 0, 'disk_used': 0, 'disk_available': 0}
if ':' not in disk_pool:
msg = ('Invalid input parameter disk_pool, expect ":" in'
'disk_pool, eg. ECKD:eckdpool')
LOG.error(msg)
raise exception.SDKInvalidInputFormat(msg)
diskpool_type = disk_pool.split(':')[0].upper()
diskpool_name = disk_pool.split(':')[1]
if diskpool_type not in ('ECKD', 'FBA'):
msg = ('Invalid disk pool type found in disk_pool, expect'
'disk_pool like ECKD:eckdpool or FBA:fbapool')
LOG.error(msg)
raise exception.SDKInvalidInputFormat(msg)
action = "get information of disk pool: '%s'" % disk_pool
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._hostops.diskpool_get_info(diskpool_name)
    def image_delete(self, image_name):
        """Delete image from image repository

        :param image_name: the name of the image to be deleted
        """
        try:
            self._imageops.image_delete(image_name)
        except exception.SDKBaseException:
            # Log for diagnosis, then let the caller handle the error.
            LOG.error("Failed to delete image '%s'" % image_name)
            raise
    def image_get_root_disk_size(self, image_name):
        """Get the root disk size of the image

        :param image_name: the image name in image Repository
        :returns: the disk size in units CYL or BLK
        """
        try:
            return self._imageops.image_get_root_disk_size(image_name)
        except exception.SDKBaseException:
            # Log for diagnosis, then let the caller handle the error.
            LOG.error("Failed to get root disk size units of image '%s'" %
                      image_name)
            raise
    def image_import(self, image_name, url, image_meta, remote_host=None):
        """Import image to zvmsdk image repository

        :param image_name: image name that can be uniquely identify an image
        :param str url: image url to specify the location of image such as
               http://netloc/path/to/file.tar.gz.0
               https://netloc/path/to/file.tar.gz.0
               file:///path/to/file.tar.gz.0
        :param dict image_meta:
               a dictionary to describe the image info, such as md5sum,
               os_version. For example:
               {'os_version': 'rhel6.2',
               'md5sum': ' 46f199c336eab1e35a72fa6b5f6f11f5',
               'disk_type': 'DASD'}
        :param string remote_host:
                if the image url schema is file, the remote_host is used to
                indicate where the image comes from, the format is username@IP
                eg. [email protected], the default value is None, it indicate
                the image is from a local file system. If the image url schema
                is http/https, this value will be useless
        """
        try:
            self._imageops.image_import(image_name, url, image_meta,
                                        remote_host=remote_host)
        except exception.SDKBaseException:
            # Log for diagnosis, then let the caller handle the error.
            LOG.error("Failed to import image '%s'" % image_name)
            raise
    def image_query(self, imagename=None):
        """Get the list of image info in image repository

        :param imagename:  Used to retrieve the specified image info,
               if not specified, all images info will be returned
        :returns: A list that contains the specified or all images info
        """
        try:
            return self._imageops.image_query(imagename)
        except exception.SDKBaseException:
            # Log for diagnosis, then let the caller handle the error.
            LOG.error("Failed to query image")
            raise
    def image_export(self, image_name, dest_url, remote_host=None):
        """Export the image to the specified location

        :param image_name: image name that can be uniquely identify an image
        :param dest_url: the location of exported image, eg.
               file:///opt/images/export.img, now only support export to remote
               server or local server's file system
        :param remote_host: the server that the image will be export to, if
               remote_host is None, the image will be stored in the dest_path
               in local server, the format is username@IP eg. [email protected]
        :returns a dictionary that contains the exported image info
                {
                'image_name': the image_name that exported
                'image_path': the image_path after exported
                'os_version': the os version of the exported image
                'md5sum': the md5sum of the original image
                'comments': the comments of the original image
                }
        """
        try:
            return self._imageops.image_export(image_name, dest_url,
                                               remote_host)
        except exception.SDKBaseException:
            # Log for diagnosis, then let the caller handle the error.
            LOG.error("Failed to export image '%s'" % image_name)
            raise
    @check_guest_exist()
    def guest_deploy(self, userid, image_name, transportfiles=None,
                     remotehost=None, vdev=None, hostname=None,
                     skipdiskcopy=False):
        """ Deploy the image to vm.

        :param userid: (str) the user id of the vm
        :param image_name: (str) If the skipdiskcopy is False, this would be
               used as the name of image that used to deploy the vm;
               Otherwise, this value should be the os version.
        :param transportfiles: (str) the files that used to customize the vm
        :param remotehost: the server where the transportfiles located, the
               format is username@IP, eg [email protected]
        :param vdev: (str) the device that image will be deploy to
        :param hostname: (str) the hostname of the vm. This parameter will be
               ignored if transportfiles present.
        :param skipdiskcopy: (bool) whether to skip the disk copy process.
               If True, the os version should be specified in the parameter
               image_name.
        """
        action = ("deploy image '%(img)s' to guest '%(vm)s'" %
                  {'img': image_name, 'vm': userid})
        # Delegate to vmops; SDK errors are logged with the action context.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_deploy(userid, image_name, transportfiles,
                                     remotehost, vdev, hostname, skipdiskcopy)
@check_guest_exist()
def guest_capture(self, userid, image_name, capture_type='rootonly',
                  compress_level=6):
    """Capture a guest to generate an image.

    :param userid: (str) the user id of the vm
    :param image_name: (str) the unique name of the image produced by
        the capture
    :param capture_type: (str) what to capture:
        'rootonly' - only the root device is captured
        'alldisks' - all devices of the userid are captured
    :param compress_level: compression level of the image, 6 by default
    """
    subs = {'vm': userid, 'img': image_name}
    action = ("capture guest '%(vm)s' to generate image '%(img)s'" % subs)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.guest_capture(userid, image_name,
                                  capture_type=capture_type,
                                  compress_level=compress_level)
@check_guest_exist()
def guest_create_nic(self, userid, vdev=None, nic_id=None,
                     mac_addr=None, active=False):
    """Create a nic for the vm by adding a NICDEF record to the user
    direct.

    :param str userid: the user id of the vm
    :param str vdev: nic device number, 1- to 4- hexadecimal digits
    :param str nic_id: nic identifier
    :param str mac_addr: mac address; only used when changing the
        guest's user direct. Format should be xx:xx:xx:xx:xx:xx,
        where x is a hexadecimal digit
    :param bool active: whether to add the nic on the active guest
        system
    :returns: nic device number, 1- to 4- hexadecimal digits
    :rtype: str
    """
    # Validate the optional mac address before delegating.
    if mac_addr is not None and not zvmutils.valid_mac_addr(mac_addr):
        raise exception.SDKInvalidInputFormat(
            msg=("Invalid mac address, format should be "
                 "xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit"))
    return self._networkops.create_nic(userid, vdev=vdev, nic_id=nic_id,
                                       mac_addr=mac_addr, active=active)
@check_guest_exist()
def guest_delete_nic(self, userid, vdev, active=False):
    """Delete a nic from the vm.

    :param str userid: the user id of the vm
    :param str vdev: nic device number, 1- to 4- hexadecimal digits
    :param bool active: whether to delete the nic on the active guest
        system
    """
    netops = self._networkops
    netops.delete_nic(userid, vdev, active=active)
@check_guest_exist()
def guest_get_definition_info(self, userid, **kwargs):
    """Return definition info of the specified guest vm; can also be
    used to check specific entries.

    :param str userid: the user id of the guest vm
    :param dict kwargs: selects specific info to check in the user
        direct. Valid keywords:
        nic_coupled=<vdev>, where <vdev> is the virtual device number
        of the nic whose couple status should be checked.
    :returns: dictionary describing the user direct and any check
        results
    :rtype: dict
    """
    action = "get the definition info of guest '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        info = self._vmops.get_definition_info(userid, **kwargs)
    return info
"""Parse the nics' info from the user directory
:param user_direct: (str) the user directory info to be parsed
"""
def _parse_nic_info(self, user_direct):
nics_info = {}
for nic_info in user_direct:
if nic_info.startswith('NICDEF'):
split_info = nic_info.split()
nic_id = split_info[1].strip()
count = 2
one_nic = nics_info.get(nic_id, {})
while count < len(split_info):
if split_info[count] == 'LAN':
one_nic['vswitch'] = split_info[count + 2].strip()
count += 3
continue
elif split_info[count] == 'MACID':
one_nic['mac'] = split_info[count + 1].strip()
count += 2
continue
elif split_info[count] == 'VLAN':
one_nic['vid'] = split_info[count + 1].strip()
count += 2
continue
else:
count += 1
nics_info[nic_id] = one_nic
return nics_info
def guest_register(self, userid, meta, net_set, port_macs=None):
    """Register vm by inserting or updating DB for e.g. migration and onboarding

    :param userid: (str) the userid of the vm to be relocated or tested
    :param meta: (str) the metadata of the vm to be relocated or tested
    :param net_set: (str) the net_set of the vm, default is 1.
    :param port_macs: (dict) the virtual interface port id maps with mac id
        Format: { macid1 : portid1, macid2 : portid2}.
        For example,
        {
            'EF5091':'6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7',
            '69FCF1':'389dee5e-7b03-405c-b1e8-7c9c235d1425'
        }
    """
    # Input validation: port_macs, when given, must be a dict.
    if port_macs is not None and not isinstance(port_macs, dict):
        msg = ('Invalid input parameter port_macs, expect dict')
        LOG.error(msg)
        raise exception.SDKInvalidInputFormat(msg)
    userid = userid.upper()
    # The guest must already have a z/VM user directory entry.
    if not zvmutils.check_userid_exist(userid):
        LOG.error("User directory '%s' does not exist." % userid)
        raise exception.SDKObjectNotExistError(
            obj_desc=("Guest '%s'" % userid), modID='guest')
    else:
        action = "query the guest in database."
        with zvmutils.log_and_reraise_sdkbase_error(action):
            guest = self._GuestDbOperator.get_guest_by_userid(userid)
        if guest is not None:
            # The below handling is for migration
            action = "list all guests in database which has been migrated."
            with zvmutils.log_and_reraise_sdkbase_error(action):
                guests = self._GuestDbOperator.get_migrated_guest_list()
            # NOTE(review): substring match against str(guests) could
            # false-positive if one userid is a prefix of another —
            # confirm get_migrated_guest_list()'s return shape.
            if userid in str(guests):
                """change comments for vm"""
                # Clear the migrated flag now that the guest is back.
                comments = self._GuestDbOperator.get_comments_by_userid(
                    userid)
                comments['migrated'] = 0
                action = "update guest '%s' in database" % userid
                with zvmutils.log_and_reraise_sdkbase_error(action):
                    self._GuestDbOperator.update_guest_by_userid(userid,
                        comments=comments)
                LOG.info("Guest %s comments updated." % userid)
            # We just return no matter onboarding or migration
            # since the guest exists
            return
        # add one record for new vm for both onboarding and migration,
        # and even others later.
        action = "add guest '%s' to database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.add_guest_registered(userid, meta,
                                                       net_set)
        # We need to query and add vswitch to the database.
        action = "add switches of guest '%s' to database" % userid
        # The result of get_adpaters_info
        # [{'adapter_address': '1000', 'adapter_status': '02',
        # 'lan_owner': 'SYSTEM', 'lan_name': 'VSC11590',
        # 'mac_address': '02:55:36:00:00:10', 'mac_ip_version': '4',
        # 'mac_ip_address': '9.152.85.95'}]
        adapters_info = self._smtclient.get_adapters_info(userid)
        for adapter in adapters_info:
            interface = adapter.get('adapter_address')
            switch = adapter.get('lan_name')
            port = None
            if port_macs is not None:
                if adapter.get('mac_address'):
                    # Keep only the last 3 bytes (the MACID portion) of
                    # the mac address for the port_macs lookup.
                    mac = ''.join(
                        adapter.get('mac_address').split(':'))[6:].upper()
                    if mac in port_macs.keys():
                        port = port_macs[mac]
                if port is None:
                    LOG.warning("Port not found for nic %s, %s." %
                                (interface, port_macs))
                else:
                    LOG.info("Port found for nic %s." % interface)
            with zvmutils.log_and_reraise_sdkbase_error(action):
                self._NetworkDbOperator.switch_add_record(
                    userid, interface, port, switch)
        LOG.info("Guest %s registered." % userid)
# Deregister (not delete) the guest; this is unrelated to migration.
def guest_deregister(self, userid):
    """Remove a guest's DB records for an offboard (dismiss) request.

    :param userid: (str) the userid of the vm to be deregistered
    """
    uid = userid.upper()
    # Delete unconditionally: the VM may have been removed outside of
    # zCC (e.g. via smcli) while its DB records remain, so we do not
    # require it to exist in the LPAR or the zCC DB first.
    if not self._vmops.check_guests_exist_in_db(uid, raise_exc=False):
        LOG.warning("User directory '%s' does not exist in guest DB."
                    "But let's still delete it as there is also switch"
                    " table" % uid)
    action = "delete switches of guest '%s' from database" % uid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._NetworkDbOperator.switch_delete_record_for_userid(uid)
    action = "delete guest '%s' from database" % uid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._GuestDbOperator.delete_guest_by_userid(uid)
    LOG.info("Guest %s deregistered." % uid)
@check_guest_exist()
def guest_live_migrate(self, userid, dest_zcc_userid, destination,
                       parms, lgr_action):
    """Move an eligible, running z/VM(R) virtual machine transparently
    from one z/VM system to another within an SSI cluster.

    :param userid: (str) the userid of the vm to be relocated or tested
    :param dest_zcc_userid: (str) the userid of zcc on destination.
        If None, no userid is set into the guest.
    :param destination: (str) the system ID of the z/VM system to which
        the specified vm will be relocated or tested.
    :param parms: (dict) a dictionary of options for relocation.
        It may contain some of the below keys:
        {'maxtotal': i,
         'maxquiesce': i,
         'immediate': str}
        In which, 'maxtotal' indicates the maximum total time
        (in seconds) that the command issuer is willing to wait for the
        entire relocation to complete, or -1 to indicate there is no
        time limit.
        'maxquiesce' indicates the maximum quiesce time for this
        relocation. This is the amount of time (in seconds) a virtual
        machine may be stopped during a relocation attempt, or -1 to
        indicate there is no time limit.
        'immediate': if present, immediate=YES is set, which causes the
        VMRELOCATE command to do one early pass through virtual machine
        storage and then go directly to the quiesce stage.
    :param lgr_action: (str) indicates whether the action is a move or
        a test for the vm.
    """
    if lgr_action.lower() == 'move':
        if dest_zcc_userid is None or dest_zcc_userid.strip() == '':
            msg = "dest_zcc_userid is empty so it will not be set " \
                  "during LGR."
            LOG.info(msg)
        # Live_migrate the guest
        operation = "Move guest '%s' to SSI '%s'" % (userid, destination)
        with zvmutils.log_and_reraise_sdkbase_error(operation):
            self._vmops.live_migrate_vm(userid, destination,
                                        parms, lgr_action)
        # Mark the guest as migrated away in the local DB.
        comments = self._GuestDbOperator.get_comments_by_userid(userid)
        comments['migrated'] = 1
        action = "update guest '%s' in database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.update_guest_by_userid(userid,
                                                         comments=comments)
        # Skip IUCV authorization for RHCOS guests
        # NOTE(review): assumes index [2] of the guest DB row holds the
        # metadata/os string containing 'rhcos' — confirm the row layout.
        is_rhcos = 'rhcos' in self._GuestDbOperator.get_guest_by_userid(
            userid)[2].lower()
        if is_rhcos:
            LOG.debug("Skip IUCV authorization when migrating RHCOS "
                      "guests: %s" % userid)
        # Add authorization for new zcc.
        # This should be done after migration succeeds.
        # If the dest_zcc_userid is empty, nothing will be done because
        # this should be a onboarded guest and no permission to do it.
        if (dest_zcc_userid is not None and
                dest_zcc_userid.strip() != '' and
                not is_rhcos):
            cmd = ('echo -n %s > /etc/iucv_authorized_userid\n' %
                   dest_zcc_userid)
            rc = self._smtclient.execute_cmd(userid, cmd)
            # NOTE(review): execute_cmd may return command output rather
            # than a numeric return code; verify the rc != 0 comparison
            # matches its contract.
            if rc != 0:
                err_msg = ("Add authorization for new zcc failed")
                LOG.error(err_msg)
    if lgr_action.lower() == 'test':
        operation = "Test move guest '%s' to SSI '%s'" % (userid,
                                                          destination)
        with zvmutils.log_and_reraise_sdkbase_error(operation):
            self._vmops.live_migrate_vm(userid, destination,
                                        parms, lgr_action)
def guest_create(self, userid, vcpus, memory, disk_list=None,
                 user_profile='',
                 max_cpu=CONF.zvm.user_default_max_cpu,
                 max_mem=CONF.zvm.user_default_max_memory,
                 ipl_from='', ipl_param='', ipl_loadparam='',
                 dedicate_vdevs=None, loaddev=None, account='',
                 comment_list=None, cschedule='', cshare='',
                 rdomain='', pcif=''):
    """create a vm in z/VM

    :param userid: (str) the userid of the vm to be created
    :param vcpus: (int) amount of vcpus
    :param memory: (int) size of memory in MB
    :param disk_list: (list) a list of disks info for the guest.
        It has one dictionary that contains some of the below keys for
        each disk, the root disk should be the first element in the
        list, the format is:
        {'size': str,
         'format': str,
         'is_boot_disk': bool,
         'disk_pool': str}
        In which, 'size': case insensitive, the unit can be in
        Megabytes (M), Gigabytes (G), or number of cylinders/blocks, eg
        512M, 1g or just 2000.
        'format': can be ext2, ext3, ext4, xfs and none.
        'is_boot_disk': For root disk, this key must be set to indicate
        the image that will be deployed on this disk.
        'disk_pool': optional, if not specified, the disk will be
        created by using the value from configure file, the format is
        ECKD:eckdpoolname or FBA:fbapoolname.
        For example:
        [{'size': '1g',
          'is_boot_disk': True,
          'disk_pool': 'ECKD:eckdpool1'},
         {'size': '200000',
          'disk_pool': 'FBA:fbapool1',
          'format': 'ext3'},
         {'size': '1g',
          'format': 'ext3'}]
        In this case it will create one disk 0100 (in case the vdev
        for root disk is 0100) with size 1g from ECKD disk pool
        eckdpool1 for the guest, then set IPL 0100 in the guest's user
        directory, and it will create 0101 with 200000 blocks from
        FBA disk pool fbapool1, formatted with ext3. As for the third
        disk, if the disk_pool isn't configured in the configure file,
        the default value is None, the disk_pool here is None, report
        error. If it's configured, such as ECKD:eckdpool2, it will
        create 0102 with size 1g from ECKD diskpool eckdpool2.
    :param user_profile: (str) the profile for the guest
    :param max_cpu: (int) the maximum number of virtual cpus this user
        can define. The value should be a decimal value between 1 and 64.
    :param max_mem: (str) the maximum size of memory the user can
        define. The value should be specified by 1-4 bits of number
        suffixed by either M (Megabytes) or G (Gigabytes). And the
        number should be an integer.
    :param ipl_from: (str) where to ipl the guest from, it can be given
        by guest input param, e.g CMS.
    :param ipl_param: the param to use when IPL for as PARM
    :param ipl_loadparam: the param to use when IPL for as LOADPARM
    :param dedicate_vdevs: (list) the list of device vdevs to dedicate
        to the guest.
    :param loaddev: (dict) the loaddev parms to add in the guest
        directory. Currently supported keys: 'portname', 'lun'.
        Both 'portname' and 'lun' can specify only one one- to
        eight-byte hexadecimal value in the range of 0-FFFFFFFFFFFFFFFF
        The format should be:
        {'portname': str,
         'lun': str}
        The default is None (treated as an empty dict); previously the
        default was a literal {}, a mutable default argument.
    :param account: (str) account string, see
        https://www.ibm.com/docs/en/zvm/6.4?topic=SSB27U_6.4.0/
        com.ibm.zvm.v640.hcpa5/daccoun.htm#daccoun
    :param comment_list: (array) a list of comment string
    :param cschedule: a command input for schedule cpu pool
    :param cshare: a command input for share settings
    :param rdomain: a command input for relocation domain
    :param pcif: a command input for pci function
    """
    dedicate_vdevs = dedicate_vdevs or []
    # Fix for the mutable-default-argument pitfall: the old signature
    # used loaddev={}; None is now the default and is normalized to a
    # fresh dict per call (backward-compatible for all callers).
    loaddev = {} if loaddev is None else loaddev
    userid = userid.upper()

    if disk_list:
        # special case for swap disk, for boot from volume, might add
        # a swap disk but no disk pool given, then we use vdisk instead
        swap_only = False
        if len(disk_list) == 1:
            disk = disk_list[0]
            if 'format' in disk and disk['format'].lower() == 'swap':
                swap_only = True

        for disk in disk_list:
            if not isinstance(disk, dict):
                errmsg = ('Invalid "disk_list" input, it should be a '
                          'dictionary. Details could be found in doc.')
                LOG.error(errmsg)
                raise exception.SDKInvalidInputFormat(msg=errmsg)

            # 'size' is required for each disk
            if 'size' not in disk.keys():
                errmsg = ('Invalid "disk_list" input, "size" is required '
                          'for each disk.')
                LOG.error(errmsg)
                raise exception.SDKInvalidInputFormat(msg=errmsg)

            # check disk_pool (fall back to the configured default)
            disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
            if not swap_only:
                if disk_pool is None:
                    errmsg = ("Invalid disk_pool input, disk_pool should"
                              " be configured for sdkserver.")
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)
                # 'disk_pool' format check
                if ':' not in disk_pool or (disk_pool.split(':')[0].upper()
                        not in ['ECKD', 'FBA']):
                    errmsg = ("Invalid disk_pool input, its format must be"
                              " ECKD:eckdpoolname or FBA:fbapoolname")
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)
            else:
                # in this case, it's swap only, and we will check whether
                # no VDISK is allowed; if not allowed, then return error
                if disk_pool is None and CONF.zvm.swap_force_mdisk:
                    errmsg = ("Invalid disk_pool input, disk_pool should"
                              " be configured for sdkserver and use"
                              " VDISK as swap disk is not configured."
                              " check CONF.zvm.swap_force_mdisk for"
                              " additional information.")
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)

            # 'format' value check
            if ('format' in disk.keys()) and (disk['format'].lower() not in
                                              ('ext2', 'ext3', 'ext4',
                                               'swap', 'xfs', 'none')):
                errmsg = ("Invalid disk_pool input, supported 'format' "
                          "includes 'ext2', 'ext3', 'ext4', 'xfs', "
                          "'swap', 'none'")
                LOG.error(errmsg)
                raise exception.SDKInvalidInputFormat(msg=errmsg)

    if dedicate_vdevs and not isinstance(dedicate_vdevs, list):
        errmsg = ('Invalid "dedicate_vdevs" input, it should be a '
                  'list. Details could be found in doc.')
        LOG.error(errmsg)
        raise exception.SDKInvalidInputFormat(msg=errmsg)

    if loaddev and not isinstance(loaddev, dict):
        errmsg = ('Invalid "loaddev" input, it should be a '
                  'dictionary. Details could be found in doc.')
        LOG.error(errmsg)
        raise exception.SDKInvalidInputFormat(msg=errmsg)

    # Empty/falsy profile falls back to the configured default
    # (the old "len(user_profile) == 0" check was redundant).
    if not user_profile:
        user_profile = CONF.zvm.user_profile

    action = "create guest '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._vmops.create_vm(userid, vcpus, memory, disk_list,
                                     user_profile, max_cpu, max_mem,
                                     ipl_from, ipl_param, ipl_loadparam,
                                     dedicate_vdevs, loaddev, account,
                                     comment_list, cschedule, cshare,
                                     rdomain, pcif)
@check_guest_exist()
def guest_live_resize_cpus(self, userid, cpu_cnt):
    """Live resize the virtual cpus of a guest.

    :param userid: (str) the userid of the guest to be live resized
    :param cpu_cnt: (int) the number of virtual cpus the guest should
        have in the active state after the live resize; an integer
        between 1 and 64
    """
    action = ("live resize guest '%s' to have '%i' virtual cpus"
              % (userid, cpu_cnt))
    LOG.info("Begin to %s" % action)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.live_resize_cpus(userid, cpu_cnt)
    LOG.info("%s successfully." % action)
@check_guest_exist()
def guest_resize_cpus(self, userid, cpu_cnt):
    """Resize the virtual cpus defined for a guest.

    :param userid: (str) the userid of the guest to be resized
    :param cpu_cnt: (int) the number of virtual cpus the guest should
        have defined in its user directory after the resize; an
        integer between 1 and 64
    """
    action = ("resize guest '%s' to have '%i' virtual cpus"
              % (userid, cpu_cnt))
    LOG.info("Begin to %s" % action)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.resize_cpus(userid, cpu_cnt)
    LOG.info("%s successfully." % action)
@check_guest_exist()
def guest_live_resize_mem(self, userid, size):
    """Live resize the memory of a guest.

    :param userid: (str) the userid of the guest to be live resized
    :param size: (str) the memory size the guest should have available
        after the live resize. Specify 1-4 digits suffixed by either
        M (Megabytes) or G (Gigabytes); the number must be an integer.
    """
    action = ("live resize guest '%s' to have '%s' memory"
              % (userid, size))
    LOG.info("Begin to %s" % action)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.live_resize_memory(userid, size)
    LOG.info("%s successfully." % action)
@check_guest_exist()
def guest_resize_mem(self, userid, size):
    """Resize the memory defined for a guest.

    :param userid: (str) the userid of the guest to be resized
    :param size: (str) the memory size the guest should have defined
        in its user directory after the resize. Specify 1-4 digits
        suffixed by either M (Megabytes) or G (Gigabytes); the number
        must be an integer.
    """
    action = "resize guest '%s' to have '%s' memory" % (userid, size)
    LOG.info("Begin to %s" % action)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.resize_memory(userid, size)
    LOG.info("%s successfully." % action)
@check_guest_exist()
def guest_create_disks(self, userid, disk_list):
    """Add disks to an existing guest vm.

    :param userid: (str) the userid of the guest
    :param disk_list: (list) a list of disks info for the guest.
        It has one dictionary that contains some of the below keys for
        each disk, the root disk should be the first element in the
        list, the format is:
        {'size': str,
         'format': str,
         'is_boot_disk': bool,
         'disk_pool': str}
        In which, 'size': case insensitive, the unit can be in
        Megabytes (M), Gigabytes (G), or number of cylinders/blocks, eg
        512M, 1g or just 2000.
        'format': optional, can be ext2, ext3, ext4, xfs; if not
        specified, the disk will not be formatted.
        'is_boot_disk': For root disk, this key must be set to indicate
        the image that will be deployed on this disk.
        'disk_pool': optional, if not specified, the disk will be
        created by using the value from configure file, the format is
        ECKD:eckdpoolname or FBA:fbapoolname.
        For example:
        [{'size': '1g',
          'is_boot_disk': True,
          'disk_pool': 'ECKD:eckdpool1'},
         {'size': '200000',
          'disk_pool': 'FBA:fbapool1',
          'format': 'ext3'},
         {'size': '1g',
          'format': 'ext3'}]
        In this case it will create one disk 0100 (in case the vdev
        for root disk is 0100) with size 1g from ECKD disk pool
        eckdpool1 for the guest, then set IPL 0100 in the guest's user
        directory, and it will create 0101 with 200000 blocks from
        FBA disk pool fbapool1, formatted with ext3. As for the third
        disk, if the disk_pool isn't configured in the configure file,
        the default value is None, the disk_pool here is None, report
        error. If it's configured, such as ECKD:eckdpool2, it will
        create 0102 with size 1g from ECKD diskpool eckdpool2.
    """
    # Idiomatic emptiness test (covers both [] and None).
    if not disk_list:
        # nothing to do
        LOG.debug("No disk specified when calling guest_create_disks, "
                  "nothing happened")
        return

    for disk in disk_list:
        if not isinstance(disk, dict):
            errmsg = ('Invalid "disk_list" input, it should be a '
                      'dictionary. Details could be found in doc.')
            LOG.error(errmsg)
            raise exception.SDKInvalidInputFormat(msg=errmsg)
        # check disk_pool (fall back to the configured default)
        disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
        if disk_pool is None:
            errmsg = ("Invalid disk_pool input, it should be configured"
                      " for sdkserver.")
            LOG.error(errmsg)
            raise exception.SDKInvalidInputFormat(msg=errmsg)

    action = "create disks '%s' for guest '%s'" % (str(disk_list), userid)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._vmops.create_disks(userid, disk_list)
@check_guest_exist()
def guest_delete_disks(self, userid, disk_vdev_list):
    """Remove disks from an existing guest vm.

    :param userid: (str) the userid of the guest whose disks are
        removed
    :param disk_vdev_list: (list) the vdevs of the disks to delete,
        for example: ['0101', '0102']
    """
    action = ("delete disks '%s' from guest '%s'"
              % (str(disk_vdev_list), userid))
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.delete_disks(userid, disk_vdev_list)
@check_guest_exist()
def guest_nic_couple_to_vswitch(self, userid, nic_vdev,
                                vswitch_name, active=False, vlan_id=-1):
    """Couple a nic device to the specified vswitch.

    :param str userid: the user's name who owns the nic
    :param str nic_vdev: nic device number, 1- to 4- hexadecimal digits
    :param str vswitch_name: the name of the vswitch
    :param bool active: whether to make the change on the active guest
        system
    :param vlan_id: the VLAN ID of the NIC; the default -1 presumably
        means "no VLAN id given" — confirm against networkops
    """
    netops = self._networkops
    netops.couple_nic_to_vswitch(userid, nic_vdev,
                                 vswitch_name, active=active,
                                 vlan_id=vlan_id)
@check_guest_exist()
def guest_nic_uncouple_from_vswitch(self, userid, nic_vdev,
                                    active=False):
    """Disconnect a nic device from its network.

    :param str userid: the user's name who owns the nic
    :param str nic_vdev: nic device number, 1- to 4- hexadecimal digits
    :param bool active: whether to make the change on the active guest
        system
    """
    netops = self._networkops
    netops.uncouple_nic_from_vswitch(userid, nic_vdev,
                                     active=active)
def vswitch_get_list(self):
    """Return the names of all vswitches.

    :returns: vswitch name list
    :rtype: list
    """
    netops = self._networkops
    return netops.get_vswitch_list()
def vswitch_create(self, name, rdev=None, controller='*',
                   connection='CONNECT', network_type='ETHERNET',
                   router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
                   gvrp='GVRP', queue_mem=8, native_vid=1,
                   persist=True):
    """Create a vswitch.

    :param str name: the vswitch name
    :param str rdev: the real device number, a maximum of three devices,
        all 1-4 characters in length, delimited by blanks. 'NONE'
        may also be specified
    :param str controller: the vswitch's controller, it could be the
        userid controlling the real device, or '*' to specify that any
        available controller may be used
    :param str connection:
        - CONnect:
            Activate the real device connection.
        - DISCONnect:
            Do not activate the real device connection.
        - NOUPLINK:
            The vswitch will never have connectivity through
            the UPLINK port
    :param str network_type: Specifies the transport mechanism to be
        used for the vswitch, as follows: IP, ETHERNET
    :param str router:
        - NONrouter:
            The OSA-Express device identified in
            real_device_address= will not act as a router to the
            vswitch
        - PRIrouter:
            The OSA-Express device identified in
            real_device_address= will act as a primary router to the
            vswitch
        - Note: If the network_type is ETHERNET, this value must be
          unspecified; otherwise, if this value is unspecified, default
          is NONROUTER
    :param str/int vid: the VLAN ID. This can be any of the following
        values: UNAWARE, AWARE or 1-4094
    :param str port_type:
        - ACCESS:
            The default porttype attribute for
            guests authorized for the virtual switch.
            The guest is unaware of VLAN IDs and sends and
            receives only untagged traffic
        - TRUNK:
            The default porttype attribute for
            guests authorized for the virtual switch.
            The guest is VLAN aware and sends and receives tagged
            traffic for those VLANs to which the guest is authorized.
            If the guest is also authorized to the natvid, untagged
            traffic sent or received by the guest is associated with
            the native VLAN ID (natvid) of the virtual switch.
    :param str gvrp:
        - GVRP:
            Indicates that the VLAN IDs in use on the virtual
            switch should be registered with GVRP-aware switches on the
            LAN. This provides dynamic VLAN registration and VLAN
            registration removal for networking switches. This
            eliminates the need to manually configure the individual
            port VLAN assignments.
        - NOGVRP:
            Do not register VLAN IDs with GVRP-aware switches on
            the LAN. When NOGVRP is specified VLAN port assignments
            must be configured manually
    :param int queue_mem: A number between 1 and 8, specifying the QDIO
        buffer size in megabytes.
    :param int native_vid: the native vlan id, 1-4094 or None
    :param bool persist: whether create the vswitch in the permanent
        configuration for the system
    """
    # queue_mem must be within QDIO's allowed 1-8 MB range.
    if ((queue_mem < 1) or (queue_mem > 8)):
        errmsg = ('API vswitch_create: Invalid "queue_mem" input, '
                  'it should be 1-8')
        raise exception.SDKInvalidInputFormat(msg=errmsg)
    # When the switch is VLAN-aware (vid is numeric, or a string other
    # than 'UNAWARE'), native_vid must be 1-4094 or None.
    if isinstance(vid, int) or vid.upper() != 'UNAWARE':
        if ((native_vid is not None) and
                ((native_vid < 1) or (native_vid > 4094))):
            errmsg = ('API vswitch_create: Invalid "native_vid" input, '
                      'it should be 1-4094 or None')
            raise exception.SDKInvalidInputFormat(msg=errmsg)
    # Per the docstring note above: an ETHERNET-type vswitch must not
    # specify a router role, so force it to unspecified.
    if network_type.upper() == 'ETHERNET':
        router = None
    self._networkops.add_vswitch(name, rdev=rdev, controller=controller,
                                 connection=connection,
                                 network_type=network_type,
                                 router=router, vid=vid,
                                 port_type=port_type, gvrp=gvrp,
                                 queue_mem=queue_mem,
                                 native_vid=native_vid,
                                 persist=persist)
@check_guest_exist()
def guest_get_console_output(self, userid):
    """Fetch the console output of a guest virtual machine.

    :param str userid: the user id of the vm
    :returns: console log string
    :rtype: str
    """
    action = "get the console output of guest '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        console_log = self._vmops.get_console_output(userid)
    return console_log
def guest_delete(self, userid):
    """Delete a guest.

    :param userid: the user id of the vm
    """
    uid = userid.upper()
    # check guest exist in database or not
    if not self._vmops.check_guests_exist_in_db(uid, raise_exc=False):
        if not zvmutils.check_userid_exist(uid):
            # Not in the DB and no user directory either: nothing to do.
            LOG.debug("The guest %s does not exist." % uid)
            return
        # Directory exists but the guest is unknown to our DB: refuse.
        LOG.error("Guest '%s' does not exist in guests database" % uid)
        raise exception.SDKObjectNotExistError(
            obj_desc=("Guest '%s'" % uid), modID='guest')
    action = "delete guest '%s'" % uid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._vmops.delete_vm(uid)
@check_guest_exist()
def guest_inspect_stats(self, userid_list):
    """Get the statistics including cpu and mem of the guests

    :param userid_list: a single userid string or a list of guest
        userids
    :returns: dictionary describing the cpu statistics of the vm
        in the form {'UID1':
        {
        'guest_cpus': xx,
        'used_cpu_time_us': xx,
        'elapsed_cpu_time_us': xx,
        'min_cpu_count': xx,
        'max_cpu_limit': xx,
        'samples_cpu_in_use': xx,
        'samples_cpu_delay': xx,
        'used_mem_kb': xx,
        'max_mem_kb': xx,
        'min_mem_kb': xx,
        'shared_mem_kb': xx
        },
        'UID2':
        {
        'guest_cpus': xx,
        'used_cpu_time_us': xx,
        'elapsed_cpu_time_us': xx,
        'min_cpu_count': xx,
        'max_cpu_limit': xx,
        'samples_cpu_in_use': xx,
        'samples_cpu_delay': xx,
        'used_mem_kb': xx,
        'max_mem_kb': xx,
        'min_mem_kb': xx,
        'shared_mem_kb': xx
        }
        }
        for the guests that are shutdown or do not exist, no data is
        returned in the dictionary
    """
    # Accept a bare userid string as a convenience: normalize to list.
    if not isinstance(userid_list, list):
        userid_list = [userid_list]
    action = "get the statistics of guest '%s'" % str(userid_list)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._monitor.inspect_stats(userid_list)
@check_guest_exist()
def guest_inspect_vnics(self, userid_list):
    """Get the vnics statistics of the guest virtual machines

    :param userid_list: a single userid string or a list of guest
        userids
    :returns: dictionary describing the vnics statistics of the vm
        in the form
        {'UID1':
        [{
        'vswitch_name': xx,
        'nic_vdev': xx,
        'nic_fr_rx': xx,
        'nic_fr_tx': xx,
        'nic_fr_rx_dsc': xx,
        'nic_fr_tx_dsc': xx,
        'nic_fr_rx_err': xx,
        'nic_fr_tx_err': xx,
        'nic_rx': xx,
        'nic_tx': xx
        },
        ],
        'UID2':
        [{
        'vswitch_name': xx,
        'nic_vdev': xx,
        'nic_fr_rx': xx,
        'nic_fr_tx': xx,
        'nic_fr_rx_dsc': xx,
        'nic_fr_tx_dsc': xx,
        'nic_fr_rx_err': xx,
        'nic_fr_tx_err': xx,
        'nic_rx': xx,
        'nic_tx': xx
        },
        ]
        }
        for the guests that are shutdown or do not exist, no data is
        returned in the dictionary
    """
    # Accept a bare userid string as a convenience: normalize to list.
    if not isinstance(userid_list, list):
        userid_list = [userid_list]
    action = "get the vnics statistics of guest '%s'" % str(userid_list)
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._monitor.inspect_vnics(userid_list)
@check_guest_exist(check_index=1)
def vswitch_grant_user(self, vswitch_name, userid):
    """Grant a user access to the given vswitch.

    :param str vswitch_name: the name of the vswitch
    :param str userid: the user id of the vm
    """
    netops = self._networkops
    netops.grant_user_to_vswitch(vswitch_name, userid)
def vswitch_revoke_user(self, vswitch_name, userid):
    """Revoke a user's access to the given vswitch.

    :param str vswitch_name: the name of the vswitch
    :param str userid: the user id of the vm
    """
    netops = self._networkops
    netops.revoke_user_from_vswitch(vswitch_name, userid)
@check_guest_exist(check_index=1)
def vswitch_set_vlan_id_for_user(self, vswitch_name, userid, vlan_id):
    """Set the vlan id used when the user connects to the vswitch.

    :param str vswitch_name: the name of the vswitch
    :param str userid: the user id of the vm
    :param int vlan_id: the VLAN id
    """
    netops = self._networkops
    netops.set_vswitch_port_vlan_id(vswitch_name, userid, vlan_id)
@check_guest_exist()
def guest_config_minidisks(self, userid, disk_info):
    """Punch the script that is used to process additional disks to vm

    :param str userid: the user id of the vm
    :param disk_info: a list that contains disks info for the guest. It
        contains dictionaries that describe disk info for each disk.
        Each dictionary has 3 keys: format is required, vdev and
        mntdir are optional. For example, if vdev is not specified, it
        will start from the next vdev of CONF.zvm.user_root_vdev, eg.
        if CONF.zvm.user_root_vdev is 0100, zvmsdk will use 0101 as the
        vdev for the first additional disk in disk_info, and if mntdir
        is not specified, zvmsdk will use /mnt/ephemeral0 as the mount
        point of the first additional disk
        Here are some examples:
        [{'vdev': '0101',
          'format': 'ext3',
          'mntdir': '/mnt/ephemeral0'}]
        In this case, the zvmsdk will treat 0101 as the additional
        disk's vdev; it is formatted with ext3 and will be mounted to
        /mnt/ephemeral0
        [{'format': 'ext3'},
         {'format': 'ext4'}]
        In this case, if CONF.zvm.user_root_vdev is 0100, zvmsdk will
        configure the first additional disk as 0101, mount it to
        /mnt/ephemeral0 with ext3, and configure the second additional
        disk 0102, mount it to /mnt/ephemeral1 with ext4.
    """
    action = "config disks for userid '%s'" % userid
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.guest_config_minidisks(userid, disk_info)
    @check_guest_exist()
    def guest_grow_root_volume(self, userid, os_version):
        """ Punch script to guest to grow root partition and extend
        root file system.
        Note:
        1. Only multipath SCSI disk is supported.
        2. Only one partition is supported.
        3. xfs file system is not supported.

        :param str userid: the user id of the vm
        :param str os_version: operating system version of the guest
        """
        # Thin delegation; all the work happens inside the vmops layer.
        return self._vmops.guest_grow_root_volume(userid, os_version)
def vswitch_set(self, vswitch_name, **kwargs):
"""Change the configuration of an existing virtual switch
:param str vswitch_name: the name of the virtual switch
:param dict kwargs:
- grant_userid=<value>:
A userid to be added to the access list
- user_vlan_id=<value>:
user VLAN ID. Support following ways:
1. As single values between 1 and 4094. A maximum of four
values may be specified, separated by blanks.
Example: 1010 2020 3030 4040
2. As a range of two numbers, separated by a dash (-).
A maximum of two ranges may be specified.
Example: 10-12 20-22
- revoke_userid=<value>:
A userid to be removed from the access list
- real_device_address=<value>:
The real device address or the real device address and
OSA Express port number of a QDIO OSA
Express device to be used to create the switch to the virtual
adapter. If using a real device and an OSA Express port number,
specify the real device number followed by a period(.),
the letter 'P' (or 'p'), followed by the port number as a
hexadecimal number. A maximum of three device addresses,
all 1-7 characters in length, may be specified, delimited by
blanks. 'None' may also be specified
- port_name=<value>:
The name used to identify the OSA Expanded
adapter. A maximum of three port names, all 1-8 characters in
length, may be specified, delimited by blanks.
- controller_name=<value>:
One of the following:
1. The userid controlling the real device. A maximum of eight
userids, all 1-8 characters in length, may be specified,
delimited by blanks.
2. '*': Specifies that any available controller may be used
- connection_value=<value>:
One of the following values:
CONnect: Activate the real device connection.
DISCONnect: Do not activate the real device connection.
- queue_memory_limit=<value>:
A number between 1 and 8
specifying the QDIO buffer size in megabytes.
- routing_value=<value>:
Specifies whether the OSA-Express QDIO
device will act as a router to the virtual switch, as follows:
NONrouter: The OSA-Express device identified in
real_device_address= will not act as a router to the vswitch
PRIrouter: The OSA-Express device identified in
real_device_address= will act as a primary router to the
vswitch
- port_type=<value>:
Specifies the port type, ACCESS or TRUNK
- persist=<value>:
one of the following values:
NO: The vswitch is updated on the active system, but is not
updated in the permanent configuration for the system.
YES: The vswitch is updated on the active system and also in
the permanent configuration for the system.
If not specified, the default is NO.
- gvrp_value=<value>:
GVRP or NOGVRP
- mac_id=<value>:
A unique identifier (up to six hexadecimal
digits) used as part of the vswitch MAC address
- uplink=<value>:
One of the following:
NO: The port being enabled is not the vswitch's UPLINK port.
YES: The port being enabled is the vswitch's UPLINK port.
- nic_userid=<value>:
One of the following:
1. The userid of the port to/from which the UPLINK port will
be connected or disconnected. If a userid is specified,
then nic_vdev= must also be specified
2. '*': Disconnect the currently connected guest port to/from
the special virtual switch UPLINK port. (This is equivalent
to specifying NIC NONE on CP SET VSWITCH).
- nic_vdev=<value>:
The virtual device to/from which the the
UPLINK port will be connected/disconnected. If this value is
specified, nic_userid= must also be specified, with a userid.
- lacp=<value>:
One of the following values:
ACTIVE: Indicates that the virtual switch will initiate
negotiations with the physical switch via the link aggregation
control protocol (LACP) and will respond to LACP packets sent
by the physical switch.
INACTIVE: Indicates that aggregation is to be performed,
but without LACP.
- Interval=<value>:
The interval to be used by the control
program (CP) when doing load balancing of conversations across
multiple links in the group. This can be any of the following
values:
1 - 9990: Indicates the number of seconds between load
balancing operations across the link aggregation group.
OFF: Indicates that no load balancing is done.
- group_rdev=<value>:
The real device address or the real device
address and OSA Express port number of a QDIO OSA Express
devcie to be affected within the link aggregation group
associated with this vswitch. If using a real device and an OSA
Express port number, specify the real device number followed
by a period (.), the letter 'P' (or 'p'), followed by the port
number as a hexadecimal number. A maximum of eight device
addresses all 1-7 characters in length, may be specified,
delimited by blanks.
Note: If a real device address is specified, this device will
be added to the link aggregation group associated with this
vswitch. (The link aggregation group will be created if it does
not already exist.)
- iptimeout=<value>:
A number between 1 and 240 specifying the
length of time in minutes that a remote IP address table entry
remains in the IP address table for the virtual switch.
- port_isolation=<value>:
ON or OFF
- promiscuous=<value>:
One of the following:
NO: The userid or port on the grant is not authorized to use
the vswitch in promiscuous mode
YES: The userid or port on the grant is authorized to use the
vswitch in promiscuous mode.
- MAC_protect=<value>:
ON, OFF or UNSPECified
- VLAN_counters=<value>:
ON or OFF
"""
for k in kwargs.keys():
if k not in constants.SET_VSWITCH_KEYWORDS:
errmsg = ('API vswitch_set: Invalid keyword %s' % k)
raise exception.SDKInvalidInputFormat(msg=errmsg)
self._networkops.set_vswitch(vswitch_name, **kwargs)
    def vswitch_delete(self, vswitch_name, persist=True):
        """ Delete vswitch.

        :param str vswitch_name: the vswitch name
        :param bool persist: whether delete the vswitch from the permanent
               configuration for the system
        """
        # Thin delegation to the network operations layer.
        self._networkops.delete_vswitch(vswitch_name, persist)
    def get_volume_connector(self, userid, reserve=False,
                             fcp_template_id=None, sp_name=None):
        """Get connector information of the guest for attaching to volumes.

        This API is for Openstack Cinder driver only now.

        Connector information is a dictionary representing the
        machine that will be making the connection as follows::
            {
                'zvm_fcp': fcp
                'wwpns': [wwpn]
                'host': host
                'phy_to_virt_initiators': {},
                'fcp_paths': 0,
                'fcp_template_id': fcp_template_id
            }
        This information will be used by IBM storwize FC driver in Cinder.

        :param str userid: the user id of the guest
        :param boolean reserve: the flag to reserve FCP device
        :param str fcp_template_id: the FCP Multipath Template id
               which FCP devices are allocated by
        :param str sp_name: the storage provider name
        :returns: dict in the format shown above
        """
        # Positional delegation -- keep the argument order in sync with the
        # volumeop signature.
        return self._volumeop.get_volume_connector(
            userid, reserve, fcp_template_id, sp_name)
def get_fcp_templates(self, template_id_list=None, assigner_id=None,
default_sp_list= None, host_default=None):
"""Get template base info
:param template_id_list: (list) a list of template id,
if it is None, get FCP Multipath Templates with other parameter
:param assigner_id: (str) a string of VM userid
:param host_default: (boolean) whether or not get host default fcp
template
:param default_sp_list: (list) a list of storage provider, to get the
list of storage provider's default FCP Multipath Templates
:return: (dict) the base info of template
example:
{
templates: [
{
name: t1,
id: template1_id,
description: description,
host_default: 0,
sp_default: [sp1]
},
{
name: t2,
id: template2_id,
description: description,
host_default: 1,
sp_default: [sp1, sp2]
}
]
}
"""
# pass in template_id_list and default_sp_list is string:
# "['36439338-db14-11ec-bb41-0201018b1dd2']"
# convert to list
if template_id_list and not isinstance(template_id_list, list):
template_id_list = ast.literal_eval(template_id_list)
if default_sp_list and not isinstance(default_sp_list, list):
default_sp_list = ast.literal_eval(default_sp_list)
return self._volumeop.get_fcp_templates(
template_id_list=template_id_list, assigner_id=assigner_id,
default_sp_list=default_sp_list, host_default=host_default)
    def get_fcp_templates_details(self, template_id_list=None,
                                  raw=False, statistics=True,
                                  sync_with_zvm=False):
        """Get FCP Multipath Templates detail info.

        :param template_id_list: (list) if is None,
            will get all the templates on the host
        :param raw: (bool) whether to include the raw FCP device rows
        :param statistics: (bool) whether to include per-path statistics
        :param sync_with_zvm: (bool) when True, the FCP DB is first
            synchronized with z/VM before the details are collected
        :return: (dict) the raw and/or statistic data
            of temlate_list FCP devices

        Example output, if FCP DB is NOT empty and raw=True statistics=True:
        {
        "fcp_templates":[
            {
            "id":"36439338-db14-11ec-bb41-0201018b1dd2",
            "name":"default_template",
            "description":"This is Default template",
            "is_default":True,
            "sp_name":[
                "sp4",
                "v7k60"
            ],
            "raw":{
                # (fcp_id, template_id, assigner_id, connections,
                #  reserved, wwpn_npiv, wwpn_phy, chpid, state, owner,
                #  tmpl_id)
                "0":[
                [
                    "1a0f",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "HLP0000B",
                    0,
                    0,
                    "c05076de3300038b",
                    "c05076de33002e41",
                    "27",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                ],
                [
                    "1a0e",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "",
                    0,
                    0,
                    "c05076de330003a2",
                    "c05076de33002e41",
                    "27",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                ]
                ],
                "1":[
                [
                    "1c0d",
                    "36439338-db14-11ec-bb41-0201018b1dd2",
                    "",
                    0,
                    0,
                    "c05076de33000353",
                    "c05076de33002641",
                    "32",
                    "free",
                    "none",
                    "36439338-db14-11ec-bb41-0201018b1dd2"
                ]
                ]
            },
            "statistics":{
                "0":{
                "total":"1A0E - 1A0F",
                "available":"1A0E - 1A0F",
                "allocated":"",
                "reserve_only":"",
                "connection_only":"",
                "unallocated_but_active":[
                ],
                "allocated_but_free":"",
                "notfound":"",
                "offline":"",
                "CHPIDs":{
                    "27":"1A0E - 1A0F"
                }
                },
                "1":{
                "total":"1C0D",
                "available":"1C0D",
                "allocated":"",
                "reserve_only":"",
                "connection_only":"",
                "unallocated_but_active":[
                ],
                "allocated_but_free":"",
                "notfound":"",
                "offline":"",
                "CHPIDs":{
                    "32":"1C0D"
                }
                }
            }
            }
        ]
        }
        """
        # pass in template_id_list is string:
        # "['36439338-db14-11ec-bb41-0201018b1dd2']"
        # convert to list
        if template_id_list and not isinstance(template_id_list, list):
            template_id_list = ast.literal_eval(template_id_list)

        return self._volumeop.get_fcp_templates_details(
            template_id_list=template_id_list, raw=raw,
            statistics=statistics, sync_with_zvm=sync_with_zvm)
    def delete_fcp_template(self, template_id):
        """Delete a FCP Multipath Template by its id.

        :param str template_id: the id of the FCP Multipath Template
        :returns: whatever the volume operations layer returns
        """
        return self._volumeop.delete_fcp_template(template_id)
    @check_fcp_exist()
    def get_fcp_usage(self, fcp):
        """API for getting FCP usage in database manually.

        :param str fcp: the fcp ID of FCP device
        :returns: list describing reserved,connections values of the FCP
            in database. For example,
            ['fakeid', 1, 3, 'b7ad5cba-f225-11ec-a5cf-02553600000f'] means
            the userid is fakeid, reserved value is 1, connections is 3,
            fcp_template_id is 'b7ad5cba-f225-11ec-a5cf-02553600000f'.
        """
        return self._volumeop.get_fcp_usage(fcp)
    @check_fcp_exist()
    def set_fcp_usage(self, fcp, userid, reserved, connections,
                      fcp_template_id):
        """API for setting FCP usage in database manually.

        :param str userid: the user id of the guest
        :param str fcp: the fcp ID of FCP device
        :param int reserved: the value set to reserved value of FCP database
        :param int connections: the value set to connections value of
                                FCP database
        :param str fcp_template_id: the ID of the FCP Multipath Template.
        """
        # NOTE: the delegate takes (userid, fcp, ...) -- the first two
        # arguments are deliberately swapped relative to this signature.
        return self._volumeop.set_fcp_usage(userid, fcp, reserved,
                                            connections, fcp_template_id)
def create_fcp_template(self, name, description: str = '',
fcp_devices: str = '',
host_default: bool = False,
default_sp_list: list = [],
min_fcp_paths_count: int = None):
"""API for creating a FCP Multipath Template in database.
:param str name: the name of the template
:param str description: the description for the template
:param str fcp_devices: a fcp list is composed of fcp device IDs,
range indicator '-', and split indicator ';'.
:param bool host_default: this template is default to this
host or not
:param list default_sp_list: the list of storage providers that will
use this FCP Multipath Template as default FCP Multipath Template. If None, it means
no storage provider would use this FCP Multipath Template as default.
:param min_fcp_paths_count: The minimum number of FCP paths that
should be defined to a vm when attachinga data volume to a vm or
BFV (deploying a vm from SCSI image).
"""
return self._volumeop.create_fcp_template(
name, description, fcp_devices,
host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count)
    def edit_fcp_template(self, fcp_template_id, name=None,
                          description=None, fcp_devices=None,
                          host_default=None, default_sp_list=None, min_fcp_paths_count: int = None):
        """ Edit a FCP Multipath Template.

        The kwargs values are pre-validated in two places:
          validate kwargs types
            in zvmsdk/sdkwsgi/schemas/volume.py
          set a kwarg as None if not passed by user
            in zvmsdk/sdkwsgi/handlers/volume.py

        If any kwarg is None, the kwarg will not be updated.

        :param fcp_template_id: template id
        :param name:            template name
        :param description:     template desc
        :param fcp_devices:     FCP devices divided into
                                different paths by semicolon
          Format:
            "fcp-devices-from-path0;fcp-devices-from-path1;..."
          Example:
            "0011-0013;0015;0017-0018",
        :param host_default: (bool)
        :param default_sp_list: (list)
          Example:
            ["SP1", "SP2"]
        :param min_fcp_paths_count: min fcp paths count
        :return:
          Example
            {
              'fcp_template': {
                'name': 'bjcb-test-template',
                'id': '36439338-db14-11ec-bb41-0201018b1dd2',
                'description': 'This is Default template',
                'is_default': True,
                'sp_name': ['sp4', 'v7k60']
              }
            }
        """
        # None kwargs mean "leave unchanged"; the volumeop layer skips them.
        return self._volumeop.edit_fcp_template(
            fcp_template_id, name=name, description=description,
            fcp_devices=fcp_devices, host_default=host_default,
            default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count)
    def volume_attach(self, connection_info):
        """ Attach a volume to a guest. It's prerequisite to active multipath
            feature on the guest before utilizing persistent volumes.

        :param dict connection_info:
               - alias: of type string. A constant valid alias of the volume
                 after it being attached onto the guest, i.e. '/dev/vda'.
                 Because the system generating device name could change
                 after each rebooting, it's necessary to have a constant
                 name to represent the volume in its life time.
               - protocol: of type string. The protocol by which the volume is
                 connected to the guest. The only one supported now is 'fc'
                 which implies FibreChannel.
               - fcps: of type list. The address of the FCP devices used by
                 the guest to connect to the volume. They should belong to
                 different channel path IDs in order to work properly.
               - wwpns: of type list. The WWPN values through which the volume
                 can be accessed, excluding prefixing '0x'.
               - dedicate: of type list. The address of the FCP devices which
                 will be dedicated to the guest before accessing the volume.
                 They should belong to different channel path IDs in order
                 to work properly.
        """
        # Thin delegation; validation of connection_info happens downstream.
        self._volumeop.attach_volume_to_instance(connection_info)
    def volume_refresh_bootmap(self, fcpchannels, wwpns, lun,
                               wwid='',
                               transportfiles=None, guest_networks=None, fcp_template_id=None):
        """ Refresh a volume's bootmap info.

        :param list fcpchannels: FCP channels of the target volume
        :param list wwpns: WWPNs through which the volume can be accessed
        :param str lun: the LUN of the target volume
        :param wwid: (str) the wwid of the target volume
        :param transportfiles: (str) the files that used to customize the vm
        :param list guest_networks: a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address or None,
               'dns_addr': (list) dns addresses or None,
               'gateway_addr': (str) gateway address or None,
               'cidr': (str) cidr format,
               'nic_vdev': (str)nic VDEV, 1- to 4- hexadecimal digits or None,
               'nic_id': (str) nic identifier or None,
               'mac_addr': (str) mac address or None, it is only be used when
               changing the guest's user direct. Format should be
               xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit
               'osa_device': (str) OSA address or None,
               'hostname': (str) Optional. The hostname of the guest}

               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000',
               'mac_addr': '02:00:00:12:34:56',
               'hostname': 'instance-00001'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003'}],
        :param fcp_template_id: the FCP Multipath Template id
        """
        # Keyword delegation keeps the optional arguments aligned with the
        # volumeop signature.
        return self._volumeop.volume_refresh_bootmap(fcpchannels, wwpns, lun,
                                                     wwid=wwid,
                                                     transportfiles=transportfiles,
                                                     guest_networks=guest_networks,
                                                     fcp_template_id=fcp_template_id)
    def volume_detach(self, connection_info):
        """ Detach a volume from a guest. It's prerequisite to active multipath
            feature on the guest before utilizing persistent volumes.

        :param dict connection_info: A dict comprised of a list of information
               used to establish host-volume connection, including:
               - alias: of type string. A constant valid alias of the volume
                 after it being attached onto the guest, i.e. '/dev/vda'. Because
                 the system generating device name could change after each
                 rebooting, it's necessary to have a constant name to represent
                 the volume in its life time.
               - protocol: of type string. The protocol by which the volume is
                 connected to the guest. The only one supported now is 'fc' which
                 implies FibreChannel.
               - fcps: of type list. The address of the FCP devices used by the
                 guest to connect to the volume.
               - wwpns: of type list. The WWPN values through which the volume
                 can be accessed, excluding prefixing '0x'.
               - dedicate: of type list. The address of the FCP devices which
                 will be undedicated from the guest after removing the volume.
        """
        # Thin delegation; mirrors volume_attach.
        self._volumeop.detach_volume_from_instance(connection_info)
@check_guest_exist()
def guest_create_network_interface(self, userid, os_version,
guest_networks, active=False):
""" Create network interface(s) for the guest inux system. It will
create the nic for the guest, add NICDEF record into the user
direct. It will also construct network interface configuration
files and punch the files to the guest. These files will take
effect when initializing and configure guest.
:param str userid: the user id of the guest
:param str os_version: operating system version of the guest
:param list guest_networks: a list of network info for the guest.
It has one dictionary that contain some of the below keys for
each network, the format is:
{'ip_addr': (str) IP address or None,
'dns_addr': (list) dns addresses or None,
'gateway_addr': (str) gateway address or None,
'cidr': (str) cidr format,
'nic_vdev': (str)nic VDEV, 1- to 4- hexadecimal digits or None,
'nic_id': (str) nic identifier or None,
'mac_addr': (str) mac address or None, it is only be used when
changing the guest's user direct. Format should be
xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit
'osa_device': (str) OSA address or None,
'hostname': (str) Optional. The hostname of the vm.}
Example for guest_networks:
[{'ip_addr': '192.168.95.10',
'dns_addr': ['9.0.2.1', '9.0.3.1'],
'gateway_addr': '192.168.95.1',
'cidr': "192.168.95.0/24",
'nic_vdev': '1000',
'mac_addr': '02:00:00:12:34:56',
'hostname': 'instance-00001'},
{'ip_addr': '192.168.96.10',
'dns_addr': ['9.0.2.1', '9.0.3.1'],
'gateway_addr': '192.168.96.1',
'cidr': "192.168.96.0/24",
'nic_vdev': '1003}]
:param bool active: whether add a nic on active guest system
:returns: guest_networks list, including nic_vdev for each network
:rtype: list
"""
if len(guest_networks) == 0:
errmsg = ("API guest_create_network_interface: "
"Network information is required but not provided")
raise exception.SDKInvalidInputFormat(msg=errmsg)
for network in guest_networks:
vdev = nic_id = mac_addr = ip_addr = OSA = None
if 'nic_vdev' in network.keys():
vdev = network['nic_vdev']
if 'osa_device' in network.keys():
OSA = network['osa_device']
if 'nic_id' in network.keys():
nic_id = network['nic_id']
if (('mac_addr' in network.keys()) and
(network['mac_addr'] is not None)):
mac_addr = network['mac_addr']
if not zvmutils.valid_mac_addr(mac_addr):
errmsg = ("API guest_create_network_interface: "
"Invalid mac address, format should be "
"xx:xx:xx:xx:xx:xx, and x is a hexadecimal "
"digit")
raise exception.SDKInvalidInputFormat(msg=errmsg)
if (('ip_addr' in network.keys()) and
(network['ip_addr'] is not None)):
ip_addr = network['ip_addr']
if not netaddr.valid_ipv4(ip_addr):
errmsg = ("API guest_create_network_interface: "
"Invalid management IP address, it should be "
"the value between 0.0.0.0 and 255.255.255.255")
raise exception.SDKInvalidInputFormat(msg=errmsg)
if (('dns_addr' in network.keys()) and
(network['dns_addr'] is not None)):
if not isinstance(network['dns_addr'], list):
raise exception.SDKInvalidInputTypes(
'guest_config_network',
str(list), str(type(network['dns_addr'])))
for dns in network['dns_addr']:
if not netaddr.valid_ipv4(dns):
errmsg = ("API guest_create_network_interface: "
"Invalid dns IP address, it should be the "
"value between 0.0.0.0 and 255.255.255.255")
raise exception.SDKInvalidInputFormat(msg=errmsg)
if (('gateway_addr' in network.keys()) and
(network['gateway_addr'] is not None)):
if not netaddr.valid_ipv4(
network['gateway_addr']):
errmsg = ("API guest_create_network_interface: "
"Invalid gateway IP address, it should be "
"the value between 0.0.0.0 and 255.255.255.255")
raise exception.SDKInvalidInputFormat(msg=errmsg)
if (('cidr' in network.keys()) and
(network['cidr'] is not None)):
if not zvmutils.valid_cidr(network['cidr']):
errmsg = ("API guest_create_network_interface: "
"Invalid CIDR, format should be a.b.c.d/n, and "
"a.b.c.d is IP address, n is the value "
"between 0-32")
raise exception.SDKInvalidInputFormat(msg=errmsg)
try:
if OSA is None:
used_vdev = self._networkops.create_nic(userid, vdev=vdev,
nic_id=nic_id,
mac_addr=mac_addr,
active=active)
else:
used_vdev = self._networkops.dedicate_OSA(userid, OSA,
vdev=vdev,
active=active)
network['nic_vdev'] = used_vdev
except exception.SDKBaseException:
LOG.error(('Failed to create nic on vm %s') % userid)
raise
try:
self._networkops.network_configuration(userid, os_version,
guest_networks,
active=active)
except exception.SDKBaseException:
LOG.error(('Failed to set network configuration file on vm %s') %
userid)
raise
return guest_networks
    def guests_get_nic_info(self, userid=None, nic_id=None, vswitch=None):
        """ Retrieve nic information in the network database according to
            the requirements, the nic information will include the guest
            name, nic device number, vswitch name that the nic is coupled
            to, nic identifier and the comments.

        :param str userid: the user id of the vm
        :param str nic_id: nic identifier
        :param str vswitch: the name of the vswitch

        :returns: list describing nic information, format is
                  [
                  (userid, interface, vswitch, nic_id, comments),
                  (userid, interface, vswitch, nic_id, comments)
                  ], such as
                  [
                  ('VM01', '1000', 'xcatvsw2', '1111-2222', None),
                  ('VM02', '2000', 'xcatvsw3', None, None)
                  ]
        :rtype: list
        """
        action = "get nic information"
        # SDKBaseException from the network layer is logged with `action`
        # context and re-raised unchanged.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._networkops.get_nic_info(userid=userid, nic_id=nic_id,
                                                 vswitch=vswitch)
def vswitch_query(self, vswitch_name):
"""Check the virtual switch status
:param str vswitch_name: the name of the virtual switch
:returns: Dictionary describing virtual switch info
:rtype: dict
"""
action = "get virtual switch information"
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._networkops.vswitch_query(vswitch_name)
    @check_guest_exist()
    def guest_delete_network_interface(self, userid, os_version,
                                       vdev, active=False):
        """ delete the nic and network configuration for the vm

        :param str userid: the user id of the guest
        :param str os_version: operating system version of the guest
        :param str vdev: nic device number, 1- to 4- hexadecimal digits
        :param bool active: whether delete a nic on active guest system
        """
        # First remove the NIC itself, then the network configuration files
        # that reference it.
        self._networkops.delete_nic(userid, vdev, active=active)
        self._networkops.delete_network_configuration(userid, os_version,
                                                      vdev, active=active)
    def host_get_ssi_info(self):
        """Get z/VM host SSI information.

        :returns: If current z/VM host is an SSI cluster member,
                  returns a list of SSI cluster info, format is:
                  ['ssi_name = SSI',
                   'ssi_mode = Stable',
                   'ssi_pdr = IAS7CM_on_139E',
                   'cross_system_timeouts = Enabled',
                   'output.ssiInfoCount = 4', '',
                   'member_slot = 1',
                   'member_system_id = BOEIAAS7',
                   'member_state = Joined',
                   'member_pdr_heartbeat = 12/28/2021_05:10:21',
                   'member_received_heartbeat = 12/28/2021_05:10:21',
                   '',
                   'member_slot = 2',
                   'member_system_id = BOEIAAS8',
                   'member_state = Joined',
                   'member_pdr_heartbeat = 12/28/2021_05:10:36',
                   'member_received_heartbeat = 12/28/2021_05:10:36',
                   '']
                  otherwise, return [].
        :rtype: list
        """
        # Thin delegation to the host operations layer.
        return self._hostops.host_get_ssi_info()
import contextlib
import random
import os
import six
import sqlite3
import threading
import uuid
import json
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import utils
# Runtime configuration and module logger.
CONF = config.CONF
LOG = log.LOG
# Permission bits used when creating the database directory.
_DIR_MODE = 0o755
# Lazily-initialized, process-wide sqlite3 connections, one per database
# file; populated on first use by the get_*_conn() context managers.
_NETWORK_CONN = None
_IMAGE_CONN = None
_GUEST_CONN = None
_FCP_CONN = None
# Re-entrant locks serializing access to each shared connection across
# threads (the connections are created with check_same_thread=False).
_DBLOCK_VOLUME = threading.RLock()
_DBLOCK_NETWORK = threading.RLock()
_DBLOCK_IMAGE = threading.RLock()
_DBLOCK_GUEST = threading.RLock()
_DBLOCK_FCP = threading.RLock()
@contextlib.contextmanager
def get_network_conn():
    """Yield the shared network-DB connection, serialized by its lock.

    The connection is created lazily on first use. Any exception raised by
    the caller's SQL is logged and re-raised as SDKNetworkOperationError.
    """
    global _NETWORK_CONN, _DBLOCK_NETWORK
    if not _NETWORK_CONN:
        _NETWORK_CONN = _init_db_conn(const.DATABASE_NETWORK)

    # Hold the lock for the whole with-block so concurrent threads cannot
    # interleave statements on the shared connection.
    _DBLOCK_NETWORK.acquire()
    try:
        yield _NETWORK_CONN
    except Exception as err:
        msg = "Execute SQL statements error: %s" % six.text_type(err)
        LOG.error(msg)
        raise exception.SDKNetworkOperationError(rs=1, msg=msg)
    finally:
        _DBLOCK_NETWORK.release()
@contextlib.contextmanager
def get_image_conn():
    """Yield the shared image-DB connection, serialized by its lock.

    The connection is created lazily on first use.
    NOTE(review): unlike its siblings this wraps errors in
    SDKDatabaseException (not an operation-specific error) and passes the
    original exception object as msg -- confirm this asymmetry is intended.
    """
    global _IMAGE_CONN, _DBLOCK_IMAGE
    if not _IMAGE_CONN:
        _IMAGE_CONN = _init_db_conn(const.DATABASE_IMAGE)

    _DBLOCK_IMAGE.acquire()
    try:
        yield _IMAGE_CONN
    except Exception as err:
        LOG.error("Execute SQL statements error: %s", six.text_type(err))
        raise exception.SDKDatabaseException(msg=err)
    finally:
        _DBLOCK_IMAGE.release()
@contextlib.contextmanager
def get_guest_conn():
    """Yield the shared guest-DB connection, serialized by its lock.

    The connection is created lazily on first use. Any exception raised by
    the caller's SQL is logged and re-raised as SDKGuestOperationError.
    """
    global _GUEST_CONN, _DBLOCK_GUEST
    if _GUEST_CONN is None:
        _GUEST_CONN = _init_db_conn(const.DATABASE_GUEST)

    # The RLock's context-manager form acquires on entry and releases on
    # exit, covering both the success and the error path.
    with _DBLOCK_GUEST:
        try:
            yield _GUEST_CONN
        except Exception as err:
            msg = "Execute SQL statements error: %s" % six.text_type(err)
            LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=1, msg=msg)
@contextlib.contextmanager
def get_fcp_conn():
    """Yield the shared FCP-DB connection inside a transaction.

    The connection is created lazily on first use, with sqlite3.Row as the
    row factory so columns can be accessed by name. A BEGIN/COMMIT pair is
    issued only when no transaction is already open (sqlite does not allow
    nested transactions); nested uses of this context manager therefore
    piggy-back on the outermost transaction.

    :raises SDKGuestOperationError: for non-SDK errors raised in the body
    """
    global _FCP_CONN, _DBLOCK_FCP
    if not _FCP_CONN:
        _FCP_CONN = _init_db_conn(const.DATABASE_FCP)
        # enable access columns by name
        _FCP_CONN.row_factory = sqlite3.Row

    _DBLOCK_FCP.acquire()
    # Default to True so the except blocks never reference an unbound name:
    # previously skip_commit was only assigned inside the try, so a failure
    # of execute("BEGIN") itself produced a NameError masking the real error.
    # It is flipped to False only after BEGIN succeeds.
    skip_commit = True
    try:
        # sqlite DB not allow to start a transaction within a transaction,
        # so, only begin a transaction when no other alive transaction
        if not _FCP_CONN.in_transaction:
            _FCP_CONN.execute("BEGIN")
            skip_commit = False
        yield _FCP_CONN
    except exception.SDKBaseException as err:
        # rollback only if _FCP_CONN.execute("BEGIN")
        # is invoked when entering the contextmanager
        if not skip_commit:
            _FCP_CONN.execute("ROLLBACK")
        msg = "Got SDK exception in FCP DB operation: %s" % six.text_type(err)
        LOG.error(msg)
        raise
    except Exception as err:
        # rollback only if _FCP_CONN.execute("BEGIN")
        # is invoked when entering the contextmanager
        if not skip_commit:
            _FCP_CONN.execute("ROLLBACK")
        msg = "Execute SQL statements error: %s" % six.text_type(err)
        LOG.error(msg)
        raise exception.SDKGuestOperationError(rs=1, msg=msg)
    else:
        # commit only if _FCP_CONN.execute("BEGIN")
        # is invoked when entering the contextmanager
        if not skip_commit:
            _FCP_CONN.execute("COMMIT")
    finally:
        _DBLOCK_FCP.release()
def _init_db_conn(db_file):
    """Open a sqlite3 connection to *db_file* under CONF.database.dir.

    The database directory is created on demand. The connection is opened
    with check_same_thread=False (shared across threads, guarded by the
    module locks) and isolation_level=None (autocommit; callers issue
    BEGIN/COMMIT explicitly when needed).
    """
    db_dir = CONF.database.dir
    if not os.path.exists(db_dir):
        os.makedirs(db_dir, _DIR_MODE)
    return sqlite3.connect(os.path.join(db_dir, db_file),
                           check_same_thread=False,
                           isolation_level=None)
class NetworkDbOperator(object):
    def __init__(self):
        # Module id reported when raising SDKObjectNotExistError.
        self._module_id = 'network'
        # Make sure the backing table exists before any operation runs.
        self._create_switch_table()
def _create_switch_table(self):
create_table_sql = ' '.join((
'create table if not exists switch (',
'userid varchar(8) COLLATE NOCASE,',
'interface varchar(4) COLLATE NOCASE,',
'switch varchar(8) COLLATE NOCASE,',
'port varchar(128) COLLATE NOCASE,',
'comments varchar(128),',
'primary key (userid, interface));'))
with get_network_conn() as conn:
conn.execute(create_table_sql)
def _get_switch_by_user_interface(self, userid, interface):
with get_network_conn() as conn:
res = conn.execute("SELECT * FROM switch "
"WHERE userid=? and interface=?",
(userid, interface))
switch_record = res.fetchall()
if len(switch_record) == 1:
return switch_record[0]
elif len(switch_record) == 0:
return None
def switch_delete_record_for_userid(self, userid):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=?",
(userid,))
LOG.debug("Switch record for user %s is removed from "
"switch table" % userid)
def switch_delete_record_for_nic(self, userid, interface):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=? and interface=?",
(userid, interface))
LOG.debug("Switch record for user %s with nic %s is removed from "
"switch table" % (userid, interface))
def switch_add_record(self, userid, interface, port=None,
switch=None, comments=None):
"""Add userid and nic name address into switch table."""
with get_network_conn() as conn:
conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)",
(userid, interface, switch, port, comments))
LOG.debug("New record in the switch table: user %s, "
"nic %s, port %s" %
(userid, interface, port))
    def switch_add_record_migrated(self, userid, interface, switch,
                                   port=None, comments=None):
        """Add userid and interfaces and switch into switch table."""
        # Same INSERT as switch_add_record; only the parameter order and the
        # debug message differ (this variant requires the switch name).
        with get_network_conn() as conn:
            conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)",
                         (userid, interface, switch, port, comments))
            LOG.debug("New record in the switch table: user %s, "
                      "nic %s, switch %s" %
                      (userid, interface, switch))
def switch_update_record_with_switch(self, userid, interface,
                                     switch=None):
    """Update the vswitch column of an existing switch record.

    :param userid: (str) VM userid
    :param interface: (str) NIC interface id
    :param switch: (str or None) new vswitch name; None clears the column
    :raises SDKObjectNotExistError: if no record exists for the pair
    """
    if not self._get_switch_by_user_interface(userid, interface):
        msg = "User %s with nic %s does not exist in DB" % (userid,
                                                            interface)
        LOG.error(msg)
        obj_desc = ('User %s with nic %s' % (userid, interface))
        raise exception.SDKObjectNotExistError(obj_desc,
                                               modID=self._module_id)
    # FIX: merge the duplicated set/clear branches — binding None as
    # the parameter stores SQL NULL, so one statement covers both the
    # "SET switch=?" and the "SET switch=NULL" case; also use lazy
    # logging args instead of eager %-formatting.
    with get_network_conn() as conn:
        conn.execute("UPDATE switch SET switch=? "
                     "WHERE userid=? and interface=?",
                     (switch, userid, interface))
        LOG.debug("Set switch to %s for user %s with nic %s "
                  "in switch table",
                  switch, userid, interface)
def _parse_switch_record(self, switch_list):
    """Convert raw switch rows into dicts keyed by the DB column names.

    :param switch_list: iterable of row tuples from the switch table
    :return: (list) one dict per row with keys
        userid, interface, switch, port, comments
    """
    keys = ('userid', 'interface', 'switch', 'port', 'comments')
    return [dict(zip(keys, row)) for row in switch_list]
def switch_select_table(self):
    """Return every record in the switch table as a list of dicts."""
    with get_network_conn() as conn:
        rows = conn.execute("SELECT * FROM switch").fetchall()
    return self._parse_switch_record(rows)
def switch_select_record_for_userid(self, userid):
    """Return all switch records of one userid as a list of dicts.

    :param userid: (str) VM userid
    """
    with get_network_conn() as conn:
        rows = conn.execute("SELECT * FROM switch "
                            "WHERE userid=?", (userid,)).fetchall()
    return self._parse_switch_record(rows)
def switch_select_record(self, userid=None, nic_id=None, vswitch=None):
    """Query switch records filtered by any combination of criteria.

    :param userid: (str) filter on the userid column
    :param nic_id: (str) filter on the port column
    :param vswitch: (str) filter on the switch column
    :return: (list) matching records parsed into dicts
    """
    if userid is None and nic_id is None and vswitch is None:
        return self.switch_select_table()
    # FIX: build the WHERE clause with str.join instead of trimming a
    # trailing " and" via str.strip(' and') — strip() removes a *set of
    # characters* (space/a/n/d) from both ends, not the literal suffix,
    # which only worked here by coincidence of the column names.
    clauses = []
    params = []
    if userid is not None:
        clauses.append("userid=?")
        params.append(userid)
    if nic_id is not None:
        clauses.append("port=?")
        params.append(nic_id)
    if vswitch is not None:
        clauses.append("switch=?")
        params.append(vswitch)
    sql_cmd = "SELECT * FROM switch WHERE " + " and ".join(clauses)
    with get_network_conn() as conn:
        result = conn.execute(sql_cmd, params)
        switch_list = result.fetchall()
    return self._parse_switch_record(switch_list)
class FCPDbOperator(object):
def __init__(self):
    # module id used when raising SDK exceptions from this operator
    self._module_id = 'volume'
    # make sure all FCP-related tables exist before any DML runs
    self._initialize_table()
def _initialize_table(self):
    """Create all FCP-related tables in the database if missing.

    Four tables are created (CREATE TABLE IF NOT EXISTS, so this is
    idempotent): fcp, template, template_sp_mapping and
    template_fcp_mapping.
    """
    # fcp_info_tables:
    # map the table name to the corresponding SQL to create it
    # key is the name of table to be created
    # value is the SQL to be executed to create the table
    fcp_info_tables = {}
    # table for basic info of FCP devices
    # fcp_id: FCP device ID, the primary key
    # assigner_id: VM userid representing an unique VM,
    #   it is allocated by zvmsdk and may differ with owner
    # connections: how many volumes connected to this FCP device,
    #   0 means no assigner
    # reserved: 0 for not reserved by some operation
    # wwpn_npiv: NPIV WWPN
    # wwpn_phy: Physical WWPN
    # chpid: channel ID of FCP device
    # state: FCP device status
    # owner: VM userid representing an unique VM,
    #   it is read from z/VM hypervisor and
    #   may differ with assigner_id
    # tmpl_id: indicate from which FCP Multipath Template this FCP device was
    #   allocated, not to which FCP Multipath Template this FCP
    #   device belong. because a FCP device may belong
    #   to multiple FCP Multipath Templates.
    fcp_info_tables['fcp'] = (
        "CREATE TABLE IF NOT EXISTS fcp("
        "fcp_id char(4) NOT NULL COLLATE NOCASE,"
        "assigner_id varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "connections integer NOT NULL DEFAULT 0,"
        "reserved integer NOT NULL DEFAULT 0,"
        "wwpn_npiv varchar(16) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "wwpn_phy varchar(16) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "chpid char(2) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "state varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "owner varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "tmpl_id varchar(32) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "PRIMARY KEY (fcp_id))")
    # table for FCP Multipath Templates:
    # id: template id, the primary key
    # name: the name of the template
    # description: the description for this template
    # is_default: is this template the default one on this host or not
    #   1/True for yes, 0/False for no
    #   note: SQLite recognizes the keywords "TRUE" and "FALSE",
    #   those keywords are saved in SQLite
    #   as integer 1 and 0 respectively
    # min_fcp_paths_count: -1 (the column default) means "not set"
    fcp_info_tables['template'] = (
        "CREATE TABLE IF NOT EXISTS template("
        "id varchar(32) NOT NULL COLLATE NOCASE,"
        "name varchar(128) NOT NULL COLLATE NOCASE,"
        "description varchar(255) NOT NULL DEFAULT '' COLLATE NOCASE,"
        "is_default integer NOT NULL DEFAULT 0,"
        "min_fcp_paths_count integer NOT NULL DEFAULT -1,"
        "PRIMARY KEY (id))")
    # table for relationships between templates and storage providers:
    # sp_name: name of storage provider, the primary key
    # tmpl_id: template id
    fcp_info_tables['template_sp_mapping'] = (
        'CREATE TABLE IF NOT EXISTS template_sp_mapping('
        'sp_name varchar(128) NOT NULL COLLATE NOCASE,'
        'tmpl_id varchar(32) NOT NULL COLLATE NOCASE,'
        'PRIMARY KEY (sp_name))')
    # table for relationships between templates and FCP devices:
    # fcp_id: the fcp device ID
    # tmpl_id: the template id
    # path: the path number, 0 means the FCP device is in path0
    #   1 means the FCP devices is in path1, and so on.
    # composite primary key (fcp_id, tmpl_id)
    fcp_info_tables['template_fcp_mapping'] = (
        'CREATE TABLE IF NOT EXISTS template_fcp_mapping('
        'fcp_id char(4) NOT NULL COLLATE NOCASE,'
        'tmpl_id varchar(32) NOT NULL COLLATE NOCASE,'
        'path integer NOT NULL,'
        'PRIMARY KEY (fcp_id, tmpl_id))')
    # create all the tables
    LOG.info("Initializing FCP database.")
    with get_fcp_conn() as conn:
        for table_name in fcp_info_tables:
            create_table_sql = fcp_info_tables[table_name]
            conn.execute(create_table_sql)
    LOG.info("FCP database initialized.")
#########################################################
# DML for Table fcp #
#########################################################
def unreserve_fcps(self, fcp_ids):
    """Clear the reserved flag and template id of the given FCP devices.

    :param fcp_ids: (list) FCP device ids; no-op when empty
    """
    if not fcp_ids:
        return
    params = [(fcp_id,) for fcp_id in fcp_ids]
    with get_fcp_conn() as conn:
        conn.executemany("UPDATE fcp SET reserved=0, tmpl_id='' "
                         "WHERE fcp_id=?", params)
def reserve_fcps(self, fcp_ids, assigner_id, fcp_template_id):
    """Mark FCP devices as reserved by an assigner from a template.

    :param fcp_ids: (list) FCP device ids to reserve
    :param assigner_id: (str) VM userid the devices are reserved for
    :param fcp_template_id: (str) template the devices come from
    """
    # FIX: guard against an empty list, consistent with unreserve_fcps,
    # so no DB transaction is opened for a no-op
    if not fcp_ids:
        return
    fcp_update_info = [(assigner_id, fcp_template_id, fcp_id)
                       for fcp_id in fcp_ids]
    with get_fcp_conn() as conn:
        conn.executemany("UPDATE fcp "
                         "SET reserved=1, assigner_id=?, tmpl_id=? "
                         "WHERE fcp_id=?", fcp_update_info)
def bulk_insert_zvm_fcp_info_into_fcp_table(self, fcp_info_list: list):
    """Insert multiple records into fcp table with FCP info from z/VM.

    Each element of fcp_info_list is a tuple ordered as
    (fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner), for example:
    ('1a06', 'c05076de33000355', 'c05076de33002641', '27', 'active',
     'user1')
    """
    sql = ("INSERT INTO fcp (fcp_id, wwpn_npiv, wwpn_phy, "
           "chpid, state, owner) "
           "VALUES (?, ?, ?, ?, ?, ?)")
    with get_fcp_conn() as conn:
        conn.executemany(sql, fcp_info_list)
def bulk_delete_from_fcp_table(self, fcp_id_list: list):
    """Delete multiple FCP records from the fcp table.

    :param fcp_id_list: (list) FCP ids, e.g. ['1a00', '1b01', '1c02']
    """
    params = [(fcp_id,) for fcp_id in fcp_id_list]
    with get_fcp_conn() as conn:
        conn.executemany("DELETE FROM fcp "
                         "WHERE fcp_id=?", params)
def bulk_update_zvm_fcp_info_in_fcp_table(self, fcp_info_list: list):
    """Update multiple fcp rows with FCP info queried from z/VM.

    Each element of fcp_info_list is a tuple ordered as
    (fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner), for example:
    ('1a06', 'c05076de33000355', 'c05076de33002641', '27', 'active',
     'user1')
    """
    # Rotate fcp_id from the front to the back of each record so the
    # parameter order matches SET wwpn_npiv=?, ..., owner=? WHERE fcp_id=?
    data_to_update = [tuple(info[1:]) + (info[0],)
                      for info in fcp_info_list]
    with get_fcp_conn() as conn:
        conn.executemany("UPDATE fcp SET wwpn_npiv=?, wwpn_phy=?, "
                         "chpid=?, state=?, owner=? WHERE "
                         "fcp_id=?", data_to_update)
def bulk_update_state_in_fcp_table(self, fcp_id_list: list,
                                   new_state: str):
    """Set the state column to new_state for all the given FCP ids.

    :param fcp_id_list: (list) FCP device ids to update
    :param new_state: (str) the state value to store (e.g. 'notfound')
    """
    # FIX: loop variable renamed from `id`, which shadowed the builtin;
    # docstring corrected — this updates the state column, not comments
    data_to_update = [[new_state, fcp_id] for fcp_id in fcp_id_list]
    with get_fcp_conn() as conn:
        conn.executemany("UPDATE fcp set state=? "
                         "WHERE fcp_id=?", data_to_update)
def get_all_fcps_of_assigner(self, assigner_id=None):
    """Get all fcp records, optionally restricted to one assigner.

    :param assigner_id: (str) when given, only rows of this assigner
    :return: (list) rows shaped as
        (fcp_id, assigner_id, connections, reserved, wwpn_npiv,
         wwpn_phy, chpid, state, owner, tmpl_id)
        e.g. ('283c', 'user1', 2, 1, 'c05076ddf7000002',
              'c05076ddf7001d81', 27, 'active', 'user1', '')
    :raises SDKObjectNotExistError: when no row matches
    """
    base_sql = ("SELECT fcp_id, assigner_id, connections, reserved, "
                "wwpn_npiv, wwpn_phy, chpid, state, owner, "
                "tmpl_id FROM fcp")
    with get_fcp_conn() as conn:
        if assigner_id:
            result = conn.execute(base_sql + " WHERE assigner_id=?",
                                  (assigner_id,))
        else:
            result = conn.execute(base_sql)
        fcp_info = result.fetchall()
        if not fcp_info:
            if assigner_id:
                obj_desc = ("FCP record in fcp table belongs to "
                            "userid: %s" % assigner_id)
            else:
                obj_desc = "FCP records in fcp table"
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
    return fcp_info
def get_usage_of_fcp(self, fcp_id):
    """Return usage info of one FCP device.

    :param fcp_id: (str) FCP device id
    :return: tuple (assigner_id, reserved, connections, tmpl_id)
    :raises SDKObjectNotExistError: when the FCP id is not in the table
    """
    with get_fcp_conn() as conn:
        row = conn.execute("SELECT * FROM fcp "
                           "WHERE fcp_id=?", (fcp_id,)).fetchone()
        if not row:
            msg = 'FCP with id: %s does not exist in DB.' % fcp_id
            LOG.error(msg)
            obj_desc = "FCP with id: %s" % fcp_id
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
    return (row['assigner_id'], row['reserved'],
            row['connections'], row['tmpl_id'])
def update_usage_of_fcp(self, fcp, assigner_id, reserved, connections,
                        fcp_template_id):
    """Overwrite the usage columns of a single FCP device.

    :param fcp: (str) FCP device id
    :param assigner_id: (str) VM userid
    :param reserved: (int) reserved flag
    :param connections: (int) connections count
    :param fcp_template_id: (str) template the device was allocated from
    """
    params = (assigner_id, reserved, connections, fcp_template_id, fcp)
    with get_fcp_conn() as conn:
        conn.execute("UPDATE fcp SET assigner_id=?, reserved=?, "
                     "connections=?, tmpl_id=? WHERE fcp_id=?",
                     params)
def increase_connections_by_assigner(self, fcp, assigner_id):
    """Increase connections of the given FCP device by 1.

    :param fcp: (str) a FCP device id
    :param assigner_id: (str) the userid of the virtual machine
    :return: (int) the connections count after the update
    :raises SDKObjectNotExistError: when no row matches fcp/assigner
    """
    with get_fcp_conn() as conn:
        row = conn.execute("SELECT * FROM fcp WHERE fcp_id=? "
                           "AND assigner_id=?",
                           (fcp, assigner_id)).fetchone()
        if not row:
            msg = 'FCP with id: %s does not exist in DB.' % fcp
            LOG.error(msg)
            obj_desc = "FCP with id: %s" % fcp
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
        new_count = row['connections'] + 1
        conn.execute("UPDATE fcp SET connections=? WHERE fcp_id=? "
                     "AND assigner_id=?", (new_count, fcp, assigner_id))
        # read the value back so the caller sees what is stored
        refreshed = conn.execute("SELECT connections FROM fcp "
                                 "WHERE fcp_id=?", (fcp,)).fetchone()
        return refreshed['connections']
def decrease_connections(self, fcp):
    """Decrease connections of the given FCP device by 1.

    :param fcp: (str) a FCP device id
    :return: (int) the connections count after the update
    :raises SDKObjectNotExistError: when the FCP does not exist, or
        when its connections count is already 0
    """
    with get_fcp_conn() as conn:
        result = conn.execute("SELECT * FROM fcp WHERE "
                              "fcp_id=?", (fcp,))
        fcp_list = result.fetchall()
        if not fcp_list:
            msg = 'FCP with id: %s does not exist in DB.' % fcp
            LOG.error(msg)
            obj_desc = "FCP with id: %s" % fcp
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
        # column index 2 of a fcp row is connections
        connections = fcp_list[0][2]
        if connections == 0:
            msg = 'FCP with id: %s no connections in DB.' % fcp
            LOG.error(msg)
            obj_desc = "FCP with id: %s" % fcp
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
        else:
            connections -= 1
        if connections < 0:
            connections = 0
            # FIX: the original format string had no %s placeholder for
            # the fcp argument, which makes the logging call mis-format
            LOG.warning("Warning: connections of fcp %s is negative", fcp)
        # decrease connections by 1
        conn.execute("UPDATE fcp SET connections=? "
                     "WHERE fcp_id=?",
                     (connections, fcp))
        # check the result
        result = conn.execute("SELECT connections FROM fcp "
                              "WHERE fcp_id=?", (fcp, ))
        connections = result.fetchone()['connections']
        return connections
def get_connections_from_fcp(self, fcp):
    """Return the connections count of one FCP device.

    :param fcp: (str) FCP device id
    :raises SDKObjectNotExistError: when the FCP id is not in the table
    """
    with get_fcp_conn() as conn:
        row = conn.execute("SELECT connections FROM fcp WHERE "
                           "fcp_id=?", (fcp,)).fetchone()
        if not row:
            msg = 'FCP with id: %s does not exist in DB.' % fcp
            LOG.error(msg)
            obj_desc = "FCP with id: %s" % fcp
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID=self._module_id)
    return row['connections']
def get_all(self):
    """Return every row of the fcp table."""
    with get_fcp_conn() as conn:
        return conn.execute("SELECT * FROM fcp").fetchall()
@staticmethod
def get_inuse_fcp_device_by_fcp_template(fcp_template_id):
    """Return the fcp rows currently allocated from the given template.

    :param fcp_template_id: (str) FCP Multipath Template id
    :return: (list) sqlite3.Row objects of the matching fcp rows
    """
    with get_fcp_conn() as conn:
        rows = conn.execute("SELECT * FROM fcp "
                            "WHERE tmpl_id=?",
                            (fcp_template_id,)).fetchall()
    return rows
#########################################################
# DML for Table template_fcp_mapping #
#########################################################
@staticmethod
def update_path_of_fcp_device(record):
    """Update the path of one fcp device in template_fcp_mapping.

    :param record: (tuple) (path, fcp_id, fcp_template_id)
    """
    sql = ("UPDATE template_fcp_mapping "
           "SET path=? "
           "WHERE fcp_id=? and tmpl_id=?")
    with get_fcp_conn() as conn:
        conn.execute(sql, record)
def get_path_count(self, fcp_template_id):
    """Return how many distinct paths the template's FCP devices span.

    :param fcp_template_id: (str) FCP Multipath Template id
    """
    with get_fcp_conn() as conn:
        rows = conn.execute(
            "SELECT DISTINCT path FROM template_fcp_mapping "
            "WHERE tmpl_id=?", (fcp_template_id,)).fetchall()
    return len(rows)
@staticmethod
def bulk_delete_fcp_device_from_fcp_template(records):
    """Delete multiple fcp devices from table template_fcp_mapping.

    :param records: (iter) e.g. [(fcp_template_id, fcp_id), ...]
    """
    sql = ("DELETE FROM template_fcp_mapping "
           "WHERE tmpl_id=? AND fcp_id=?")
    with get_fcp_conn() as conn:
        conn.executemany(sql, records)
@staticmethod
def bulk_insert_fcp_device_into_fcp_template(records):
    """Insert multiple fcp devices into table template_fcp_mapping.

    :param records: (iter) e.g. [(fcp_template_id, fcp_id, path), ...]
    """
    sql = ("INSERT INTO template_fcp_mapping "
           "(tmpl_id, fcp_id, path) VALUES (?, ?, ?)")
    with get_fcp_conn() as conn:
        conn.executemany(sql, records)
#########################################################
# DML for Table template #
#########################################################
def fcp_template_exist_in_db(self, fcp_template_id: str):
    """Return True when the template id exists in the template table."""
    with get_fcp_conn() as conn:
        rows = conn.execute("SELECT id FROM template "
                            "WHERE id=?", (fcp_template_id,)).fetchall()
    return bool(rows)
def get_min_fcp_paths_count_from_db(self, fcp_template_id):
    """Return min_fcp_paths_count of a template, or None if not found.

    Note: -1 stored in the column means the value was never set.
    """
    with get_fcp_conn() as conn:
        row = conn.execute("SELECT min_fcp_paths_count FROM template "
                           "WHERE id=?", (fcp_template_id,)).fetchone()
    if row:
        return row['min_fcp_paths_count']
    return None
@staticmethod
def update_basic_info_of_fcp_template(record):
    """Update basic info of a FCP Multipath Template in table template.

    :param record: (tuple)
        (name, description, host_default, min_fcp_paths_count,
         fcp_template_id)
    """
    (name, description, host_default,
     min_fcp_paths_count, fcp_template_id) = record
    with get_fcp_conn() as conn:
        # Only one default template per host is allowed, so demote
        # every other template first when this one becomes the default.
        if host_default is True:
            conn.execute("UPDATE template SET is_default=?", (False,))
        # update the template being edited
        conn.execute("UPDATE template "
                     "SET name=?, description=?, is_default=?, "
                     "min_fcp_paths_count=? WHERE id=?",
                     record)
#########################################################
# DML for Table template_sp_mapping #
#########################################################
def sp_name_exist_in_db(self, sp_name: str):
    """Return True when sp_name exists in table template_sp_mapping."""
    with get_fcp_conn() as conn:
        rows = conn.execute("SELECT sp_name FROM template_sp_mapping "
                            "WHERE sp_name=?", (sp_name,)).fetchall()
    return bool(rows)
@staticmethod
def bulk_set_sp_default_by_fcp_template(template_id,
                                        sp_name_list):
    """Make a FCP Multipath Template the default of multiple SPs.

    Only table template_sp_mapping is touched. Any previous mapping
    of template_id, and any previous mapping of the listed storage
    providers, is removed before the new mappings are inserted.
    e.g. with existing mappings ['sp1', 'sp2'] for template_id and
    sp_name_list ['sp3', 'sp4']: sp1/sp2 rows are removed, stale
    sp3/sp4 rows (if any) are removed, then sp3/sp4 are inserted
    pointing at template_id.

    :param template_id: the FCP Multipath Template ID
    :param sp_name_list: a list of storage provider hostnames
    """
    with get_fcp_conn() as conn:
        # drop every mapping that references template_id
        conn.execute("DELETE FROM template_sp_mapping "
                     "WHERE tmpl_id=?", (template_id,))
        # drop every mapping of the given storage providers
        conn.executemany("DELETE FROM template_sp_mapping "
                         "WHERE sp_name=?",
                         [(sp,) for sp in sp_name_list])
        # insert the new default mapping per storage provider
        conn.executemany("INSERT INTO template_sp_mapping "
                         "(tmpl_id, sp_name) VALUES (?, ?)",
                         [(template_id, sp) for sp in sp_name_list])
#########################################################
# DML related to multiple tables #
#########################################################
def get_allocated_fcps_from_assigner(self,
                                     assigner_id, fcp_template_id):
    """Return the template's FCPs in use (connected or reserved) by assigner.

    :param assigner_id: (str) VM userid
    :param fcp_template_id: (str) FCP Multipath Template id
    :return: (list) rows of (fcp_id, wwpn_npiv, wwpn_phy)
    """
    sql = ("SELECT "
           "fcp.fcp_id, fcp.wwpn_npiv, fcp.wwpn_phy "
           "FROM template_fcp_mapping "
           "INNER JOIN fcp "
           "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
           "WHERE template_fcp_mapping.tmpl_id=? "
           "AND fcp.assigner_id=? "
           "AND (fcp.connections<>0 OR fcp.reserved<>0) "
           "AND fcp.tmpl_id=? "
           "ORDER BY template_fcp_mapping.fcp_id ASC")
    with get_fcp_conn() as conn:
        rows = conn.execute(
            sql,
            (fcp_template_id, assigner_id, fcp_template_id)).fetchall()
    return rows
def get_reserved_fcps_from_assigner(self, assigner_id, fcp_template_id):
    """Return the template's FCPs reserved by the given assigner.

    :param assigner_id: (str) VM userid
    :param fcp_template_id: (str) FCP Multipath Template id
    :return: (list) rows of (fcp_id, wwpn_npiv, wwpn_phy, connections)
    """
    sql = ("SELECT fcp.fcp_id, fcp.wwpn_npiv, "
           "fcp.wwpn_phy, fcp.connections "
           "FROM template_fcp_mapping "
           "INNER JOIN fcp "
           "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
           "WHERE template_fcp_mapping.tmpl_id=? "
           "AND fcp.assigner_id=? "
           "AND fcp.reserved<>0 "
           "AND fcp.tmpl_id=? "
           "ORDER BY template_fcp_mapping.fcp_id ASC")
    with get_fcp_conn() as conn:
        rows = conn.execute(
            sql,
            (fcp_template_id, assigner_id, fcp_template_id)).fetchall()
    return rows
def get_fcp_devices_with_same_index(self, fcp_template_id):
    """ Get a group of available FCPs with the same index,
    which also need satisfy the following conditions:
    a. connections = 0
    b. reserved = 0
    c. state = 'free'

    :param fcp_template_id: (str) FCP Multipath Template id
    :return fcp_list: (list)
    case 1
        an empty list(i.e. [])
        if no fcp exist in DB
    case 2
        an empty list(i.e. [])
        if no expected pair found
    case 3
        randomly choose a pair of below combinations:
        [1a00,1b00] ,[1a01,1b01] ,[1a02,1b02]...
        rather than below combinations:
        [1a00,1b02] ,[1a03,1b00]
        [1a02], [1b03]
    """
    fcp_list = []
    # maps index-within-path -> list of (fcp_id, wwpn_npiv, wwpn_phy),
    # one entry per path once an index survives all paths
    fcp_pair_map = {}
    with get_fcp_conn() as conn:
        '''
        count_per_path examples:
        in normal cases, all path has same count, eg.
        4 paths: [7, 7, 7, 7]
        2 paths: [7, 7]
        we can also handle rare abnormal cases,
        where path count differs, eg.
        4 paths: [7, 4, 5, 6]
        2 paths: [7, 6]
        '''
        result = conn.execute("SELECT COUNT(path) "
                              "FROM template_fcp_mapping "
                              "WHERE tmpl_id=? "
                              "GROUP BY path "
                              "ORDER BY path ASC", (fcp_template_id,))
        count_per_path = [a[0] for a in result.fetchall()]
        # case1: return [] if no fcp found in FCP DB
        if not count_per_path:
            LOG.error("Not enough FCPs available, return empty list.")
            return fcp_list
        # count of *free* FCP devices per path (empty wwpn columns
        # are excluded because they cannot be used for attachment)
        result = conn.execute(
            "SELECT COUNT(template_fcp_mapping.path) "
            "FROM template_fcp_mapping "
            "INNER JOIN fcp "
            "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
            "WHERE template_fcp_mapping.tmpl_id=? "
            "AND fcp.connections=0 "
            "AND fcp.reserved=0 "
            "AND fcp.state='free' "
            "AND fcp.wwpn_npiv IS NOT '' "
            "AND fcp.wwpn_phy IS NOT '' "
            "GROUP BY template_fcp_mapping.path "
            "ORDER BY template_fcp_mapping.path", (fcp_template_id,))
        free_count_per_path = [a[0] for a in result.fetchall()]
        # case2: return [] if no free fcp found from at least one path
        if len(free_count_per_path) < len(count_per_path):
            # For get_fcp_pair_with_same_index, we will not check the
            # CONF.volume.min_fcp_paths_count, the returned fcp count
            # should always equal to the total paths count
            LOG.error("Available paths count: %s, total paths count: "
                      "%s." %
                      (len(free_count_per_path), len(count_per_path)))
            return fcp_list
        '''
        fcps 2 paths example:
        fcp conn reserved
        ------------------
        [('1a00', 1, 1, 'active'),
        ('1a01', 0, 0, 'free'),
        ('1a02', 0, 0, 'free'),
        ('1a03', 0, 0, 'free'),
        ('1a04', 0, 0, 'offline'"),
        ...
        ('1b00', 1, 0, 'active'),
        ('1b01', 2, 1, 'active'),
        ('1b02', 0, 0, 'free'),
        ('1b03', 0, 0, 'free'),
        ('1b04', 0, 0, 'free'),
        ... ]
        '''
        # all FCP devices of the template ordered by (path, fcp_id),
        # i.e. path0's devices first, then path1's, and so on
        result = conn.execute(
            "SELECT fcp.fcp_id, fcp.connections, "
            "fcp.reserved, fcp.state, fcp.wwpn_npiv, fcp.wwpn_phy "
            "FROM fcp "
            "INNER JOIN template_fcp_mapping "
            "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
            "WHERE template_fcp_mapping.tmpl_id=? "
            "ORDER BY template_fcp_mapping.path, "
            "template_fcp_mapping.fcp_id", (fcp_template_id,))
        fcps = result.fetchall()
        '''
        get all free fcps from 1st path
        fcp_pair_map example:
        idx fcp_pair
        ----------------
        { 1 : [('1a01', 'c05076de330003a3', '', 1)],
        2 : ['1a02'],
        3 : ['1a03']}
        '''
        # The FCP count of 1st path
        for i in range(count_per_path[0]):
            (fcp_no, connections, reserved,
             state, wwpn_npiv, wwpn_phy) = fcps[i]
            if connections == reserved == 0 and state == 'free':
                fcp_pair_map[i] = [(fcp_no, wwpn_npiv, wwpn_phy)]
        '''
        select out pairs if member count == path count
        fcp_pair_map example:
        idx fcp_pair
        ----------------------
        { 2 : ['1a02', '1b02'],
        3 : ['1a03', '1b03']}
        '''
        # For each candidate index, walk the remaining paths; `s` is
        # the running offset of each path's first device in fcps[].
        for idx in fcp_pair_map.copy():
            s = 0
            for i, c in enumerate(count_per_path[:-1]):
                s += c
                # avoid index out of range for per path in fcps[]
                (fcp_no, connections, reserved,
                 state, wwpn_npiv, wwpn_phy) = fcps[s + idx]
                if (idx < count_per_path[i + 1] and
                        connections == reserved == 0 and
                        state == 'free'):
                    fcp_pair_map[idx].append(
                        (fcp_no, wwpn_npiv, wwpn_phy))
                else:
                    # the same index is not free on this path;
                    # drop the whole candidate group
                    fcp_pair_map.pop(idx)
                    break
        '''
        case3: return one group randomly chosen from fcp_pair_map
        fcp_list example:
        ['1a03', '1b03']
        '''
        LOG.info("Print at most 5 available FCP groups: {}".format(
            list(fcp_pair_map.values())[:5]))
        if fcp_pair_map:
            fcp_list = random.choice(sorted(fcp_pair_map.values()))
        else:
            LOG.error("Not eligible FCP group found in FCP DB.")
        return fcp_list
def get_fcp_devices(self, fcp_template_id):
    """ Get a group of available FCPs,
    which satisfy the following conditions:
    a. connections = 0
    b. reserved = 0
    c. state = free

    One free FCP device is chosen at random from each path that has
    any; the group is returned only when at least
    min_fcp_paths_count paths contributed a device, otherwise [].

    :param fcp_template_id: (str) FCP Multipath Template id
    :return: (list) one row (fcp_id, wwpn_npiv, wwpn_phy) per path,
        or an empty list when not enough paths have free devices
    """
    fcp_list = []
    with get_fcp_conn() as conn:
        min_fcp_paths_count = self.get_min_fcp_paths_count(fcp_template_id)
        # Get distinct path list in DB
        result = conn.execute("SELECT DISTINCT path "
                              "FROM template_fcp_mapping "
                              "WHERE tmpl_id=?", (fcp_template_id,))
        path_list = result.fetchall()
        # Get fcp_list of every path
        for no in path_list:
            # free devices with non-empty wwpn columns on this path
            result = conn.execute(
                "SELECT fcp.fcp_id, fcp.wwpn_npiv, fcp.wwpn_phy "
                "FROM template_fcp_mapping "
                "INNER JOIN fcp "
                "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                "WHERE template_fcp_mapping.tmpl_id=? "
                "AND fcp.connections=0 "
                "AND fcp.reserved=0 "
                "AND fcp.state='free' "
                "AND template_fcp_mapping.path=? "
                "AND fcp.wwpn_npiv IS NOT '' "
                "AND fcp.wwpn_phy IS NOT '' "
                "ORDER BY template_fcp_mapping.path",
                (fcp_template_id, no[0]))
            fcps = result.fetchall()
            if not fcps:
                # continue to find whether
                # other paths has available FCP
                continue
            # pick one free device of this path at random
            index = random.randint(0, len(fcps) - 1)
            fcp_list.append(fcps[index])
        # Start to check whether the available count >= min_fcp_paths_count
        allocated_paths = len(fcp_list)
        total_paths = len(path_list)
        if allocated_paths < total_paths:
            LOG.info("Not all paths of FCP Multipath Template (id={}) "
                     "have available FCP devices. "
                     "The count of minimum FCP device path is {}. "
                     "The count of total paths is {}. "
                     "The count of paths with available FCP devices is {}, "
                     "which is less than the total path count."
                     .format(fcp_template_id, min_fcp_paths_count,
                             total_paths, allocated_paths))
            if allocated_paths >= min_fcp_paths_count:
                LOG.warning("The count of paths with available FCP devices "
                            "is less than that of total path, but not less "
                            "than that of minimum FCP device path. "
                            "Return the FCP devices {} from the available "
                            "paths to continue.".format(fcp_list))
                return fcp_list
            else:
                LOG.error("The count of paths with available FCP devices "
                          "must not be less than that of minimum FCP device "
                          "path, return empty list to abort the volume attachment.")
                return []
        else:
            return fcp_list
def create_fcp_template(self, fcp_template_id, name, description,
                        fcp_devices_by_path, host_default,
                        default_sp_list, min_fcp_paths_count=None):
    """ Insert records of new FCP Multipath Template in fcp DB

    :param fcp_template_id: FCP Multipath Template ID
    :param name: FCP Multipath Template name
    :param description: description
    :param fcp_devices_by_path:
        Example:
        if fcp_list is "0011-0013;0015;0017-0018",
        then fcp_devices_by_path should be passed like:
        {
          0: {'0011' ,'0012', '0013'}
          1: {'0015'}
          2: {'0017', '0018'}
        }
    :param host_default: (bool)
    :param default_sp_list: (list)
    :param min_fcp_paths_count: (int) if it is None, -1 will be saved
        to template table as default value.
    :return: NULL
    :raises SDKObjectAlreadyExistError: when the template id exists
    """
    # The following multiple DQLs(Database query)
    # are put into the with-block with DMLs
    # because the consequent DMLs(Database modification)
    # depend on the result of the DQLs.
    # So that, other threads can NOT begin a sqlite transacation
    # util current thread exits the with-block.
    # Refer to 'def get_fcp_conn' for thread lock
    with get_fcp_conn() as conn:
        # first check the template exist or not
        # if already exist, raise exception
        if self.fcp_template_exist_in_db(fcp_template_id):
            raise exception.SDKObjectAlreadyExistError(
                obj_desc=("FCP Multipath Template "
                          "(id: %s) " % fcp_template_id),
                modID=self._module_id)
        # then check the SP records exist in template_sp_mapping or not
        # if already exist, will update the tmpl_id
        # if not exist, will insert new records
        sp_mapping_to_add = list()
        sp_mapping_to_update = list()
        if not default_sp_list:
            default_sp_list = []
        for sp_name in default_sp_list:
            record = (fcp_template_id, sp_name)
            if self.sp_name_exist_in_db(sp_name):
                sp_mapping_to_update.append(record)
            else:
                sp_mapping_to_add.append(record)
        # Prepare records include (fcp_id, tmpl_id, path)
        # to be inserted into table template_fcp_mapping
        fcp_mapping = list()
        for path in fcp_devices_by_path:
            for fcp_id in fcp_devices_by_path[path]:
                fcp_mapping.append([fcp_id, fcp_template_id, path])
        # 1. change the is_default of existing templates to False,
        # if the is_default of the being-created template is True,
        # because only one default template per host is allowed
        if host_default is True:
            conn.execute("UPDATE template SET is_default=?", (False,))
        # 2. insert a new record in template table
        # FIX: compare against None explicitly so that a legal value of
        # 0 is stored rather than silently replaced by the column
        # default (-1); the docstring promises that only None falls
        # back to the default
        if min_fcp_paths_count is None:
            tmpl_basics = (fcp_template_id, name, description,
                           host_default)
            sql = ("INSERT INTO template (id, name, description, "
                   "is_default) VALUES (?, ?, ?, ?)")
        else:
            tmpl_basics = (fcp_template_id, name, description,
                           host_default, min_fcp_paths_count)
            sql = ("INSERT INTO template (id, name, description, "
                   "is_default, min_fcp_paths_count) "
                   "VALUES (?, ?, ?, ?, ?)")
        conn.execute(sql, tmpl_basics)
        # 3. insert new records in template_fcp_mapping
        conn.executemany("INSERT INTO template_fcp_mapping (fcp_id, "
                         "tmpl_id, path) VALUES (?, ?, ?)", fcp_mapping)
        # 4. insert a new record in template_sp_mapping
        if default_sp_list:
            if sp_mapping_to_add:
                conn.executemany("INSERT INTO template_sp_mapping "
                                 "(tmpl_id, sp_name) VALUES "
                                 "(?, ?)", sp_mapping_to_add)
            if sp_mapping_to_update:
                conn.executemany("UPDATE template_sp_mapping SET "
                                 "tmpl_id=? WHERE sp_name=?",
                                 sp_mapping_to_update)
def _validate_min_fcp_paths_count(self, fcp_devices, min_fcp_paths_count, fcp_template_id):
    """Validate min_fcp_paths_count against the FCP device path count.

    Used when editing a FCP Multipath Template. A parameter value of
    None means "keep the original value": if min_fcp_paths_count is
    None it is read from the template table, and if fcp_devices is
    None the path count is read from template_fcp_mapping.
    min_fcp_paths_count must not exceed the path count.

    :raises SDKConflictError: when min_fcp_paths_count is larger than
        the FCP device path count
    """
    # FIX: test `is not None` instead of truthiness so that a legal
    # value of 0 passed by the caller is validated as 0 rather than
    # being silently replaced by the stored DB value
    if min_fcp_paths_count is not None or fcp_devices:
        with get_fcp_conn():
            if not fcp_devices:
                fcp_devices_path_count = self.get_path_count(fcp_template_id)
            else:
                fcp_devices_by_path = utils.expand_fcp_list(fcp_devices)
                fcp_devices_path_count = len(fcp_devices_by_path)
            if min_fcp_paths_count is None:
                min_fcp_paths_count = self.get_min_fcp_paths_count_from_db(fcp_template_id)
            # raise exception
            if min_fcp_paths_count > fcp_devices_path_count:
                msg = ("min_fcp_paths_count %s is larger than fcp device path count %s. "
                       "Adjust the fcp_devices setting or "
                       "min_fcp_paths_count." % (min_fcp_paths_count, fcp_devices_path_count))
                LOG.error(msg)
                raise exception.SDKConflictError(modID=self._module_id, rs=23, msg=msg)
def get_min_fcp_paths_count(self, fcp_template_id):
    """Return the effective min_fcp_paths_count of a template.

    Reads template.min_fcp_paths_count first; -1 there means the
    value was never set, in which case the count of FCP device paths
    from template_fcp_mapping is used instead.

    :param fcp_template_id: (str) FCP Multipath Template id
    :raises SDKObjectNotExistError: when no value can be determined
    """
    min_fcp_paths_count = None
    if fcp_template_id:
        with get_fcp_conn():
            min_fcp_paths_count = self.get_min_fcp_paths_count_from_db(
                fcp_template_id)
            if min_fcp_paths_count == -1:
                min_fcp_paths_count = self.get_path_count(fcp_template_id)
    if min_fcp_paths_count is None:
        obj_desc = "min_fcp_paths_count from fcp_template_id %s" % fcp_template_id
        raise exception.SDKObjectNotExistError(obj_desc=obj_desc)
    return min_fcp_paths_count
    def edit_fcp_template(self, fcp_template_id, name=None, description=None,
                          fcp_devices=None, host_default=None,
                          default_sp_list=None, min_fcp_paths_count=None):
        """ Edit a FCP Multipath Template.

        The kwargs values are pre-validated in two places:
          validate kwargs types
            in zvmsdk/sdkwsgi/schemas/volume.py
          set a kwarg as None if not passed by user
            in zvmsdk/sdkwsgi/handlers/volume.py

        If any kwarg is None, the kwarg will not be updated.

        :param fcp_template_id: template id
        :param name:            template name
        :param description:     template desc
        :param fcp_devices:     FCP devices divided into
                                different paths by semicolon
          Format:
            "fcp-devices-from-path0;fcp-devices-from-path1;..."
          Example:
            "0011-0013;0015;0017-0018",
        :param host_default: (bool)
        :param default_sp_list: (list)
          Example:
            ["SP1", "SP2"]
        :param min_fcp_paths_count: if it is None, then will not update this field in db.
        :return:
          Example
            {
              'fcp_template': {
                'name': 'bjcb-test-template',
                'id': '36439338-db14-11ec-bb41-0201018b1dd2',
                'description': 'This is Default template',
                'host_default': True,
                'storage_providers': ['sp4', 'v7k60'],
                'min_fcp_paths_count': 2
              }
            }
        :raises SDKObjectNotExistError: when the template does not exist
        :raises SDKConflictError: when a path is added/deleted or an
            in-use FCP device removed while devices are still allocated
        """
        # The following multiple DQLs(Database query)
        # are put into the with-block with DMLs
        # because the consequent DMLs(Database modification)
        # depend on the result of the DQLs.
        # So that, other threads can NOT begin a sqlite transacation
        # util current thread exits the with-block.
        # Refer to 'def get_fcp_conn' for thread lock
        with get_fcp_conn():
            # DQL: validate: FCP Multipath Template
            if not self.fcp_template_exist_in_db(fcp_template_id):
                obj_desc = ("FCP Multipath Template {}".format(fcp_template_id))
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc)
            # DQL: validate: add or delete path from FCP Multipath Template.
            # If fcp_devices is None, it means user do not want to
            # modify fcp_devices, so skip the validation;
            # otherwise, perform the validation.
            if fcp_devices is not None:
                # Path count from input: count non-empty segments only,
                # so a trailing ';' does not count as an extra path.
                fcp_path_count_from_input = len(
                    [i for i in fcp_devices.split(';') if i])
                fcp_path_count_in_db = self.get_path_count(fcp_template_id)
                if fcp_path_count_from_input != fcp_path_count_in_db:
                    inuse_fcp = self.get_inuse_fcp_device_by_fcp_template(
                        fcp_template_id)
                    if inuse_fcp:
                        inuse_fcp = utils.shrink_fcp_list(
                            [fcp['fcp_id'] for fcp in inuse_fcp])
                        detail = ("The FCP devices ({}) are allocated to virtual machines "
                                  "by the FCP Multipath Template (id={}). "
                                  "Adding or deleting a FCP device path from a FCP Multipath Template "
                                  "is not allowed if there is any FCP device allocated from the template. "
                                  "You must deallocate those FCP devices "
                                  "before adding or deleting a path from the template."
                                  .format(inuse_fcp, fcp_template_id))
                        raise exception.SDKConflictError(modID=self._module_id, rs=24, msg=detail)
            # If min_fcp_paths_count is not None or fcp_devices is not None, need to validate the value.
            # min_fcp_paths_count should not be larger than fcp device path count, or else, raise error.
            self._validate_min_fcp_paths_count(fcp_devices, min_fcp_paths_count, fcp_template_id)
            # tmpl_basic: basic-info rows; fcp_detail: per-FCP mapping rows
            tmpl_basic, fcp_detail = self.get_fcp_templates_details(
                [fcp_template_id])
            # DML: table template_fcp_mapping
            if fcp_devices is not None:
                # fcp_from_input:
                # fcp devices from user input
                # example:
                # {'0011': 0, '0013': 0, <<< path 0
                #  '0015': 1,            <<< path 1
                #  '0018': 2, '0017': 2} <<< path 2
                fcp_from_input = dict()
                # fcp_devices_by_path:
                # example:
                # if fcp_devices is "0011-0013;0015;0017-0018",
                # then fcp_devices_by_path is :
                # {
                #   0: {'0011', '0013'}
                #   1: {'0015'}
                #   2: {'0017', '0018'}
                # }
                fcp_devices_by_path = utils.expand_fcp_list(fcp_devices)
                for path in fcp_devices_by_path:
                    for fcp_id in fcp_devices_by_path[path]:
                        fcp_from_input[fcp_id] = path
                # fcp_in_db:
                # FCP devices belonging to fcp_template_id
                # queried from database including the FCP devices
                # that are not found in z/VM
                # example:
                # {'0011': <sqlite3.Row object at 0x3ff85>,
                #  '0013': <sqlite3.Row object at 0x3f3da>}
                fcp_in_db = dict()
                for row in fcp_detail:
                    fcp_in_db[row['fcp_id']] = row
                # Divide the FCP devices into three sets
                add_set = set(fcp_from_input) - set(fcp_in_db)
                inter_set = set(fcp_from_input) & set(fcp_in_db)
                del_set = set(fcp_in_db) - set(fcp_from_input)
                # only unused FCP devices can be
                # deleted from a FCP Multipath Template.
                # Two types of unused FCP devices:
                # 1. connections/reserved == None:
                #    the fcp only exists in table(template_fcp_mapping),
                #    rather than table(fcp)
                # 2. connections/reserved == 0:
                #    the fcp exists in both tables
                #    and it is not allocated from FCP DB
                not_allow_for_del = set()
                for fcp in del_set:
                    if (fcp_in_db[fcp]['connections'] not in (None, 0) or
                            fcp_in_db[fcp]['reserved'] not in (None, 0)):
                        not_allow_for_del.add(fcp)
                # For a FCP device included in multiple FCP Multipath Templates,
                # the FCP device is allowed to be deleted from the current template
                # only if it is allocated from another template rather than the current one
                inuse_fcp_devices = self.get_inuse_fcp_device_by_fcp_template(fcp_template_id)
                inuse_fcp_by_current_template = set(fcp['fcp_id'] for fcp in inuse_fcp_devices)
                # Keep only the devices allocated from THIS template.
                not_allow_for_del &= inuse_fcp_by_current_template
                # validate: not allowed to remove inuse FCP devices
                if not_allow_for_del:
                    not_allow_for_del = utils.shrink_fcp_list(
                        list(not_allow_for_del))
                    detail = ("The FCP devices ({}) are missing from the FCP device list. "
                              "These FCP devices are allocated to virtual machines "
                              "from the FCP Multipath Template (id={}). "
                              "Deleting the allocated FCP devices from this template is not allowed. "
                              "You must ensure those FCP devices are included in the FCP device list."
                              .format(not_allow_for_del, fcp_template_id))
                    raise exception.SDKConflictError(modID=self._module_id, rs=24, msg=detail)
                # DML: table template_fcp_mapping
                LOG.info("DML: table template_fcp_mapping")
                # 1. delete from table template_fcp_mapping
                records_to_delete = [
                    (fcp_template_id, fcp_id)
                    for fcp_id in del_set]
                self.bulk_delete_fcp_device_from_fcp_template(
                    records_to_delete)
                LOG.info("FCP devices ({}) removed from FCP Multipath Template {}."
                         .format(utils.shrink_fcp_list(list(del_set)),
                                 fcp_template_id))
                # 2. insert into table template_fcp_mapping
                records_to_insert = [
                    (fcp_template_id, fcp_id, fcp_from_input[fcp_id])
                    for fcp_id in add_set]
                self.bulk_insert_fcp_device_into_fcp_template(
                    records_to_insert)
                LOG.info("FCP devices ({}) added into FCP Multipath Template {}."
                         .format(utils.shrink_fcp_list(list(add_set)),
                                 fcp_template_id))
                # 3. update table template_fcp_mapping
                #    update path of fcp devices if changed
                for fcp in inter_set:
                    path_from_input = fcp_from_input[fcp]
                    path_in_db = fcp_in_db[fcp]['path']
                    if path_from_input != path_in_db:
                        record_to_update = (
                            fcp_from_input[fcp], fcp, fcp_template_id)
                        self.update_path_of_fcp_device(record_to_update)
                        LOG.info("FCP device ({}) updated into "
                                 "FCP Multipath Template {} from path {} to path {}."
                                 .format(fcp, fcp_template_id,
                                         fcp_in_db[fcp]['path'],
                                         fcp_from_input[fcp]))
            # DML: table template
            # Only update the template table when at least one of the
            # basic fields was passed in.
            if (name, description, host_default, min_fcp_paths_count) != (None, None, None, None):
                LOG.info("DML: table template")
                record_to_update = (
                    name if name is not None
                    else tmpl_basic[0]['name'],
                    description if description is not None
                    else tmpl_basic[0]['description'],
                    host_default if host_default is not None
                    else tmpl_basic[0]['is_default'],
                    min_fcp_paths_count if min_fcp_paths_count is not None
                    else tmpl_basic[0]['min_fcp_paths_count'],
                    fcp_template_id)
                self.update_basic_info_of_fcp_template(record_to_update)
                LOG.info("FCP Multipath Template basic info updated.")
            # DML: table template_sp_mapping
            if default_sp_list is not None:
                LOG.info("DML: table template_sp_mapping")
                self.bulk_set_sp_default_by_fcp_template(fcp_template_id,
                                                         default_sp_list)
                LOG.info("Default template of storage providers ({}) "
                         "updated.".format(default_sp_list))
            # Return template basic info queried from DB
            # tmpl_basic is a list containing one or more sqlite.Row objects
            # Example:
            # if a template is the SP-level default for 2 SPs (SP1 and SP2)
            # (i.e. the template has 2 entries in table template_sp_mapping
            # then tmpl_basic is a list containing 2 Row objects,
            # the only different value between the 2 Row objects is 'sp_name'
            # (i.e. tmpl_basic[0]['sp_name'] is 'SP1',
            # while tmpl_basic[1]['sp_name'] is 'SP2'.
            tmpl_basic = self.get_fcp_templates_details([fcp_template_id])[0]
            return {'fcp_template': {
                'name': tmpl_basic[0]['name'],
                'id': tmpl_basic[0]['id'],
                'description': tmpl_basic[0]['description'],
                'host_default': bool(tmpl_basic[0]['is_default']),
                'storage_providers':
                    [] if tmpl_basic[0]['sp_name'] is None
                    else [r['sp_name'] for r in tmpl_basic],
                'min_fcp_paths_count': self.get_min_fcp_paths_count(fcp_template_id)
            }}
def get_fcp_templates(self, template_id_list=None):
"""Get FCP Multipath Templates base info by template_id_list.
If template_id_list is None, will get all the FCP Multipath Templates in db.
return format:
[(id|name|description|is_default|min_fcp_paths_count|sp_name)]
"""
cmd = ("SELECT template.id, template.name, template.description, "
"template.is_default, template.min_fcp_paths_count, template_sp_mapping.sp_name "
"FROM template "
"LEFT OUTER JOIN template_sp_mapping "
"ON template.id=template_sp_mapping.tmpl_id")
with get_fcp_conn() as conn:
if template_id_list:
result = conn.execute(
cmd + " WHERE template.id "
"IN (%s)" %
','.join('?' * len(template_id_list)),
template_id_list)
else:
result = conn.execute(cmd)
raw = result.fetchall()
return raw
def get_host_default_fcp_template(self, host_default=True):
"""Get the host default FCP Multipath Template base info.
return format: (id|name|description|is_default|sp_name)
when the template is more than one SP's default,
then it will show up several times in the result.
"""
with get_fcp_conn() as conn:
if host_default:
result = conn.execute(
"SELECT t.id, t.name, t.description, t.is_default, "
"t.min_fcp_paths_count, ts.sp_name "
"FROM template AS t "
"LEFT OUTER JOIN template_sp_mapping AS ts "
"ON t.id=ts.tmpl_id "
"WHERE t.is_default=1")
else:
result = conn.execute(
"SELECT t.id, t.name, t.description, t.is_default, "
"t.min_fcp_paths_count, ts.sp_name "
"FROM template AS t "
"LEFT OUTER JOIN template_sp_mapping AS ts "
"ON t.id=ts.tmpl_id "
"WHERE t.is_default=0")
raw = result.fetchall()
return raw
def get_sp_default_fcp_template(self, sp_host_list):
"""Get the sp_host_list default FCP Multipath Template.
"""
cmd = ("SELECT t.id, t.name, t.description, t.is_default, "
"t.min_fcp_paths_count, ts.sp_name "
"FROM template_sp_mapping AS ts "
"INNER JOIN template AS t "
"ON ts.tmpl_id=t.id")
raw = []
with get_fcp_conn() as conn:
if (len(sp_host_list) == 1 and
sp_host_list[0].lower() == 'all'):
result = conn.execute(cmd)
raw = result.fetchall()
else:
for sp_host in sp_host_list:
result = conn.execute(
cmd + " WHERE ts.sp_name=?", (sp_host,))
raw.extend(result.fetchall())
return raw
def get_fcp_template_by_assigner_id(self, assigner_id):
"""Get a templates list of specified assigner.
"""
with get_fcp_conn() as conn:
result = conn.execute(
"SELECT t.id, t.name, t.description, t.is_default, "
"t.min_fcp_paths_count, ts.sp_name "
"FROM fcp "
"INNER JOIN template AS t "
"ON fcp.tmpl_id=t.id "
"LEFT OUTER JOIN template_sp_mapping AS ts "
"ON fcp.tmpl_id=ts.tmpl_id "
"WHERE fcp.assigner_id=?", (assigner_id,))
raw = result.fetchall()
# id|name|description|is_default|min_fcp_paths_count|sp_name
return raw
def get_fcp_templates_details(self, template_id_list=None):
"""Get templates detail info by template_id_list
:param template_id_list: must be a list or None
If template_id_list=None, will get all the templates detail info.
Detail info including two parts: base info and fcp device info, these
two parts info will use two cmds to get from db and return out, outer
method will join these two return output.
'tmpl_cmd' is used to get base info from template table and
template_sp_mapping table.
tmpl_cmd result format:
id|name|description|is_default|min_fcp_paths_count|sp_name
'devices_cmd' is used to get fcp device info. Device's template id is
gotten from template_fcp_mapping table, device's usage info is gotten
from fcp table. Because not all the templates' fcp device is in fcp
table, so the fcp device's template id should being gotten from
template_fcp_mapping table insteading of fcp table.
'devices_cmd' result format:
fcp_id|tmpl_id|path|assigner_id|connections|reserved|
wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id
In 'devices_cmd' result: the first three properties are from
template_fcp_mapping table, and the others are from fcp table.
when the device is not in fcp table, all the properties in fcp
table will be None. For example: template '12345678' has a fcp
"1aaa" on path 0, but this device is not in fcp table, the
query result will be as below.
1aaa|12345678|0|||||||||
"""
tmpl_cmd = (
"SELECT t.id, t.name, t.description, "
"t.is_default, t.min_fcp_paths_count, ts.sp_name "
"FROM template AS t "
"LEFT OUTER JOIN template_sp_mapping AS ts "
"ON t.id=ts.tmpl_id")
devices_cmd = (
"SELECT tf.fcp_id, tf.tmpl_id, tf.path, fcp.assigner_id, "
"fcp.connections, fcp.reserved, fcp.wwpn_npiv, fcp.wwpn_phy, "
"fcp.chpid, fcp.state, fcp.owner, fcp.tmpl_id "
"FROM template_fcp_mapping AS tf "
"LEFT OUTER JOIN fcp "
"ON tf.fcp_id=fcp.fcp_id")
with get_fcp_conn() as conn:
if template_id_list:
tmpl_result = conn.execute(
tmpl_cmd + " WHERE t.id IN (%s)" %
','.join('?' * len(template_id_list)),
template_id_list)
devices_result = conn.execute(
devices_cmd + " WHERE tf.tmpl_id "
"IN (%s)" %
','.join('?' * len(template_id_list)),
template_id_list)
else:
tmpl_result = conn.execute(tmpl_cmd)
devices_result = conn.execute(devices_cmd)
tmpl_result = tmpl_result.fetchall()
devices_result = devices_result.fetchall()
return tmpl_result, devices_result
def bulk_delete_fcp_from_template(self, fcp_id_list, fcp_template_id):
"""Delete multiple FCP records from the table template_fcp_mapping in the
specified FCP Multipath Template only if the FCP devices are available."""
records_to_delete = [(fcp_template_id, fcp_id)
for fcp_id in fcp_id_list]
with get_fcp_conn() as conn:
conn.executemany(
"DELETE FROM template_fcp_mapping "
"WHERE fcp_id NOT IN ("
"SELECT fcp_id FROM fcp "
"WHERE fcp.connections<>0 OR fcp.reserved<>0) "
"AND tmpl_id=? AND fcp_id=?",
records_to_delete)
def delete_fcp_template(self, template_id):
"""Remove FCP Multipath Template record from template, template_sp_mapping,
template_fcp_mapping and fcp tables."""
with get_fcp_conn() as conn:
if not self.fcp_template_exist_in_db(template_id):
obj_desc = ("FCP Multipath Template {} ".format(template_id))
raise exception.SDKObjectNotExistError(obj_desc=obj_desc)
inuse_fcp_devices = self.get_inuse_fcp_device_by_fcp_template(
template_id)
if inuse_fcp_devices:
inuse_fcp_devices = utils.shrink_fcp_list(
[fcp['fcp_id'] for fcp in inuse_fcp_devices])
detail = ("The FCP devices ({}) are allocated to virtual machines "
"by the FCP Multipath Template (id={}). "
"Deleting a FCP Multipath Template is not allowed "
"if there is any FCP device allocated from the template. "
"You must deallocate those FCP devices before deleting the template."
.format(inuse_fcp_devices, template_id))
raise exception.SDKConflictError(modID=self._module_id, rs=22,
msg=detail)
conn.execute("DELETE FROM template WHERE id=?",
(template_id,))
conn.execute("DELETE FROM template_sp_mapping WHERE tmpl_id=?",
(template_id,))
conn.execute("DELETE FROM template_fcp_mapping WHERE tmpl_id=?",
(template_id,))
LOG.info("FCP Multipath Template with id %s is removed from "
"template, template_sp_mapping and "
"template_fcp_mapping tables" % template_id)
class ImageDbOperator(object):
    """CRUD helper for the 'image' table of the image database."""

    def __init__(self):
        self._create_image_table()
        self._module_id = 'image'

    def _create_image_table(self):
        # Idempotent DDL: safe to execute at every instantiation.
        create_image_table_sql = ' '.join((
            'CREATE TABLE IF NOT EXISTS image (',
            'imagename         varchar(128) PRIMARY KEY COLLATE NOCASE,',
            'imageosdistro            varchar(16),',
            'md5sum               varchar(512),',
            'disk_size_units       varchar(512),',
            'image_size_in_bytes     varchar(512),',
            'type                varchar(16),',
            'comments           varchar(128))'))
        with get_image_conn() as conn:
            conn.execute(create_image_table_sql)

    def image_add_record(self, imagename, imageosdistro, md5sum,
                         disk_size_units, image_size_in_bytes,
                         type, comments=None):
        """Insert one image record.

        :param comments: optional free-form text; the column is included
            in the INSERT only when a value is provided.
        """
        # Build the column/value lists once instead of duplicating the
        # whole INSERT statement for the with/without-comments cases.
        columns = ['imagename', 'imageosdistro', 'md5sum',
                   'disk_size_units', 'image_size_in_bytes', 'type']
        values = [imagename, imageosdistro, md5sum,
                  disk_size_units, image_size_in_bytes, type]
        if comments is not None:
            columns.append('comments')
            values.append(comments)
        sql = ("INSERT INTO image (%s) VALUES (%s)" %
               (', '.join(columns), ', '.join('?' * len(values))))
        with get_image_conn() as conn:
            conn.execute(sql, values)

    def image_query_record(self, imagename=None):
        """Query the image record from database, if imagename is None, all
        of the image records will be returned, otherwise only the specified
        image record will be returned.

        :return: list of dicts keyed by the image table column names
        :raises SDKObjectNotExistError: when imagename is given but the
            record does not exist
        """
        if imagename:
            with get_image_conn() as conn:
                result = conn.execute("SELECT * FROM image WHERE "
                                      "imagename=?", (imagename,))
                image_list = result.fetchall()
            if not image_list:
                obj_desc = "Image with name: %s" % imagename
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
        else:
            with get_image_conn() as conn:
                result = conn.execute("SELECT * FROM image")
                image_list = result.fetchall()
        # Map each image record to be a dict, with the key is the field
        # name in image DB
        image_keys_list = ['imagename', 'imageosdistro', 'md5sum',
                           'disk_size_units', 'image_size_in_bytes', 'type',
                           'comments']
        return [dict(zip(image_keys_list, item)) for item in image_list]

    def image_delete_record(self, imagename):
        """Delete the record of specified imagename from image table"""
        with get_image_conn() as conn:
            conn.execute("DELETE FROM image WHERE imagename=?", (imagename,))
class GuestDbOperator(object):
    """CRUD helper for the 'guests' table of the guest database."""

    def __init__(self):
        self._create_guests_table()
        self._module_id = 'guest'

    def _create_guests_table(self):
        """
        net_set: it is used to describe network interface status, the initial
                 value is 0, no network interface. It will be updated to be
                 1 after the network interface is configured
        """
        sql = ' '.join((
            'CREATE TABLE IF NOT EXISTS guests(',
            'id char(36) PRIMARY KEY COLLATE NOCASE,',
            'userid varchar(8) NOT NULL UNIQUE  COLLATE NOCASE,',
            'metadata varchar(255),',
            'net_set smallint DEFAULT 0,',
            'comments text)'))
        with get_guest_conn() as conn:
            conn.execute(sql)

    def _check_existence_by_id(self, guest_id, ignore=False):
        """Return the guest row for guest_id.

        When the guest is missing: log and return None if ignore=True,
        otherwise raise SDKObjectNotExistError.
        """
        guest = self.get_guest_by_id(guest_id)
        if guest is None:
            msg = 'Guest with id: %s does not exist in DB.' % guest_id
            if ignore:
                # Just print a warning message
                LOG.info(msg)
            else:
                LOG.error(msg)
                obj_desc = "Guest with id: %s" % guest_id
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
        return guest

    def _check_existence_by_userid(self, userid, ignore=False):
        """Same contract as _check_existence_by_id, keyed by userid."""
        guest = self.get_guest_by_userid(userid)
        if guest is None:
            msg = 'Guest with userid: %s does not exist in DB.' % userid
            if ignore:
                # Just print a warning message
                LOG.info(msg)
            else:
                LOG.error(msg)
                obj_desc = "Guest with userid: %s" % userid
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
        return guest

    def add_guest_registered(self, userid, meta, net_set,
                             comments=None):
        """Add a guest which is migrated from another host or onboarded."""
        guest_id = str(uuid.uuid4())
        with get_guest_conn() as conn:
            conn.execute(
                "INSERT INTO guests VALUES (?, ?, ?, ?, ?)",
                (guest_id, userid, meta, net_set, comments))

    def add_guest(self, userid, meta='', comments=''):
        """Add a newly created guest; the row id is generated automatically."""
        guest_id = str(uuid.uuid4())
        net_set = '0'
        with get_guest_conn() as conn:
            conn.execute(
                "INSERT INTO guests VALUES (?, ?, ?, ?, ?)",
                (guest_id, userid, meta, net_set, comments))

    def delete_guest_by_id(self, guest_id):
        """Delete the guest with the given id; no-op when it is absent."""
        # First check whether the guest exist in db table
        guest = self._check_existence_by_id(guest_id, ignore=True)
        if guest is None:
            return
        with get_guest_conn() as conn:
            conn.execute(
                "DELETE FROM guests WHERE id=?", (guest_id,))

    def delete_guest_by_userid(self, userid):
        """Delete the guest with the given userid; no-op when it is absent."""
        # First check whether the guest exist in db table
        guest = self._check_existence_by_userid(userid, ignore=True)
        if guest is None:
            return
        with get_guest_conn() as conn:
            conn.execute(
                "DELETE FROM guests WHERE userid=?", (userid,))

    def get_guest_metadata_with_userid(self, userid):
        """Return the [(metadata,)] rows for the given userid."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT metadata FROM guests "
                               "WHERE userid=?", (userid,))
            guests = res.fetchall()
        return guests

    def update_guest_by_id(self, uuid, userid=None, meta=None, net_set=None,
                           comments=None):
        """Update the given fields of one guest selected by id.

        :raises SDKInternalError: when no field is specified
        :raises SDKObjectNotExistError: when the guest does not exist
        """
        if ((userid is None) and (meta is None) and
                (net_set is None) and (comments is None)):
            msg = ("Update guest with id: %s failed, no field "
                   "specified to be updated." % uuid)
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg, modID=self._module_id)
        # First check whether the guest exist in db table
        self._check_existence_by_id(uuid)
        # Build the SET clause only from the fields that were passed in.
        sql_cmd = "UPDATE guests SET"
        sql_var = []
        if userid is not None:
            sql_cmd += " userid=?,"
            sql_var.append(userid)
        if meta is not None:
            sql_cmd += " metadata=?,"
            sql_var.append(meta)
        if net_set is not None:
            sql_cmd += " net_set=?,"
            sql_var.append(net_set)
        if comments is not None:
            sql_cmd += " comments=?,"
            sql_var.append(comments)
        # remove the tailing comma
        sql_cmd = sql_cmd.strip(',')
        # Add the id filter
        sql_cmd += " WHERE id=?"
        sql_var.append(uuid)
        with get_guest_conn() as conn:
            conn.execute(sql_cmd, sql_var)

    def update_guest_by_userid(self, userid, meta=None, net_set=None,
                               comments=None):
        """Update the given fields of one guest selected by userid.

        Unlike update_guest_by_id, comments are JSON-serialized before
        being stored.
        """
        if (meta is None) and (net_set is None) and (comments is None):
            msg = ("Update guest with userid: %s failed, no field "
                   "specified to be updated." % userid)
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg, modID=self._module_id)
        # First check whether the guest exist in db table
        self._check_existence_by_userid(userid)
        # Build the SET clause only from the fields that were passed in.
        sql_cmd = "UPDATE guests SET"
        sql_var = []
        if meta is not None:
            sql_cmd += " metadata=?,"
            sql_var.append(meta)
        if net_set is not None:
            sql_cmd += " net_set=?,"
            sql_var.append(net_set)
        if comments is not None:
            new_comments = json.dumps(comments)
            sql_cmd += " comments=?,"
            sql_var.append(new_comments)
        # remove the tailing comma
        sql_cmd = sql_cmd.strip(',')
        # Add the userid filter
        sql_cmd += " WHERE userid=?"
        sql_var.append(userid)
        with get_guest_conn() as conn:
            conn.execute(sql_cmd, sql_var)

    def get_guest_list(self):
        """Return all rows of the guests table."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT * FROM guests")
            guests = res.fetchall()
        return guests

    def get_migrated_guest_list(self):
        """Return the userids of guests whose comments mark them migrated."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT userid FROM guests "
                               "WHERE comments LIKE '%\"migrated\": 1%'")
            guests = res.fetchall()
        return guests

    def get_migrated_guest_info_list(self):
        """Return the full rows of guests whose comments mark them migrated."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT * FROM guests "
                               "WHERE comments LIKE '%\"migrated\": 1%'")
            guests = res.fetchall()
        return guests

    def get_comments_by_userid(self, userid):
        """ Get comments record.
        output should be like: {'k1': 'v1', 'k2': 'v2'}'
        """
        with get_guest_conn() as conn:
            res = conn.execute("SELECT comments FROM guests "
                               "WHERE userid=?", (userid,))
            result = res.fetchall()
        comments = {}
        # Robustness fix: also guard against the guest row being absent
        # (empty result), not only against an empty comments column.
        if result and result[0][0]:
            comments = json.loads(result[0][0])
        return comments

    def get_metadata_by_userid(self, userid):
        """get metadata record.
        output should be like: "a=1,b=2,c=3"
        """
        with get_guest_conn() as conn:
            res = conn.execute("SELECT * FROM guests "
                               "WHERE userid=?", (userid,))
            guest = res.fetchall()
        if len(guest) == 1:
            return guest[0][2]
        elif len(guest) == 0:
            LOG.debug("Guest with userid: %s not found from DB!" % userid)
            return ''
        else:
            msg = "Guest with userid: %s have multiple records!" % userid
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg, modID=self._module_id)

    def transfer_metadata_to_dict(self, meta):
        """transfer str to dict.
        output should be like: {'a':1, 'b':2, 'c':3}

        Robustness fixes: empty or malformed segments (no '=') are now
        skipped instead of raising IndexError, and a value containing
        '=' is kept intact instead of being silently truncated.
        """
        dic = {}
        for entry in meta.strip(' ,').split(','):
            if '=' not in entry:
                continue
            key, value = entry.split('=', 1)
            dic[key.strip()] = value.strip()
        return dic

    def get_guest_by_id(self, guest_id):
        """Return the single guest row matching guest_id, or None."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT * FROM guests "
                               "WHERE id=?", (guest_id,))
            guest = res.fetchall()
        # As id is the primary key, the filtered entry number should be 0 or 1
        if len(guest) == 1:
            return guest[0]
        elif len(guest) == 0:
            LOG.debug("Guest with id: %s not found from DB!" % guest_id)
            return None
        # Code shouldn't come here, just in case
        return None

    def get_guest_by_userid(self, userid):
        """Return the single guest row matching userid, or None."""
        with get_guest_conn() as conn:
            res = conn.execute("SELECT * FROM guests "
                               "WHERE userid=?", (userid,))
            guest = res.fetchall()
        # As userid is unique, the filtered entry number should be 0 or 1
        if len(guest) == 1:
            return guest[0]
        elif len(guest) == 0:
            LOG.debug("Guest with userid: %s not found from DB!" % userid)
            return None
        # Code shouldn't come here, just in case
        return None
import six
from zvmsdk import config
from zvmsdk import log
from zvmsdk import returncode
# Module-wide configuration and logger handles shared by the classes below.
CONF = config.CONF
LOG = log.LOG
class SDKBaseException(Exception):
    """
    Inherit from this class and define a 'msg_fmt' property.
    That msg_fmt will get printf'd with the keyword arguments
    provided to the constructor.
    """
    msg_fmt = "z/VM SDK error: %(msg)s"
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, results=None, **kwargs):
        self.results = results
        self.kw = kwargs
        if 'code' in self.kw:
            try:
                # Force the 'code' format kwarg to the class-level value so
                # msg_fmt interpolation always reports the class's code.
                self.kw['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.msg_fmt % kwargs
            except Exception:
                # Interpolation failed (missing/mismatched kwargs): log the
                # details and fall back to the raw format string so the
                # exception itself never fails to construct.
                LOG.exception('Exception in string format operation')
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s" % (name, value))
                message = self.msg_fmt
        self.message = message
        super(SDKBaseException, self).__init__(message)

    def format_message(self):
        # args[0] is the message handed to Exception.__init__ above.
        return self.args[0]
class ZVMException(SDKBaseException):
    """Generic z/VM failure."""
    msg_fmt = 'ZVMException happened: %(msg)s'
class ZVMNetworkError(SDKBaseException):
    """Raised for z/VM network errors."""
    msg_fmt = "z/VM network error: %(msg)s"
class ZVMVirtualMachineNotExist(SDKBaseException):
    """Raised when the target virtual machine does not exist on the host."""
    msg_fmt = 'Virtual machine %(userid)s does not exist in %(zvm_host)s'
class NotFound(SDKBaseException):
    """Raised when a requested resource cannot be found."""
    msg_fmt = 'The resource can not be found'
class InvalidName(SDKBaseException):
    """Raised when a provided name is invalid."""
    msg_fmt = 'Invalid name provided, reason is %(reason)s'
class ValidationError(SDKBaseException):
    """Raised when request input fails validation; maps to HTTP 400."""
    safe = True
    code = 400
    msg_fmt = 'Validation error: %(detail)s'
class ZVMUnauthorized(SDKBaseException):
    """Raised when the caller is not authorized; maps to HTTP 401."""
    msg_fmt = 'Not authorized to execute'
    code = 401
class ZVMNotFound(SDKBaseException):
    """Raised when a z/VM resource does not exist ('notExist' returncode)."""
    def __init__(self, msg, modID='zvmsdk'):
        rc = returncode.errors['notExist']
        results = rc[0]
        results['modID'] = returncode.ModRCs[modID]
        results['rs'] = 1
        # NOTE(review): rs is set to 1 but the message template used is
        # rc[1][2] — confirm this index mismatch is intended.
        errormsg = rc[1][2] % {'msg': msg}
        super(ZVMNotFound, self).__init__(results=results,
                                          message=errormsg)
class SDKDatabaseException(SDKBaseException):
    """Raised for SDK database errors."""
    msg_fmt = "SDK database error: %(msg)s"
class SDKInvalidInputNumber(SDKBaseException):
    """Raised when an API receives the wrong number of arguments (input rs=1)."""
    def __init__(self, api, expected, provided):
        rc = returncode.errors['input']
        results = rc[0]
        results['modID'] = returncode.ModRCs['zvmsdk']
        results['rs'] = 1
        errormsg = rc[1][1] % {'api': api, 'expected': expected,
                               'provided': provided}
        super(SDKInvalidInputNumber, self).__init__(results=results,
                                                    message=errormsg)
class SDKInvalidInputTypes(SDKBaseException):
    """Raised when an API receives arguments of the wrong types (input rs=2)."""
    def __init__(self, api, expected, inputtypes):
        rc = returncode.errors['input']
        results = rc[0]
        results['modID'] = returncode.ModRCs['zvmsdk']
        results['rs'] = 2
        errormsg = rc[1][2] % {'api': api, 'expected': expected,
                               'inputtypes': inputtypes}
        super(SDKInvalidInputTypes, self).__init__(results=results,
                                                   message=errormsg)
class SDKInvalidInputFormat(SDKBaseException):
    """Raised when an input value has an invalid format (input rs=3)."""
    def __init__(self, msg):
        rc = returncode.errors['input']
        results = rc[0]
        results['modID'] = returncode.ModRCs['zvmsdk']
        results['rs'] = 3
        errormsg = rc[1][3] % {'msg': msg}
        super(SDKInvalidInputFormat, self).__init__(results=results,
                                                    message=errormsg)
class SDKMissingRequiredInput(SDKBaseException):
    """Raised when a required input parameter is missing (input rs=4)."""
    def __init__(self, msg):
        rc = returncode.errors['input']
        results = rc[0]
        results['modID'] = returncode.ModRCs['zvmsdk']
        results['rs'] = 4
        errormsg = rc[1][4] % {'msg': msg}
        # Bug fix: the super() call previously named SDKInvalidInputFormat,
        # which raises "TypeError: super(type, obj): obj must be an instance
        # or subtype of type" whenever this exception is constructed, since
        # self is not an SDKInvalidInputFormat. It must name this class.
        super(SDKMissingRequiredInput, self).__init__(results=results,
                                                      message=errormsg)
class SDKInternalError(SDKBaseException):
    """Raised for internal errors of the SDK or of the SMT layer."""
    def __init__(self, msg, modID='zvmsdk', results=None):
        # if results is set, it means the internal error comes from
        # smt module, we need to keep the rc/rs value from SMT
        rc = returncode.errors['internal']
        errormsg = rc[1][1] % {'msg': msg}
        if results is None:
            results = rc[0]
            results['rs'] = 1
            results['modID'] = returncode.ModRCs[modID]
        else:
            # SMT internal error
            # Reset the overallRC in results to the overallRC value
            # corresponding to internal error
            results['overallRC'] = (rc[0]['overallRC'])
            results['modID'] = returncode.ModRCs['smt']
        super(SDKInternalError, self).__init__(results=results,
                                               message=errormsg)
class SDKConflictError(SDKBaseException):
    """Raised when a request conflicts with the current resource state."""
    def __init__(self, modID, rs, **kwargs):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['conflict']
        results = rc[0]
        results['modID'] = returncode.ModRCs[modID]
        results['rs'] = rs
        errormsg = rc[1][rs] % kwargs
        super(SDKConflictError, self).__init__(results=results,
                                               message=errormsg)
class SDKObjectNotExistError(SDKBaseException):
    """Raised when the object to operate on does not exist."""
    def __init__(self, obj_desc, modID='zvmsdk', rs=1):
        rc = returncode.errors['notExist']
        results = rc[0]
        results['modID'] = returncode.ModRCs[modID]
        results['rs'] = rs
        errormsg = rc[1][rs] % {'obj_desc': obj_desc}
        super(SDKObjectNotExistError, self).__init__(results=results,
                                                     message=errormsg)
class SDKObjectAlreadyExistError(SDKBaseException):
    """The object to create or add is already exist in ZCC."""
    def __init__(self, obj_desc, modID='zvmsdk', rs=1):
        rc = returncode.errors['alreadyExist']
        results = rc[0]
        results['modID'] = returncode.ModRCs[modID]
        results['rs'] = rs
        errormsg = rc[1][rs] % {'obj_desc': obj_desc}
        super(SDKObjectAlreadyExistError, self).__init__(results=results,
                                                         message=errormsg)
class SDKSMTRequestFailed(SDKBaseException):
    """Raised when a request to the SMT layer fails; keeps the SMT results."""
    def __init__(self, results, msg):
        results['modID'] = returncode.ModRCs['smt']
        super(SDKSMTRequestFailed, self).__init__(results=results,
                                                  message=msg)
class SDKGuestOperationError(SDKBaseException):
    """Raised when a guest operation fails ('guest' returncode family)."""
    def __init__(self, rs, **kwargs):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['guest']
        results = rc[0]
        results['rs'] = rs
        errormsg = rc[1][rs] % kwargs
        super(SDKGuestOperationError, self).__init__(results=results,
                                                     message=errormsg)
class SDKNetworkOperationError(SDKBaseException):
    """Raised when a network operation fails ('network' returncode family)."""
    def __init__(self, rs, **kwargs):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['network']
        results = rc[0]
        results['rs'] = rs
        errormsg = rc[1][rs] % kwargs
        super(SDKNetworkOperationError, self).__init__(results=results,
                                                       message=errormsg)
class SDKImageOperationError(SDKBaseException):
    """Raised when an image operation fails ('image' returncode family)."""
    def __init__(self, rs, **kwargs):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['image']
        results = rc[0]
        results['rs'] = rs
        errormsg = rc[1][rs] % kwargs
        results['strError'] = errormsg
        super(SDKImageOperationError, self).__init__(results=results,
                                                     message=errormsg)
class SDKVolumeOperationError(SDKBaseException):
    """Raised when a volume operation fails ('volume' returncode family)."""
    def __init__(self, rs, **kwargs):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['volume']
        results = rc[0]
        results['rs'] = rs
        errormsg = rc[1][rs] % kwargs
        results['strError'] = errormsg
        super(SDKVolumeOperationError, self).__init__(results=results,
                                                      message=errormsg)
class SDKFunctionNotImplementError(SDKBaseException):
    """Raised when a requested function/service is not supported."""
    def __init__(self, func, modID='guest'):
        # kwargs can be used to contain different keyword for constructing
        # the rs error msg
        rc = returncode.errors['serviceNotSupport']
        results = rc[0]
        # Consistency fix: map the module name through returncode.ModRCs
        # (as every sibling exception class does) instead of storing the
        # raw module-name string in results['modID'].
        results['modID'] = returncode.ModRCs[modID]
        results['rs'] = 1
        errormsg = rc[1][1] % {'func': func}
        results['strError'] = errormsg
        super(SDKFunctionNotImplementError, self).__init__(results=results,
                                                           message=errormsg)
class SDKRetryException(SDKBaseException):
    """Internal marker exception: the failed operation may be retried."""
    msg_fmt = 'Retry exception'
import logging
import os
from zvmsdk import config
class Logger():
    """Thin wrapper around the stdlib logging module for the SDK.

    Holds one named logging.Logger plus the currently configured level;
    setup() attaches a file handler once the directory and level are known.
    """

    def __init__(self, logger):
        # create a logger; level defaults to INFO until setup() runs
        self.logger = logging.getLogger(logger)
        self.log_level = logging.INFO

    def getlog(self):
        """Return the underlying logging.Logger instance."""
        return self.logger

    def setup(self, log_dir, log_level, log_file_name='zvmsdk.log'):
        """Attach a file handler writing to log_dir/log_file_name.

        Falls back to /tmp/ when the directory cannot be created.
        """
        # make sure target directory exists.
        # Fix: the original tested os.access() on the *missing* path, which
        # always returns False on POSIX, so the makedirs() branch was dead
        # code and a fresh install always fell back to /tmp/.
        if not os.path.exists(log_dir):
            try:
                os.makedirs(log_dir)
            except OSError:
                log_dir = '/tmp/'

        # Setup log level
        self.updateloglevel(log_level)
        self.logger.setLevel(self.log_level)

        # create a handler for the file
        log_file = os.path.join(log_dir, log_file_name)
        fh = logging.FileHandler(log_file)
        fh.setLevel(self.log_level)

        # set the format of the handler
        formatter = logging.Formatter(
            '[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')
        fh.setFormatter(formatter)

        # add handler in the logger
        self.logger.addHandler(fh)

    def updateloglevel(self, level):
        """Translate a level name ('info', 'logging.debug', ...) and store
        it; unrecognized names fall back to logging.INFO."""
        mapping = {
            'LOGGING.INFO': logging.INFO, 'INFO': logging.INFO,
            'LOGGING.DEBUG': logging.DEBUG, 'DEBUG': logging.DEBUG,
            'LOGGING.WARN': logging.WARN, 'WARN': logging.WARN,
            'LOGGING.ERROR': logging.ERROR, 'ERROR': logging.ERROR,
            'LOGGING.CRITICAL': logging.CRITICAL,
            'CRITICAL': logging.CRITICAL,
        }
        self.log_level = mapping.get(level.upper(), logging.INFO)

    def getloglevel(self):
        """Return the numeric logging level currently configured."""
        return self.log_level
def setup_log():
    # Configure the shared module-level LOGGER from zvmsdk.config
    # (log directory and level come from the [logging] section).
    global LOGGER
    LOGGER.setup(log_dir=config.CONF.logging.log_dir,
                 log_level=config.CONF.logging.log_level)
# Shared module-level logger; setup_log() must run to attach the handler.
LOGGER = Logger('ZVMSDK')
LOG = LOGGER.getlog()
import json
import six
import webob
from webob.dec import wsgify
from zvmsdk import log
LOG = log.LOG

# Module id the REST layer reports inside SDK-style error payloads.
SDKWSGI_MODID = 120

# The following globals are used by `mask_tuple_password`
# Header names whose values must never appear in logs.
_SANITIZE_KEYS = ['X-Auth-Token']
def extract_json(body):
    """Deserialize a request *body* (bytes or str) into Python data.

    Raises webob HTTPBadRequest (JSON-formatted) on malformed JSON.
    """
    try:
        LOG.debug('Decoding body: %s', body)
        # The body comes straight off the socket, so under py3 it is bytes.
        payload = bytes.decode(body) if isinstance(body, bytes) else body
        decoded = json.loads(payload)
    except ValueError as exc:
        msg = ('Malformed JSON: %(error)s') % {'error': exc}
        LOG.debug(msg)
        raise webob.exc.HTTPBadRequest(msg,
            json_formatter=json_error_formatter)
    return decoded
def json_error_formatter(body, status, title, environ):
    """A json_formatter for webob exceptions."""
    detail = webob.exc.strip_tags(body)
    status_code = int(status.split(None, 1)[0])
    return {'errors': [{
        'status': status_code,
        'title': title,
        'detail': detail,
    }]}
def wsgi_path_item(environ, name):
    """Extract the value of a named field in a URL.

    Return None if the name is not present or there are no path items.
    """
    routing_args = environ.get('wsgiorg.routing_args')
    try:
        return routing_args[1][name]
    except (TypeError, KeyError, IndexError):
        return None
# Accepted spellings for boolean values (compared after strip().lower()).
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
def bool_from_string(subject, strict=False, default=False):
    """Interpret *subject* as a boolean.

    Booleans pass through; anything else is stringified and matched
    case-insensitively against TRUE_STRINGS / FALSE_STRINGS.  Unrecognized
    values return *default*, or raise ValueError when *strict* is set.
    """
    if isinstance(subject, bool):
        return subject
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)
    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = ("Unrecognized value '%(val)s', acceptable values are:"
           " %(acceptable)s") % {'val': subject,
                                 'acceptable': acceptable}
    raise ValueError(msg)
def get_request_uri(environ):
    """Rebuild the request URI: script name + path info [+ '?' query]."""
    uri = environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')
    query = environ.get('QUERY_STRING')
    if query:
        uri = '%s?%s' % (uri, query)
    return uri
def get_http_code_from_sdk_return(msg, additional_handler=None, default=200):
    """Translate an SDK result dict into an HTTP status code.

    overallRC values that match SDK-layer conventions map directly;
    *additional_handler* may map module-specific rc/rs triples; anything
    else unhandled becomes 500.  A result without 'overallRC' yields None
    (kept for compatibility with the original behaviour).
    """
    LOG.debug("Get msg to handle: %s", msg)

    if 'overallRC' not in msg:
        return None
    overall = msg['overallRC']
    if overall == 0:
        # success -> caller-supplied default code
        return default
    # same definition to sdk layer
    if overall in (400, 404, 409, 501, 503):
        return overall
    # 100 means validation error in the sdk layer -> 400 bad request
    if overall == 100:
        return 400
    # Give the module-specific handler (smt returns) a chance.
    if additional_handler:
        mapped = additional_handler(msg)
        if mapped:
            return mapped
    # ok, we reach here because can't handle it
    LOG.info("The msg <%s> lead to return internal error", msg)
    return 500
def handle_not_found(msg):
    """Map SMT 'object does not exist' return codes onto HTTP 404.

    Returns 404 when the (overallRC, rc, rs) triple matches a known
    not-found case, else 0 so the caller falls back to its default.
    """
    if not all(k in msg for k in ('overallRC', 'rc', 'rs')):
        return 0
    code = (msg['overallRC'], msg['rc'], msg['rs'])
    if code == (8, 212, 40):
        LOG.debug('vswitch does not exist, change ret to 404')
        return 404
    if code == (4, 5, 402):
        LOG.debug('disk pool not exist, change ret to 404')
        return 404
    if code == (300, 300, 20):
        LOG.debug('image not exist, change ret to 404')
        return 404
    # (8, 400, 4) and (8, 200, 4) both mean the guest does not exist.
    if code in ((8, 400, 4), (8, 200, 4)):
        LOG.debug('guest not exist, change ret to 404')
        return 404
    # 300/300/3 plus this errmsg fragment means the deploy target vdev
    # does not exist.
    if (code == (300, 300, 3) and
            'not linked; not in CP directory' in msg['errmsg']):
        LOG.debug('deploy target vdev not exist,'
                  ' change ret to 404')
        return 404
    return 0
def handle_already_exists(msg):
    """Map SMT 'object already exists' return codes onto HTTP 409.

    Returns 409 for known already-exists triples, else 0 so the caller
    falls back to its default.
    """
    if not all(k in msg for k in ('overallRC', 'rc', 'rs')):
        return 0
    code = (msg['overallRC'], msg['rc'], msg['rs'])
    if code == (8, 212, 36):
        LOG.debug('vswitch already exist, change ret to 409')
        return 409
    if code == (300, 300, 13):
        LOG.debug('image already exist, change ret to 409')
        return 409
    if code == (8, 400, 8):
        LOG.debug('guest already exist, change ret to 409')
        return 409
    # not handled here, let the caller use its default
    return 0
def handle_conflict_state(msg):
    """Map the 'guest powered off' triple (300/300/5) onto HTTP 409."""
    keys_present = all(k in msg for k in ('overallRC', 'rc', 'rs'))
    if (keys_present and
            (msg['overallRC'], msg['rc'], msg['rs']) == (300, 300, 5)):
        LOG.debug('guest power off state, change ret to 409')
        return 409
    return 0
def handle_not_found_and_conflict(msg):
    """Try the not-found mapping first, then the conflict mapping."""
    mapped = handle_not_found(msg)
    return handle_conflict_state(msg) if mapped == 0 else mapped
def mask_tuple_password(message_list, secret="***"):
    """Replace password with *secret* in message.

    Any tuple in *message_list* that contains one of _SANITIZE_KEYS
    (case-insensitively, among its str elements) is replaced by
    (key, secret); every other item passes through unchanged.

    Fix: the original iterated the whole list once per sanitize key, which
    would duplicate every non-matching item as soon as _SANITIZE_KEYS grew
    beyond a single entry.  Each item is now examined exactly once.
    """
    retval = []
    for item in message_list:
        masked = None
        if isinstance(item, tuple):
            lowered = [x.lower() for x in item if isinstance(x, str)]
            for sani_key in _SANITIZE_KEYS:
                if sani_key.lower() in lowered:
                    masked = (sani_key, secret)
                    break
        retval.append(item if masked is None else masked)
    return retval
class SdkWsgify(wsgify):
    """webob wsgify subclass that serializes HTTPExceptions raised by a
    handler into SDK-style JSON fault payloads."""
    def call_func(self, req, *args, **kwargs):
        """Add json_error_formatter to any webob HTTPExceptions."""
        try:
            return super(SdkWsgify, self).call_func(req, *args, **kwargs)
        except webob.exc.HTTPException as exc:
            msg = ('encounter %(error)s error') % {'error': exc}
            LOG.debug(msg)
            exc.json_formatter = json_error_formatter
            code = exc.status_int
            explanation = six.text_type(exc)
            # Mirror the zvmsdk returncode result shape: overallRC/rc 400
            # flag a REST-layer failure, 'rs' carries the HTTP code.
            fault_data = {
                'overallRC': 400,
                'rc': 400,
                'rs': code,
                'modID': SDKWSGI_MODID,
                'output': '',
                'errmsg': explanation}
            exc.text = six.text_type(json.dumps(fault_data))
            raise exc
import routes
import webob
from zvmsdk import exception
from zvmsdk import log
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi.handlers import file
from zvmsdk.sdkwsgi.handlers import guest
from zvmsdk.sdkwsgi.handlers import healthy
from zvmsdk.sdkwsgi.handlers import host
from zvmsdk.sdkwsgi.handlers import image
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.handlers import version
from zvmsdk.sdkwsgi.handlers import volume
from zvmsdk.sdkwsgi.handlers import vswitch
LOG = log.LOG

# This is the route of zvm sdk REST API, in order to add or modify
# you need add code in handlers/ folder to handle the request
# Each entry maps a URL template to {HTTP method: handler callable};
# make_map() turns this table into a routes.Mapper.
ROUTE_LIST = (
    ('/', {
        'GET': version.version,
    }),
    ('/guests', {
        'POST': guest.guest_create,
        'GET': guest.guest_list,
    }),
    ('/guests/stats', {
        'GET': guest.guest_get_stats
    }),
    ('/guests/interfacestats', {
        'GET': guest.guest_get_interface_stats
    }),
    ('/guests/nics', {
        'GET': guest.guests_get_nic_info
    }),
    ('/guests/volumes', {
        'POST': volume.volume_attach,
        'DELETE': volume.volume_detach,
    }),
    ('/volumes/conn/{userid}', {
        'GET': volume.get_volume_connector,
    }),
    ('/volumes/fcptemplates', {
        'POST': volume.create_fcp_template,
        'GET': volume.get_fcp_templates,
    }),
    ('/volumes/fcptemplates/detail', {
        'GET': volume.get_fcp_templates_details,
    }),
    ('/volumes/fcptemplates/{template_id}', {
        'DELETE': volume.delete_fcp_template,
        'PUT': volume.edit_fcp_template
    }),
    ('/volumes/fcp/{fcp_id}', {
        'GET': volume.get_fcp_usage,
        'PUT': volume.set_fcp_usage,
    }),
    ('/volumes/volume_refresh_bootmap', {
        'PUT': volume.volume_refresh_bootmap,
    }),
    ('/guests/{userid}', {
        'DELETE': guest.guest_delete,
        'GET': guest.guest_get,
    }),
    ('/guests/{userid}/action', {
        'POST': guest.guest_action,
    }),
    ('/guests/{userid}/power_state_real', {
        'GET': guest.guest_get_power_state_real,
    }),
    ('/guests/{userid}/info', {
        'GET': guest.guest_get_info,
    }),
    ('/guests/{userid}/user_direct', {
        'GET': guest.guest_get_user_direct,
    }),
    ('/guests/{userid}/adapters', {
        'GET': guest.guest_get_adapters_info,
    }),
    ('/guests/{userid}/nic', {
        'POST': guest.guest_create_nic,
    }),
    ('/guests/{userid}/nic/{vdev}', {
        'DELETE': guest.guest_delete_nic,
        'PUT': guest.guest_couple_uncouple_nic,
    }),
    ('/guests/{userid}/interface', {
        'POST': guest.guest_create_network_interface,
        'DELETE': guest.guest_delete_network_interface,
    }),
    ('/guests/{userid}/power_state', {
        'GET': guest.guest_get_power_state,
    }),
    ('/guests/{userid}/disks', {
        'POST': guest.guest_create_disks,
        'DELETE': guest.guest_delete_disks,
        'PUT': guest.guest_config_disks,
    }),
    ('/smapi-healthy', {
        'GET': healthy.healthy,
    }),
    ('/host', {
        'GET': host.host_get_info,
    }),
    ('/host/guests', {
        'GET': host.host_get_guest_list,
    }),
    ('/host/diskpool', {
        'GET': host.host_get_disk_info,
    }),
    ('/host/diskpool_volumes', {
        'GET': host.host_get_diskpool_volumes,
    }),
    ('/host/volume', {
        'GET': host.host_get_volume_info,
    }),
    ('/host/ssi', {
        'GET': host.host_get_ssi_info,
    }),
    ('/images', {
        'POST': image.image_create,
        'GET': image.image_query
    }),
    ('/images/{name}', {
        'DELETE': image.image_delete,
        'PUT': image.image_export,
    }),
    ('/images/{name}/root_disk_size', {
        'GET': image.image_get_root_disk_size,
    }),
    ('/files', {
        'PUT': file.file_import,
        'POST': file.file_export,
    }),
    ('/token', {
        'POST': tokens.create,
    }),
    ('/vswitches', {
        'GET': vswitch.vswitch_list,
        'POST': vswitch.vswitch_create,
    }),
    ('/vswitches/{name}', {
        'GET': vswitch.vswitch_query,
        'DELETE': vswitch.vswitch_delete,
        'PUT': vswitch.vswitch_update,
    }),
)
def dispatch(environ, start_response, mapper):
    """Find a matching route for the current request.

    :raises: 404(not found) if no match request
             405(method not allowed) if route exist but
             method not provided.
    """
    match = mapper.match(environ=environ)
    if match is None:
        LOG.debug('The route for %s can not be found',
                  environ.get('PATH_INFO', ''))
        raise webob.exc.HTTPNotFound(
            json_formatter=util.json_error_formatter)
    handler = match.pop('action')
    environ['wsgiorg.routing_args'] = ((), match)
    return handler(environ, start_response)
def handle_not_allowed(environ, start_response):
    """Return a 405 response when method is not allowed.

    If _methods are in routing_args, send an allow header listing
    the methods that are possible on the provided URL.
    """
    allowed = util.wsgi_path_item(environ, '_methods')
    headers = {'allow': str(allowed)} if allowed else {}
    raise webob.exc.HTTPMethodNotAllowed(
        ('The method specified is not allowed for this resource.'),
        headers=headers, json_formatter=util.json_error_formatter)
def make_map(declarations):
    """Process route declarations to create a Route Mapper.

    *declarations* is an iterable of (url_template, {METHOD: handler})
    pairs, e.g. ROUTE_LIST.  For every URL a catch-all entry is appended so
    an unsupported method raises 405 (with an Allow header) instead of 404.
    """
    mapper = routes.Mapper()
    # Fix: iterate the supplied declarations; the original ignored the
    # parameter and always walked the module-level ROUTE_LIST.
    for route, methods in declarations:
        allowed_methods = []
        for method, func in methods.items():
            mapper.connect(route, action=func,
                           conditions=dict(method=[method]))
            allowed_methods.append(method)

        allowed_methods = ', '.join(allowed_methods)
        mapper.connect(route, action=handle_not_allowed,
                       _methods=allowed_methods)
    return mapper
class SdkHandler(object):
    """Serve zvm sdk request

    Dispatch to handlers defined in ROUTE_LIST.
    """

    def __init__(self, **local_config):
        self._map = make_map(ROUTE_LIST)

    @staticmethod
    def _check_content_headers(environ):
        # A positive content-length must come with a content type, and a
        # non-integer content-length is rejected outright.
        clen = environ.get('CONTENT_LENGTH')
        try:
            has_body = clen and int(clen) > 0
        except ValueError:
            msg = 'content-length header must be an integer'
            LOG.debug(msg)
            raise webob.exc.HTTPBadRequest(msg,
                json_formatter=util.json_error_formatter)
        if has_body and not environ.get('CONTENT_TYPE'):
            msg = 'content-type header required when content-length > 0'
            LOG.debug(msg)
            raise webob.exc.HTTPBadRequest(msg,
                json_formatter=util.json_error_formatter)

    def __call__(self, environ, start_response):
        self._check_content_headers(environ)
        try:
            return dispatch(environ, start_response, self._map)
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(
                exc, json_formatter=util.json_error_formatter)
        except Exception:
            raise
import json
import six
import sys
import traceback
import webob
from zvmsdk import log
from zvmsdk.sdkwsgi import handler
from zvmsdk.sdkwsgi import requestlog
from zvmsdk.sdkwsgi import util
LOG = log.LOG
# Project name used by the WSGI app factory (loadapp/deploy).
NAME = "zvm-cloud-connector"
def _find_fault(clazz, encountered=None):
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
for subsubclass in _find_fault(subclass, encountered):
yield subsubclass
yield subclass
class Fault(webob.exc.HTTPException):
    """Wraps a webob HTTPException and renders it as an SDK-style JSON
    error body when invoked as a WSGI application."""
    def __init__(self, exception):
        # `exception` must expose webob's status_int/headers/explanation.
        self.wrapped_exc = exception
        # Stringify header values so the WSGI layer never sees non-str.
        for key, value in list(self.wrapped_exc.headers.items()):
            self.wrapped_exc.headers[key] = str(value)
        self.status_int = exception.status_int
    @webob.dec.wsgify()
    def __call__(self, req):
        # Render the wrapped exception as JSON matching the zvmsdk
        # returncode result shape ('rs' carries the HTTP status code).
        code = self.wrapped_exc.status_int
        explanation = self.wrapped_exc.explanation
        LOG.debug("Returning %(code)s to user: %(explanation)s",
                  {'code': code, 'explanation': explanation})
        fault_data = {
            'overallRC': 400,
            'rc': 400,
            'rs': code,
            'modID': util.SDKWSGI_MODID,
            'output': '',
            'errmsg': explanation}
        if code == 413 or code == 429:
            # Propagate Retry-After for "entity too large"/"too many
            # requests" responses, when the wrapped exception carries it.
            retry = self.wrapped_exc.headers.get('Retry-After', None)
            if retry:
                fault_data['retryAfter'] = retry
        self.wrapped_exc.content_type = 'application/json'
        self.wrapped_exc.charset = 'UTF-8'
        self.wrapped_exc.text = six.text_type(json.dumps(fault_data))
        return self.wrapped_exc
    def __str__(self):
        return self.wrapped_exc.__str__()
class FaultWrapper(object):
    """Calls down the middleware stack, making exceptions into faults."""
    # Lazily-built cache: HTTP status code -> webob HTTPError subclass.
    _status_to_type = {}
    @staticmethod
    def status_to_type(status):
        # Populate the cache from every webob.exc.HTTPError subclass on
        # first use; unknown codes fall back to 500.
        if not FaultWrapper._status_to_type:
            for clazz in _find_fault(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        return FaultWrapper._status_to_type.get(
            status, webob.exc.HTTPInternalServerError)()
    def __init__(self, application):
        self.application = application
    def _error(self, inner, req):
        # Translate an arbitrary unhandled exception into a Fault response.
        exc_info = traceback.extract_tb(sys.exc_info()[2])[-1]
        LOG.info('Got unhandled exception: %s', exc_info)
        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        if safe:
            # NOTE(review): Exception.message was removed in py3 -- confirm
            # every 'safe' exception raised here defines a .message attr.
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            inner.message)
        return Fault(outer)
    @webob.dec.wsgify()
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
class HeaderControl(object):
    """Middleware that adds 'cache-control: no-cache' to every response."""
    def __init__(self, application):
        self.application = application
    @webob.dec.wsgify
    def __call__(self, req):
        response = req.get_response(self.application)
        response.headers.add('cache-control', 'no-cache')
        return response
def deploy(project_name):
    """Assemble the middleware pipeline"""
    application = handler.SdkHandler()
    # Wrap inside-out: HeaderControl is innermost, RequestLog outermost,
    # so a request passes RequestLog -> FaultWrapper -> HeaderControl ->
    # SdkHandler.
    for middleware in (HeaderControl, FaultWrapper, requestlog.RequestLog):
        application = middleware(application)
    return application
def loadapp(project_name=NAME):
    # WSGI factory entry point; *project_name* is currently informational.
    application = deploy(project_name)
    return application
def init_application():
    # build and return WSGI app
    return loadapp()
import logging
from zvmsdk import log
from zvmsdk.sdkwsgi import util
LOG = log.LOG
class RequestLog(object):
    """WSGI Middleware to write a simple request log to.
    Borrowed from Paste Translogger
    """
    # Lazy %-style template used for every access-log record below.
    format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
              'status: %(status)s length: %(bytes)s headers: %(headers)s '
              'exc_info: %(exc_info)s')
    def __init__(self, application):
        self.application = application
    def __call__(self, environ, start_response):
        LOG.debug('Starting request: %s "%s %s"',
                  environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
                  util.get_request_uri(environ))
        return self._log_and_call(environ, start_response)
    def _log_and_call(self, environ, start_response):
        req_uri = util.get_request_uri(environ)
        def _local_response(status, headers, exc_info=None):
            # start_response wrapper: log once status/headers are known,
            # then delegate to the real start_response.
            size = None
            for name, value in headers:
                if name.lower() == 'content-length':
                    size = value
            for index, value in enumerate(headers):
                if value[0] == 'X-Auth-Token':
                    # NOTE(review): assumes the token header value is bytes
                    # here — a str value would raise AttributeError; confirm.
                    headers[index] = ('X-Auth-Token', value[1].decode('utf-8'))
                    break
            self._write_log(environ, req_uri, status, size, headers,
                            exc_info)
            return start_response(status, headers, exc_info)
        return self.application(environ, _local_response)
    def _force_debug(self, method, uri):
        # True for requests that recur constantly and should be demoted
        # from INFO to DEBUG to keep the log readable.
        if method == 'POST' and uri == '/token':
            return True
        if method == 'GET' and uri == '/guests/nics':
            return True
        return False
    def _write_log(self, environ, req_uri, status, size, headers, exc_info):
        # Emit one record per request; auth-token headers are masked first
        # via util.mask_tuple_password.
        if size is None:
            size = '-'
        log_format = {
            'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
            'REQUEST_METHOD': environ['REQUEST_METHOD'],
            'REQUEST_URI': req_uri,
            'status': status.split(None, 1)[0],
            'bytes': size,
            'headers': util.mask_tuple_password(headers),
            'exc_info': exc_info
        }
        if LOG.isEnabledFor(logging.INFO):
            # POST '/token' and GET '/guests/nics'
            # too often, so we want to avoid them
            if self._force_debug(environ['REQUEST_METHOD'], req_uri):
                LOG.debug(self.format, log_format)
            else:
                LOG.info(self.format, log_format)
        else:
            LOG.debug(self.format, log_format)
import json
from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.schemas import vswitch
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi import validation
from zvmsdk import utils
# Lazily-created singleton VswitchAction; see get_action().
_VSWITCHACTION = None
CONF = config.CONF
LOG = log.LOG
class VswitchAction(object):
    """Bridges REST vswitch requests to the SDK server socket API."""
    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
                                             ip_addr=CONF.sdkserver.bind_addr,
                                             port=CONF.sdkserver.bind_port)
    def list(self):
        # SDK result dict listing all vswitches.
        return self.client.send_request('vswitch_get_list')
    @validation.schema(vswitch.create)
    def create(self, body):
        # body['vswitch'] is validated by the schema decorator above;
        # every field except 'name' falls back to the defaults shown here.
        vsw = body['vswitch']
        name = vsw['name']
        rdev = vsw.get('rdev', None)
        controller = vsw.get('controller', '*')
        connection = vsw.get('connection', "CONNECT")
        network_type = vsw.get('network_type', "ETHERNET")
        router = vsw.get('router', "NONROUTER")
        vid = vsw.get('vid', "UNAWARE")
        port_type = vsw.get('port_type', "ACCESS")
        gvrp = vsw.get('gvrp', "GVRP")
        queue_mem = vsw.get('queue_mem', 8)
        native_vid = vsw.get('native_vid', 1)
        persist = vsw.get('persist', True)
        # Accept string spellings ('yes', '0', ...); raises on nonsense.
        persist = util.bool_from_string(persist, strict=True)
        info = self.client.send_request('vswitch_create', name, rdev=rdev,
                                        controller=controller,
                                        connection=connection,
                                        network_type=network_type,
                                        router=router, vid=vid,
                                        port_type=port_type, gvrp=gvrp,
                                        queue_mem=queue_mem,
                                        native_vid=native_vid,
                                        persist=persist)
        return info
    def delete(self, name):
        info = self.client.send_request('vswitch_delete', name)
        return info
    def query(self, name):
        info = self.client.send_request('vswitch_query', name)
        return info
    @validation.schema(vswitch.update)
    def update(self, name, body):
        vsw = body['vswitch']
        # TODO: only allow one param at most once
        # First matching key wins; the remaining keys are ignored.
        if 'grant_userid' in vsw:
            userid = vsw['grant_userid']
            info = self.client.send_request('vswitch_grant_user',
                                            name, userid)
            return info
        if 'revoke_userid' in vsw:
            userid = vsw['revoke_userid']
            info = self.client.send_request('vswitch_revoke_user',
                                            name, userid)
            return info
        if 'user_vlan_id' in vsw:
            userid = vsw['user_vlan_id']['userid']
            vlanid = vsw['user_vlan_id']['vlanid']
            info = self.client.send_request('vswitch_set_vlan_id_for_user',
                                            name, userid, vlanid)
            return info
def get_action():
    """Return the process-wide VswitchAction, creating it on first use."""
    global _VSWITCHACTION
    action = _VSWITCHACTION
    if action is None:
        action = _VSWITCHACTION = VswitchAction()
    return action
@util.SdkWsgify
@tokens.validate
def vswitch_list(req):
    """GET /vswitches -- list vswitches as a JSON SDK result."""
    info = get_action().list()
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.status = util.get_http_code_from_sdk_return(info)
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def vswitch_create(req):
    """POST /vswitches -- create a vswitch from the JSON request body."""
    body = util.extract_json(req.body)
    info = get_action().create(body=body)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    # A duplicate vswitch maps to HTTP 409.
    resp.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_already_exists)
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def vswitch_delete(req):
    """DELETE /vswitches/{name} -- delete the named vswitch."""
    name = util.wsgi_path_item(req.environ, 'name')
    info = get_action().delete(name)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.status = util.get_http_code_from_sdk_return(info, default=200)
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def vswitch_update(req):
    """PUT /vswitches/{name} -- grant/revoke a user or set a user VLAN."""
    name = util.wsgi_path_item(req.environ, 'name')
    body = util.extract_json(req.body)
    info = get_action().update(name, body=body)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    # An unknown vswitch maps to HTTP 404.
    resp.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def vswitch_query(req):
    """GET /vswitches/{name} -- query the named vswitch."""
    name = util.wsgi_path_item(req.environ, 'name')
    info = get_action().query(name)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    resp.content_type = 'application/json'
    return resp
import json
from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi import util
from zvmsdk import utils
from zvmsdk.sdkwsgi import validation
from zvmsdk.sdkwsgi.schemas import image
from zvmsdk.sdkwsgi.schemas import host
# Lazily-created singleton HostAction; see get_action().
_HOSTACTION = None
CONF = config.CONF
LOG = log.LOG
class HostAction(object):
    """Bridges REST host requests to the SDK server socket API."""
    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
                                             ip_addr=CONF.sdkserver.bind_addr,
                                             port=CONF.sdkserver.bind_port)
    def get_info(self):
        # Overall z/VM host information.
        info = self.client.send_request('host_get_info')
        return info
    def get_guest_list(self):
        info = self.client.send_request('host_get_guest_list')
        return info
    @validation.query_schema(image.diskpool)
    def get_diskpool_volumes(self, req, poolname):
        # `req` is consumed only by the query_schema decorator.
        info = self.client.send_request('host_get_diskpool_volumes',
                                        disk_pool=poolname)
        return info
    @validation.query_schema(host.volume)
    def get_volume_info(self, req, volumename):
        info = self.client.send_request('host_get_volume_info',
                                        volume=volumename)
        return info
    @validation.query_schema(image.diskpool)
    def diskpool_get_info(self, req, poolname):
        info = self.client.send_request('host_diskpool_get_info',
                                        disk_pool=poolname)
        return info
    def get_ssi_info(self):
        info = self.client.send_request('host_get_ssi_info')
        return info
def get_action():
    """Return the process-wide HostAction, creating it on first use."""
    global _HOSTACTION
    action = _HOSTACTION
    if action is None:
        action = _HOSTACTION = HostAction()
    return action
@util.SdkWsgify
@tokens.validate
def host_get_info(req):
    """GET /host -- overall z/VM host information as JSON."""
    info = get_action().get_info()
    resp = req.response
    resp.status = util.get_http_code_from_sdk_return(info)
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def host_get_guest_list(req):
    """GET /host/guests -- list guests defined on this host."""
    info = get_action().get_guest_list()
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    resp.status = util.get_http_code_from_sdk_return(info)
    return resp
@util.SdkWsgify
@tokens.validate
def host_get_diskpool_volumes(req):
    """GET /host/diskpool_volumes -- volumes of a pool (?poolname=...)."""
    # Absent query parameter -> None, letting the SDK pick its default.
    poolname = req.GET.get('poolname')
    info = get_action().get_diskpool_volumes(req, poolname)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    resp.status = util.get_http_code_from_sdk_return(info)
    return resp
@util.SdkWsgify
@tokens.validate
def host_get_volume_info(req):
    """GET /host/volume -- info for one volume (?volumename=...)."""
    volumename = req.GET.get('volumename')
    info = get_action().get_volume_info(req, volumename)
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    resp.status = util.get_http_code_from_sdk_return(info)
    return resp
@util.SdkWsgify
@tokens.validate
def host_get_disk_info(req):
    """GET /host/diskpool -- summary info for a disk pool (?poolname=...)."""
    poolname = req.GET.get('poolname')
    info = get_action().diskpool_get_info(req, poolname)
    resp = req.response
    # An unknown disk pool maps to HTTP 404.
    resp.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    return resp
@util.SdkWsgify
@tokens.validate
def host_get_ssi_info(req):
    """GET /host/ssi -- SSI cluster information for this host."""
    info = get_action().get_ssi_info()
    resp = req.response
    resp.body = utils.to_utf8(json.dumps(info))
    resp.content_type = 'application/json'
    resp.status = util.get_http_code_from_sdk_return(info)
    return resp
import six
import json
import hashlib
import os
import uuid
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi import util
# Lazily-created singleton FileAction; see get_action().
_FILEACTION = None
CONF = config.CONF
LOG = log.LOG
# Size in bytes of each chunk read while streaming an imported file.
CHUNKSIZE = 4096

# Error-payload templates for this module.
# NOTE(review): 'rc' is filled from overallRC in both templates — confirm
# this mirroring is intentional rather than a copy/paste slip.
INVALID_CONTENT_TYPE = {
    'overallRC': returncode.errors['RESTAPI'][0]['overallRC'],
    'modID': returncode.errors['RESTAPI'][0]['modID'],
    'rc': returncode.errors['RESTAPI'][0]['overallRC'],
    'rs': 1,
    'errmsg': '',
    'output': ''}
FILE_OPERATION_ERROR = {
    'overallRC': returncode.errors['file'][0]['overallRC'],
    'modID': returncode.errors['file'][0]['modID'],
    'rc': returncode.errors['file'][0]['overallRC'],
    'rs': 1,
    'errmsg': '',
    'output': ''}
class FileAction(object):
    """Implements the file import (upload) and export (download) actions."""

    def __init__(self):
        self._pathutils = utils.PathUtils()

    def file_import(self, fileobj):
        """Stream *fileobj* into the import repository.

        Returns an SDK-style result dict; on success 'output' carries the
        byte count, the file:// destination URL and the md5 checksum.
        """
        # Track the target path so the cleanup handler below never sees an
        # unbound name when the failure happens before it is assigned
        # (fix: the original referenced target_fpath unconditionally).
        target_fpath = None
        try:
            importDir = self._pathutils.create_file_repository(
                const.FILE_TYPE['IMPORT'])
            fname = str(uuid.uuid1())
            target_fpath = '/'.join([importDir, fname])

            # Save the imported stream chunk-wise, hashing as we go.
            checksum = hashlib.md5()
            bytes_written = 0
            with open(target_fpath, 'wb') as f:
                for buf in fileChunkReadable(fileobj, CHUNKSIZE):
                    bytes_written += len(buf)
                    checksum.update(buf)
                    f.write(buf)
            checksum_hex = checksum.hexdigest()

            LOG.debug("Wrote %(bytes_written)d bytes to %(target_image)s"
                      " with checksum %(checksum_hex)s" %
                      {'bytes_written': bytes_written,
                       'target_image': target_fpath,
                       'checksum_hex': checksum_hex})
            return_data = {'filesize_in_bytes': bytes_written,
                           'dest_url': 'file://' + target_fpath,
                           'md5sum': checksum_hex}
            results = {'overallRC': 0, 'modID': None,
                       'rc': 0, 'rs': 0,
                       'errmsg': '',
                       'output': return_data}
        except OSError as err:
            msg = ("File import error: %s, please check access right to "
                   "specified file or folder" % six.text_type(err))
            LOG.error(msg)
            # Fix: work on a copy -- the original mutated the shared
            # module-level FILE_OPERATION_ERROR template in place.
            results = dict(FILE_OPERATION_ERROR)
            results.update({'rs': 1, 'errmsg': msg, 'output': ''})
        except Exception as err:
            # Cleanup the partially-written file from the file repository.
            if target_fpath is not None:
                self._pathutils.clean_temp_folder(target_fpath)
            msg = ("Exception happened during file import: %s" %
                   six.text_type(err))
            LOG.error(msg)
            results = dict(FILE_OPERATION_ERROR)
            results.update({'rs': 1, 'errmsg': msg, 'output': ''})
        return results

    def file_export(self, fpath):
        """Return an iterator over *fpath*'s content, or an error dict."""
        try:
            if not os.path.exists(fpath):
                msg = ("The specific file %s for export does not exist" %
                       fpath)
                LOG.error(msg)
                results = dict(FILE_OPERATION_ERROR)
                results.update({'rs': 2, 'errmsg': msg, 'output': ''})
                return results
            offset = 0
            file_size = os.path.getsize(fpath)
            # file_size here is expressed in bytes.
            return iter(get_data(fpath,
                                 offset=offset,
                                 file_size=file_size))
        except Exception as err:
            # Fix: the original wrote `except exception`, which names the
            # imported zvmsdk.exception *module* (a TypeError at catch
            # time), and then assigned dict.update()'s None return value
            # as the result.
            msg = ("Exception happened during file export with error %s " %
                   six.text_type(err))
            LOG.error(msg)
            results = dict(FILE_OPERATION_ERROR)
            results.update({'rs': 2, 'errmsg': msg, 'output': ''})
            return results
def get_action():
    """Return the process-wide FileAction, creating it on first use."""
    global _FILEACTION
    action = _FILEACTION
    if action is None:
        action = _FILEACTION = FileAction()
    return action
@util.SdkWsgify
@tokens.validate
def file_import(request):
    """PUT /files -- import a file sent as application/octet-stream."""
    # A truthy validation result already describes the content-type error;
    # only run the import when the validation passed (falsy result).
    info = _content_type_validation(request.content_type)
    if not info:
        info = get_action().file_import(request.body_file)
    request.response.body = utils.to_utf8(json.dumps(info))
    request.response.status = util.get_http_code_from_sdk_return(info)
    request.response.content_type = 'application/json'
    return request.response
def _content_type_validation(content_type):
    """Validate the request content type for file import/export.

    :param content_type: content type string from the request header
    :returns: an empty dict when the content type is supported, otherwise
              an error-results dict describing the failure
    """
    results = {}
    if content_type not in ['application/octet-stream']:
        msg = ('Invalid content type %s found for file import/export, the '
               'supported content type is application/octet-stream' %
               content_type)
        LOG.error(msg)
        # Bug fix: dict.update() returns None, so the previous
        # ``results = INVALID_CONTENT_TYPE.update(...)`` expression made
        # this function return None and the caller (which tests
        # ``if not info``) treated every invalid content type as valid.
        results = INVALID_CONTENT_TYPE
        results.update({'errmsg': msg})
    return results
@util.SdkWsgify
@tokens.validate
def file_export(request):
    """WSGI entry for exporting a file out of the sdkserver repository."""
    body = util.extract_json(request.body)
    results = get_action().file_export(body['source_file'])

    # A dict result means the export failed; report the error as JSON.
    if isinstance(results, dict):
        request.response.body = utils.to_utf8(json.dumps(results))
        request.response.status = util.get_http_code_from_sdk_return(
            results)
        request.response.content_type = 'application/json'
        return request.response
    # Otherwise results is an iterator over the file content: stream it.
    request.response.headers['Content-Type'] = 'application/octet-stream'
    request.response.app_iter = results
    request.response.status_int = 200
    return request.response
def fileChunkReadable(file_obj, chunk_size=65536):
    """Wrap *file_obj* in a fixed-size chunk iterator when it is readable.

    :param file_obj: an iterable which may also expose a ``read`` method
    :param chunk_size: maximum size of each yielded chunk
    :returns: a chunk iterator for readable objects, otherwise the
              original object unchanged
    """
    if not hasattr(file_obj, 'read'):
        return file_obj
    return fileChunkIter(file_obj, chunk_size)
def fileChunkIter(file_object, file_chunk_size=65536):
    """Yield fixed-size chunks read from a file-like object until EOF.

    :param file_object: a file-like object supporting ``read``
    :param file_chunk_size: maximum size of each chunk
    """
    chunk = file_object.read(file_chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(file_chunk_size)
def get_data(file_path, offset=0, file_size=None):
    """Return a chunk iterator over the content of *file_path*.

    :param file_path: path of the file to read
    :param offset: byte offset to start reading from
    :param file_size: number of bytes to read (in bytes)
    """
    chunked = chunkedFile(file_path,
                          file_offset=offset,
                          file_chunk_size=CHUNKSIZE,
                          file_partial_length=file_size)
    return get_chunk_data_iterator(chunked)
def get_chunk_data_iterator(data):
    """Generator re-yielding every chunk of *data* unchanged."""
    yield from data
class chunkedFile(object):
    """Iterator over a (possibly partial) large file.

    Sent to the wsgi server so that it can stream a large file in
    fixed-size chunks without loading it into memory.
    """

    def __init__(self, file_path, file_offset=0, file_chunk_size=4096,
                 file_partial_length=None):
        """Open *file_path* for binary reading.

        :param file_path: path of the file to stream
        :param file_offset: byte offset to seek to before reading
        :param file_chunk_size: maximum size of each yielded chunk
        :param file_partial_length: total number of bytes to yield, or
            None to read to EOF
        """
        self.file_path = file_path
        self.file_chunk_size = file_chunk_size
        self.file_partial_length = file_partial_length
        self.file_partial = self.file_partial_length is not None
        self.file_object = open(self.file_path, 'rb')
        if file_offset:
            # Bug fix: this previously referenced the nonexistent
            # attribute ``self.file_pointer``, raising AttributeError for
            # any non-zero offset.
            self.file_object.seek(file_offset)

    def __iter__(self):
        """Yield chunks until EOF or the partial length is exhausted."""
        try:
            if self.file_object:
                while True:
                    if self.file_partial:
                        # Never read past the requested partial length.
                        size = min(self.file_chunk_size,
                                   self.file_partial_length)
                    else:
                        size = self.file_chunk_size
                    chunk = self.file_object.read(size)
                    if chunk:
                        yield chunk
                        if self.file_partial:
                            self.file_partial_length -= len(chunk)
                            if self.file_partial_length <= 0:
                                break
                    else:
                        break
        finally:
            # Always release the file handle, even on early termination.
            self.close()

    def close(self):
        """Close the internal file object (safe to call twice)."""
        if self.file_object:
            self.file_object.close()
            self.file_object = None
import json
import six
import threading
import webob.exc
from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.schemas import guest
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi import validation
from zvmsdk import utils
_VMACTION = None
_VMHANDLER = None
CONF = config.CONF
LOG = log.LOG
CONF = config.CONF
class VMHandler(object):
    """Handle guest (virtual machine) query/configuration REST requests.

    Every method forwards the request to the z/VM SDK server through a
    socket connector and returns the raw results dict unchanged.
    """

    # Optional guest_create parameters forwarded verbatim when present
    # in the request body.
    _CREATE_OPTIONAL_KEYS = ('disk_list', 'user_profile', 'max_cpu',
                             'max_mem', 'ipl_from', 'ipl_param',
                             'ipl_loadparam', 'dedicate_vdevs', 'loaddev',
                             'account', 'cschedule', 'cshare', 'rdomain',
                             'pcif')

    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
            ip_addr=CONF.sdkserver.bind_addr,
            port=CONF.sdkserver.bind_port)

    @validation.schema(guest.create)
    def create(self, body):
        """Create a guest from the validated request body."""
        # Local renamed from ``guest`` so it no longer shadows the
        # imported schema module of the same name.
        guest_info = body['guest']
        userid = guest_info['userid']
        vcpus = guest_info['vcpus']
        memory = guest_info['memory']
        # Collect only the optional parameters actually supplied.
        kwargs_list = {key: guest_info[key]
                       for key in self._CREATE_OPTIONAL_KEYS
                       if key in guest_info}
        info = self.client.send_request('guest_create', userid, vcpus,
                                        memory, **kwargs_list)
        return info

    def list(self):
        """List all guests defined on the given host."""
        info = self.client.send_request('guest_list')
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_power_state_real(self, req, userid):
        """Query the power state of a guest directly from the hypervisor."""
        info = self.client.send_request('guest_get_power_state_real', userid)
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_info(self, req, userid):
        """Get runtime info (cpu/memory/power) of a guest."""
        info = self.client.send_request('guest_get_info', userid)
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_user_direct(self, req, userid):
        """Get the user directory entry of a guest."""
        info = self.client.send_request('guest_get_user_direct', userid)
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_adapters(self, req, userid):
        """Get network adapters info of a guest."""
        info = self.client.send_request('guest_get_adapters_info', userid)
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_definition_info(self, req, userid):
        """Get the definition info of a guest."""
        info = self.client.send_request('guest_get_definition_info', userid)
        return info

    @validation.query_schema(guest.userid_list_query)
    def get_power_state(self, req, userid):
        """Get the power state of a guest."""
        info = self.client.send_request('guest_get_power_state', userid)
        return info

    def delete(self, userid):
        """Delete a guest definition and its resources."""
        info = self.client.send_request('guest_delete', userid)
        return info

    def delete_nic(self, userid, vdev, body):
        """Delete a NIC; ``active`` also removes it from the live guest."""
        active = body.get('active', False)
        active = util.bool_from_string(active, strict=True)
        info = self.client.send_request('guest_delete_nic', userid, vdev,
                                        active=active)
        return info

    @validation.query_schema(guest.userid_list_array_query)
    def inspect_stats(self, req, userid_list):
        """Inspect CPU/memory statistics of the given guests."""
        info = self.client.send_request('guest_inspect_stats',
                                        userid_list)
        return info

    @validation.query_schema(guest.userid_list_array_query)
    def inspect_vnics(self, req, userid_list):
        """Inspect vNIC statistics of the given guests."""
        info = self.client.send_request('guest_inspect_vnics',
                                        userid_list)
        return info

    # @validation.query_schema(guest.nic_DB_info)
    # FIXME: the above validation will fail with "'dict' object has no
    # attribute 'dict_of_lists'"
    def get_nic_DB_info(self, req, userid=None, nic_id=None, vswitch=None):
        """Query NIC database records filtered by userid/nic_id/vswitch."""
        info = self.client.send_request('guests_get_nic_info', userid=userid,
                                        nic_id=nic_id, vswitch=vswitch)
        return info

    @validation.schema(guest.create_nic)
    def create_nic(self, userid, body=None):
        """Create a NIC on a guest."""
        nic = body['nic']
        vdev = nic.get('vdev', None)
        nic_id = nic.get('nic_id', None)
        mac_addr = nic.get('mac_addr', None)
        active = nic.get('active', False)
        active = util.bool_from_string(active, strict=True)
        info = self.client.send_request('guest_create_nic', userid,
                                        vdev=vdev, nic_id=nic_id,
                                        mac_addr=mac_addr,
                                        active=active)
        return info

    @validation.schema(guest.create_network_interface)
    def create_network_interface(self, userid, body=None):
        """Create and configure a network interface inside a guest."""
        interface = body['interface']
        version = interface['os_version']
        networks = interface.get('guest_networks', None)
        active = interface.get('active', False)
        active = util.bool_from_string(active, strict=True)
        info = self.client.send_request('guest_create_network_interface',
                                        userid, os_version=version,
                                        guest_networks=networks,
                                        active=active)
        return info

    @validation.schema(guest.delete_network_interface)
    def delete_network_interface(self, userid, body=None):
        """Delete a configured network interface from a guest."""
        interface = body['interface']
        version = interface['os_version']
        vdev = interface['vdev']
        active = interface.get('active', False)
        active = util.bool_from_string(active, strict=True)
        info = self.client.send_request('guest_delete_network_interface',
                                        userid, version, vdev,
                                        active=active)
        return info

    @validation.schema(guest.create_disks)
    def create_disks(self, userid, body=None):
        """Add new minidisks to a guest."""
        disk_list = body['disk_info'].get('disk_list', None)
        info = self.client.send_request('guest_create_disks', userid,
                                        disk_list)
        return info

    @validation.schema(guest.config_minidisks)
    def config_minidisks(self, userid, body=None):
        """Configure (format/mount) additional minidisks of a guest."""
        disk_list = body['disk_info'].get('disk_list', None)
        info = self.client.send_request('guest_config_minidisks', userid,
                                        disk_list)
        return info

    @validation.schema(guest.delete_disks)
    def delete_disks(self, userid, body=None):
        """Delete minidisks from a guest."""
        vdev_list = body['vdev_info'].get('vdev_list', None)
        info = self.client.send_request('guest_delete_disks', userid,
                                        vdev_list)
        return info

    @validation.schema(guest.nic_couple_uncouple)
    def nic_couple_uncouple(self, userid, vdev, body):
        """Couple a NIC to, or uncouple it from, a vswitch."""
        info = body['info']
        active = info.get('active', False)
        active = util.bool_from_string(active, strict=True)
        couple = util.bool_from_string(info['couple'], strict=True)
        # vlan_id is for couple operation only, uncouple ignore it
        vlan_id = info.get('vlan_id', -1)
        if couple:
            info = self.client.send_request('guest_nic_couple_to_vswitch',
                                            userid, vdev, info['vswitch'],
                                            active=active, vlan_id=vlan_id)
        else:
            info = self.client.send_request('guest_nic_uncouple_from_vswitch',
                                            userid, vdev,
                                            active=active)
        return info
class VMAction(object):
    """Handle guest action requests (power, resize, deploy, capture...).

    Requests are forwarded to the z/VM SDK server; deploy and capture
    are additionally throttled by a bounded semaphore because they move
    large amounts of disk data.
    """

    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
            ip_addr=CONF.sdkserver.bind_addr,
            port=CONF.sdkserver.bind_port)
        # Caps the number of concurrent deploy/capture operations.
        self.dd_semaphore = threading.BoundedSemaphore(
            value=CONF.wsgi.max_concurrent_deploy_capture)

    def _busy_response(self, request_info):
        """Build a 'service unavailable' result for a throttled request.

        Bug fix: the previous inline code updated
        ``returncode.errors['serviceUnavail'][0]`` in place, corrupting
        the shared error definition for every later caller; a fresh copy
        is returned instead.
        """
        error_def = returncode.errors['serviceUnavail']
        info = dict(error_def[0])
        err_msg = error_def[1][1] % {'req': request_info}
        info.update({'rs': 1,
                     'errmsg': err_msg,
                     'output': ''})
        LOG.error(err_msg)
        return info

    @validation.schema(guest.start)
    def start(self, userid, body):
        """Power on a guest."""
        timeout = body.get('timeout', 0)
        info = self.client.send_request('guest_start', userid, timeout)
        return info

    @validation.schema(guest.stop)
    def stop(self, userid, body):
        """Power off a guest."""
        timeout = body.get('timeout', None)
        poll_interval = body.get('poll_interval', None)
        info = self.client.send_request('guest_stop', userid,
                                        timeout=timeout,
                                        poll_interval=poll_interval)
        return info

    @validation.schema(guest.softstop)
    def softstop(self, userid, body):
        """Shut down the OS inside a guest, then power it off."""
        timeout = body.get('timeout', None)
        poll_interval = body.get('poll_interval', None)
        info = self.client.send_request('guest_softstop', userid,
                                        timeout=timeout,
                                        poll_interval=poll_interval)
        return info

    def pause(self, userid, body):
        """Pause a guest."""
        info = self.client.send_request('guest_pause', userid)
        return info

    def unpause(self, userid, body):
        """Unpause a guest."""
        info = self.client.send_request('guest_unpause', userid)
        return info

    def reboot(self, userid, body):
        """Reboot the OS inside a guest."""
        info = self.client.send_request('guest_reboot', userid)
        return info

    def reset(self, userid, body):
        """Power-cycle a guest."""
        info = self.client.send_request('guest_reset', userid)
        return info

    def get_console_output(self, userid, body):
        """Fetch the console output of a guest."""
        info = self.client.send_request('guest_get_console_output',
                                        userid)
        return info

    @validation.schema(guest.register_vm)
    def register_vm(self, userid, body):
        """Register an existing z/VM guest to be managed by the SDK."""
        meta = body['meta']
        net_set = body['net_set']
        port_macs = None
        if 'port_macs' in body.keys():
            port_macs = body['port_macs']
        info = self.client.send_request('guest_register',
                                        userid, meta, net_set, port_macs)
        return info

    @validation.schema(guest.deregister_vm)
    def deregister_vm(self, userid, body):
        """Deregister a guest from SDK management."""
        info = self.client.send_request('guest_deregister', userid)
        return info

    @validation.schema(guest.live_migrate_vm)
    def live_migrate_vm(self, userid, body):
        """Live migrate a guest to another destination."""
        # dest_zcc_userid default as ''
        dest_zcc_userid = body.get('dest_zcc_userid', '')
        destination = body['destination']
        operation = body.get('operation', {})
        parms = body['parms']
        info = self.client.send_request('guest_live_migrate',
                                        userid, dest_zcc_userid, destination,
                                        parms, operation)
        return info

    @validation.schema(guest.resize_cpus)
    def resize_cpus(self, userid, body):
        """Resize the (static) CPU count of a guest."""
        cpu_cnt = body['cpu_cnt']
        info = self.client.send_request('guest_resize_cpus',
                                        userid, cpu_cnt)
        return info

    @validation.schema(guest.resize_cpus)
    def live_resize_cpus(self, userid, body):
        """Resize the CPU count of a running guest."""
        cpu_cnt = body['cpu_cnt']
        info = self.client.send_request('guest_live_resize_cpus',
                                        userid, cpu_cnt)
        return info

    @validation.schema(guest.resize_mem)
    def resize_mem(self, userid, body):
        """Resize the (static) memory of a guest."""
        size = body['size']
        info = self.client.send_request('guest_resize_mem',
                                        userid, size)
        return info

    @validation.schema(guest.resize_mem)
    def live_resize_mem(self, userid, body):
        """Resize the memory of a running guest."""
        size = body['size']
        info = self.client.send_request('guest_live_resize_mem',
                                        userid, size)
        return info

    @validation.schema(guest.grow_root_volume)
    def grow_root_volume(self, userid, body=None):
        """Grow the root volume of a guest."""
        info = self.client.send_request('guest_grow_root_volume', userid,
                                        body['os_version'])
        return info

    @validation.schema(guest.deploy)
    def deploy(self, userid, body):
        """Deploy an image onto a guest; throttled by the dd semaphore."""
        image_name = body['image']
        transportfiles = body.get('transportfiles', None)
        remotehost = body.get('remotehost', None)
        vdev = body.get('vdev', None)
        hostname = body.get('hostname', None)
        skipdiskcopy = body.get('skipdiskcopy', False)
        request_info = ("action: 'deploy', userid: %(userid)s,"
                        "transportfiles: %(trans)s, remotehost: %(remote)s,"
                        "vdev: %(vdev)s, skipdiskcopy: %(skipdiskcopy)s" %
                        {'userid': userid, 'trans': transportfiles,
                         'remote': remotehost, 'vdev': vdev,
                         'skipdiskcopy': skipdiskcopy,
                         })
        # Refuse the request immediately when the concurrency cap is hit.
        if not self.dd_semaphore.acquire(blocking=False):
            return self._busy_response(request_info)

        info = None
        try:
            LOG.debug("WSGI sending deploy requests. %s" % request_info)
            info = self.client.send_request('guest_deploy', userid,
                                            image_name,
                                            transportfiles=transportfiles,
                                            remotehost=remotehost,
                                            vdev=vdev, hostname=hostname,
                                            skipdiskcopy=skipdiskcopy)
        finally:
            try:
                self.dd_semaphore.release()
                LOG.debug("WSGI deploy request finished, %s."
                          "Resource released." % request_info)
            except Exception as err:
                err_msg = ("Failed to release deploy resource in WSGI."
                           "Error: %s, request info: %s" %
                           (six.text_type(err), request_info))
                LOG.error(err_msg)
        return info

    @validation.schema(guest.capture)
    def capture(self, userid, body):
        """Capture a guest's disks into an image; throttled like deploy."""
        image_name = body['image']
        capture_type = body.get('capture_type', 'rootonly')
        compress_level = body.get('compress_level',
                                  CONF.image.default_compress_level)
        request_info = ("action: 'capture', userid: %(userid)s,"
                        "image name: %(image)s, capture type: %(cap)s,"
                        "compress level: %(level)s" %
                        {'userid': userid, 'image': image_name,
                         'cap': capture_type, 'level': compress_level
                         })
        # Refuse the request immediately when the concurrency cap is hit.
        if not self.dd_semaphore.acquire(blocking=False):
            return self._busy_response(request_info)

        info = None
        try:
            LOG.debug("WSGI sending capture requests. %s" % request_info)
            info = self.client.send_request('guest_capture', userid,
                                            image_name,
                                            capture_type=capture_type,
                                            compress_level=compress_level)
        finally:
            try:
                self.dd_semaphore.release()
                LOG.debug("WSGI capture request finished, %s."
                          "Resource released." % request_info)
            except Exception as err:
                err_msg = ("Failed to release capture resource in WSGI."
                           "Error: %s, request info: %s" %
                           (six.text_type(err), request_info))
                LOG.error(err_msg)
        return info
def get_action():
    """Lazily create and return the module-level VMAction singleton."""
    global _VMACTION
    if _VMACTION is not None:
        return _VMACTION
    _VMACTION = VMAction()
    return _VMACTION
def get_handler():
    """Lazily create and return the module-level VMHandler singleton."""
    global _VMHANDLER
    if _VMHANDLER is not None:
        return _VMHANDLER
    _VMHANDLER = VMHandler()
    return _VMHANDLER
@util.SdkWsgify
@tokens.validate
def guest_get_power_state_real(req):
    """Query the real power state of a guest from the hypervisor."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().get_power_state_real(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get_info(req):
    """Return runtime info (cpu/memory/power) of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().get_info(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get_user_direct(req):
    """Return the user directory entry of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().get_user_direct(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get_adapters_info(req):
    """Return the network adapters info of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().get_adapters(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get(req):
    """Return the definition info of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    # info we got looks like:
    # {'user_direct': [u'USER RESTT305 PASSW0RD 1024m 1024m G',
    #                  u'INCLUDE OSDFLT']}
    info = get_handler().get_definition_info(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get_power_state(req):
    """Return the power state of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().get_power_state(req, userid)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_create(req):
    """Create a new guest from the JSON request body."""
    body = util.extract_json(req.body)
    info = get_handler().create(body=body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_already_exists)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_list(req):
    """List all guests defined on this host."""
    info = get_handler().list()

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    req.response.status = 200
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_action(req):
    """Dispatch a guest action named by the ``action`` key in the body.

    The remaining body entries are forwarded to the matching VMAction
    method as keyword ``body``.
    """
    def _guest_action(userid, req):
        action = get_action()
        body = util.extract_json(req.body)
        if len(body) == 0 or 'action' not in body:
            msg = 'action not exist or is empty'
            LOG.info(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        method = body['action']
        func = getattr(action, method, None)
        if func:
            body.pop('action')
            return func(userid, body=body)
        else:
            msg = 'action %s is invalid' % method
            # Consistency fix: pass the message via ``explanation`` as the
            # other bad-request path above does, so it is rendered in the
            # error response body rather than treated as the detail arg.
            raise webob.exc.HTTPBadRequest(explanation=msg)

    userid = util.wsgi_path_item(req.environ, 'userid')
    info = _guest_action(userid, req)

    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.content_type = 'application/json'
    req.response.status = util.get_http_code_from_sdk_return(info,
        additional_handler=util.handle_not_found_and_conflict)
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_delete(req):
    """Delete a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = get_handler().delete(userid)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info,
                                                             default=200)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_delete_nic(req):
    """Delete a NIC from a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    vdev = util.wsgi_path_item(req.environ, 'vdev')
    body = util.extract_json(req.body)
    info = get_handler().delete_nic(userid, vdev, body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info,
                                                             default=200)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_create_nic(req):
    """Create a NIC on a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().create_nic(userid, body=body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_couple_uncouple_nic(req):
    """Couple or uncouple a guest NIC to/from a vswitch."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    vdev = util.wsgi_path_item(req.environ, 'vdev')
    body = util.extract_json(req.body)
    info = get_handler().nic_couple_uncouple(userid, vdev, body=body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_create_network_interface(req):
    """Create and configure a network interface inside a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().create_network_interface(userid, body=body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_delete_network_interface(req):
    """Delete a configured network interface from a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().delete_network_interface(userid, body=body)

    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.content_type = 'application/json'
    return req.response
def _get_userid_list(req):
userids = []
if 'userid' in req.GET.keys():
userid = req.GET.get('userid')
userid = userid.strip(' ,')
userid = userid.replace(' ', '')
userids.extend(userid.split(','))
return userids
@util.SdkWsgify
@tokens.validate
def guest_get_stats(req):
    """Return CPU/memory statistics for the requested guests."""
    userid_list = _get_userid_list(req)
    info = get_handler().inspect_stats(req, userid_list)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_get_interface_stats(req):
    """Return vNIC statistics for the requested guests."""
    userid_list = _get_userid_list(req)
    info = get_handler().inspect_vnics(req, userid_list)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guests_get_nic_info(req):
    """Query NIC database records filtered by userid/nic_id/vswitch."""
    userid = req.GET.get('userid', None)
    nic_id = req.GET.get('nic_id', None)
    vswitch = req.GET.get('vswitch', None)
    info = get_handler().get_nic_DB_info(req, userid=userid, nic_id=nic_id,
                                         vswitch=vswitch)

    req.response.status = util.get_http_code_from_sdk_return(
        info, additional_handler=util.handle_not_found)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_config_disks(req):
    """Configure (format/mount) additional minidisks of a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().config_minidisks(userid, body=body)

    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_create_disks(req):
    """Add new minidisks to a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().create_disks(userid, body=body)

    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def guest_delete_disks(req):
    """Delete minidisks from a guest."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    body = util.extract_json(req.body)
    info = get_handler().delete_disks(userid, body=body)

    req.response.status = util.get_http_code_from_sdk_return(info,
                                                             default=200)
    req.response.body = utils.to_utf8(json.dumps(info))
    req.response.content_type = 'application/json'
    return req.response
import datetime
import functools
import jwt
import os
import threading
from zvmsdk import config
from zvmsdk import exception
from zvmsdk import log
from zvmsdk.sdkwsgi import util
CONF = config.CONF
LOG = log.LOG
DEFAULT_TOKEN_VALIDATION_PERIOD = 3600
TOKEN_LOCK = threading.Lock()
def get_admin_token(path):
    """Read the admin token from the token configuration file.

    :param path: path of the token configuration file
    :returns: the stripped token string
    :raises exception.ZVMUnauthorized: when the file is missing or
        cannot be read
    """
    if not os.path.exists(path):
        LOG.debug('token configuration file not found.')
        raise exception.ZVMUnauthorized()
    # ``with`` replaces the manual acquire/try/finally-release pattern;
    # the lock serializes file access between concurrent requests.
    with TOKEN_LOCK:
        try:
            with open(path, 'r') as fd:
                token = fd.read().strip()
        except Exception:
            LOG.debug('token file open failed.')
            raise exception.ZVMUnauthorized()
    return token
@util.SdkWsgify
def create(req):
    """Issue a time-limited user token in the X-Auth-Token header."""
    # Token validation disabled: hand back a fixed placeholder token.
    if CONF.wsgi.auth.lower() == 'none':
        req.response.headers.add('X-Auth-Token', 'server-auth-closed')
        return req.response

    # Validation enabled: the caller must present the correct admin token.
    if 'X-Admin-Token' not in req.headers:
        LOG.debug('no X-Admin-Token given in reqeust header')
        raise exception.ZVMUnauthorized()

    admin_token = get_admin_token(CONF.wsgi.token_path)
    if req.headers['X-Admin-Token'] != admin_token:
        LOG.debug('X-Admin-Token incorrect')
        raise exception.ZVMUnauthorized()

    # Negative configured periods fall back to the default lifetime.
    expires = CONF.wsgi.token_validation_period
    if expires < 0:
        expires = DEFAULT_TOKEN_VALIDATION_PERIOD
    expired_time = (datetime.datetime.utcnow() +
                    datetime.timedelta(seconds=expires))
    user_token = jwt.encode({'exp': expired_time}, admin_token)

    req.response.headers.add('X-Auth-Token', user_token)
    return req.response
# To validate the token, it is possible the token is expired or the
# token is not validated at all
def validate(function):
@functools.wraps(function)
def wrap_func(req, *args, **kwargs):
# by default, no token validation used
if CONF.wsgi.auth.lower() == 'none':
return function(req, *args, **kwargs)
# so, this is for token validation
if 'X-Auth-Token' not in req.headers:
LOG.debug('no X-Auth-Token given in reqeust header')
raise exception.ZVMUnauthorized()
token_file_path = CONF.wsgi.token_path
admin_token = get_admin_token(token_file_path)
try:
jwt.decode(req.headers['X-Auth-Token'], admin_token)
except jwt.ExpiredSignatureError:
LOG.debug('token validation failed because it is expired')
raise exception.ZVMUnauthorized()
except jwt.DecodeError:
LOG.debug('token not valid')
raise exception.ZVMUnauthorized()
except Exception:
LOG.debug('unknown exception occur during token validation')
raise exception.ZVMUnauthorized()
return function(req, *args, **kwargs)
return wrap_func | zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/tokens.py | tokens.py |
import json
from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.schemas import volume
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi import validation
from zvmsdk import utils
_VOLUMEACTION = None
CONF = config.CONF
LOG = log.LOG
class VolumeAction(object):
    """Dispatches volume REST operations to the SDK server over a socket."""

    # Values accepted as "true" for boolean-ish request fields; anything
    # else is treated as False (required by the sqlite FCP database).
    _BOOL_TRUE_VALUES = (True, 'True', 'TRUE', 'true', '1',
                         'ON', 'On', 'on', 'YES', 'Yes', 'yes')

    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
                                             ip_addr=CONF.sdkserver.bind_addr,
                                             port=CONF.sdkserver.bind_port)

    @staticmethod
    def _as_bool(value, keep_none=False):
        """Coerce a request value to a real boolean.

        When keep_none is True a None input is returned unchanged so the
        caller can distinguish "not supplied" from an explicit False.
        Extracted to remove the duplicated coercion logic that previously
        lived in both create_fcp_template() and edit_fcp_template().
        """
        if keep_none and value is None:
            return None
        return value in VolumeAction._BOOL_TRUE_VALUES

    @validation.schema(volume.attach)
    def attach(self, body):
        """Attach a volume using the connection info in the request body."""
        connection = body['info']['connection']
        return self.client.send_request('volume_attach', connection)

    @validation.schema(volume.detach)
    def detach(self, body):
        """Detach a volume using the connection info in the request body."""
        connection = body['info']['connection']
        return self.client.send_request('volume_detach', connection)

    @validation.query_schema(volume.get_volume_connector)
    def get_volume_connector(self, req, userid, reserve, fcp_template_id,
                             sp_name):
        """Return (and optionally reserve) FCP connector info for a guest."""
        return self.client.send_request('get_volume_connector',
                                        userid, reserve, fcp_template_id,
                                        sp_name)

    @validation.query_schema(volume.get_fcp_templates)
    def get_fcp_templates(self, req, template_id_list, assigner_id,
                          default_sp_list, host_default):
        """List FCP templates matching the given filters."""
        return self.client.send_request('get_fcp_templates',
                                        template_id_list,
                                        assigner_id,
                                        default_sp_list,
                                        host_default)

    @validation.query_schema(volume.get_fcp_templates_details)
    def get_fcp_templates_details(self, req, template_id_list, raw,
                                  statistics, sync_with_zvm):
        """Return detailed (optionally raw/statistical) template info."""
        return self.client.send_request('get_fcp_templates_details',
                                        template_id_list,
                                        raw=raw,
                                        statistics=statistics,
                                        sync_with_zvm=sync_with_zvm)

    def delete_fcp_template(self, template_id):
        """Delete the FCP template identified by template_id."""
        return self.client.send_request('delete_fcp_template', template_id)

    @validation.query_schema(volume.get_fcp_usage)
    def get_fcp_usage(self, req, fcp):
        """Return the usage record of a single FCP device."""
        return self.client.send_request('get_fcp_usage', fcp)

    @validation.schema(volume.set_fcp_usage)
    def set_fcp_usage(self, fcp, body=None):
        """Update the usage record (owner/reservation) of an FCP device."""
        info = body['info']
        userid = info['userid']
        reserved = info['reserved']
        connections = info['connections']
        # Bug fix: fcp_template_id is optional in the schema, so use
        # .get() instead of direct indexing (which raised KeyError when
        # the field was absent).  Empty/None is stored as ''.
        fcp_template_id = info.get('fcp_template_id') or ''
        return self.client.send_request('set_fcp_usage',
                                        fcp, userid, reserved,
                                        connections, fcp_template_id)

    def volume_refresh_bootmap(self, fcpchannel, wwpn, lun, wwid,
                               transportfiles, guest_networks,
                               fcp_template_id):
        """Refresh the boot map of a volume (boot-from-volume support)."""
        return self.client.send_request('volume_refresh_bootmap',
                                        fcpchannel, wwpn, lun, wwid,
                                        transportfiles, guest_networks,
                                        fcp_template_id)

    @validation.schema(volume.create_fcp_template)
    def create_fcp_template(self, body=None):
        """Create an FCP template from the request body."""
        name = body.get('name')
        description = body.get('description', '')
        fcp_devices = body.get('fcp_devices', '')
        # ensure host_default parameter is boolean type
        # because of the sqlite FCP database's requirements
        host_default = self._as_bool(body.get('host_default', False))
        min_fcp_paths_count = body.get('min_fcp_paths_count', None)
        default_sp_list = body.get('storage_providers', [])
        return self.client.send_request(
            'create_fcp_template', name,
            description=description,
            fcp_devices=fcp_devices,
            host_default=host_default,
            default_sp_list=default_sp_list,
            min_fcp_paths_count=min_fcp_paths_count)

    @validation.schema(volume.edit_fcp_template)
    def edit_fcp_template(self, body=None):
        """Edit an existing FCP template; None fields are left unchanged."""
        fcp_template_id = body.get('fcp_template_id')
        name = body.get('name', None)
        description = body.get('description', None)
        fcp_devices = body.get('fcp_devices', None)
        default_sp_list = body.get('storage_providers', None)
        min_fcp_paths_count = body.get('min_fcp_paths_count', None)
        # Due to the pre-validation in schemas/volume.py, host_default is
        # either None (not supplied) or a parameter_types.boolean value;
        # None must survive so the backend knows not to change it.
        host_default = self._as_bool(body.get('host_default', None),
                                     keep_none=True)
        return self.client.send_request(
            'edit_fcp_template',
            fcp_template_id,
            name=name, description=description,
            fcp_devices=fcp_devices,
            host_default=host_default,
            default_sp_list=default_sp_list,
            min_fcp_paths_count=min_fcp_paths_count)
def get_action():
    """Return the module-level VolumeAction, constructing it on first use."""
    global _VOLUMEACTION
    _VOLUMEACTION = _VOLUMEACTION or VolumeAction()
    return _VOLUMEACTION
@util.SdkWsgify
@tokens.validate
def volume_attach(req):
    """POST handler: attach a volume described by the JSON body."""
    payload = util.extract_json(req.body)
    result = get_action().attach(body=payload)
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(result,
                                                             default=200)
    return req.response
@util.SdkWsgify
@tokens.validate
def volume_detach(req):
    """POST handler: detach a volume described by the JSON body."""
    payload = util.extract_json(req.body)
    result = get_action().detach(body=payload)
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(result,
                                                             default=200)
    return req.response
@util.SdkWsgify
@tokens.validate
def volume_refresh_bootmap(req):
    """POST handler: refresh the boot map of a volume."""
    info_body = util.extract_json(req.body)['info']
    # fcpchannel/wwpn/lun are mandatory; the rest have defaults.
    result = get_action().volume_refresh_bootmap(
        info_body['fcpchannel'], info_body['wwpn'], info_body['lun'],
        info_body.get('wwid', ""),
        info_body.get('transportfiles', ""),
        info_body.get('guest_networks', []),
        info_body.get('fcp_template_id', None))
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.content_type = 'application/json'
    req.response.status = util.get_http_code_from_sdk_return(result,
                                                             default=200)
    return req.response
@util.SdkWsgify
@tokens.validate
def get_volume_connector(req):
    """Handler: return connector info for the guest named in the URL path."""
    userid = util.wsgi_path_item(req.environ, 'userid')
    info = util.extract_json(req.body)['info']
    conn = get_action().get_volume_connector(
        req, userid, info['reserve'],
        info.get('fcp_template_id', None),
        info.get('storage_provider', None))
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(conn))
    req.response.status = util.get_http_code_from_sdk_return(conn,
                                                             default=200)
    return req.response
@util.SdkWsgify
@tokens.validate
def get_fcp_usage(req):
    """Handler: report the usage record of one FCP device."""
    fcp = util.wsgi_path_item(req.environ, 'fcp_id')
    result = get_action().get_fcp_usage(req, fcp)
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(result))
    return req.response
@util.SdkWsgify
@tokens.validate
def set_fcp_usage(req):
    """Handler: update the usage record of one FCP device."""
    fcp = util.wsgi_path_item(req.environ, 'fcp_id')
    payload = util.extract_json(req.body)
    result = get_action().set_fcp_usage(fcp, body=payload)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.content_type = 'application/json'
    req.response.status = 200
    return req.response
@util.SdkWsgify
@tokens.validate
def create_fcp_template(req):
    """Handler: create an FCP template from the JSON request body."""
    def _create_fcp_template(req):
        action = get_action()
        body = util.extract_json(req.body)
        return action.create_fcp_template(body=body)

    ret = _create_fcp_template(req)
    ret_json = json.dumps(ret)
    req.response.body = utils.to_utf8(ret_json)
    req.response.status = util.get_http_code_from_sdk_return(ret)
    req.response.content_type = 'application/json'
    # Bug fix: every handler must hand the populated response back to the
    # WSGI stack; this one previously fell through and returned None.
    return req.response
@util.SdkWsgify
@tokens.validate
def edit_fcp_template(req):
    """Handler: edit the FCP template named in the URL path."""
    def _edit_fcp_template(req_body):
        action = get_action()
        return action.edit_fcp_template(body=req_body)

    body = util.extract_json(req.body)
    # The template id comes from the URL, not the JSON payload.
    body['fcp_template_id'] = util.wsgi_path_item(req.environ, 'template_id')
    ret = _edit_fcp_template(body)
    ret_json = json.dumps(ret)
    req.response.body = utils.to_utf8(ret_json)
    req.response.status = util.get_http_code_from_sdk_return(ret)
    req.response.content_type = 'application/json'
    # Bug fix: previously this handler fell through and returned None
    # instead of the populated response object.
    return req.response
@util.SdkWsgify
@tokens.validate
def get_fcp_templates(req):
    """Handler: list FCP templates, filtered by query parameters."""
    template_id_list = req.GET.get('template_id_list', None)
    assigner_id = req.GET.get('assigner_id', None)
    default_sp_list = req.GET.get('storage_providers', None)
    host_default = req.GET.get('host_default', None)
    if host_default:
        # Coerce the textual query value into a real boolean; any value
        # outside the accepted "true" spellings becomes False.
        host_default = host_default in (True, 'True', 'TRUE', 'true', '1',
                                        'ON', 'On', 'on', 'YES', 'Yes', 'yes')
    result = get_action().get_fcp_templates(req, template_id_list,
                                            assigner_id, default_sp_list,
                                            host_default)
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(result))
    return req.response
@util.SdkWsgify
@tokens.validate
def get_fcp_templates_details(req):
    """Handler: detailed FCP template query with raw/statistics options."""
    template_id_list = req.GET.get('template_id_list', None)
    # Each flag arrives as text; only the literal "true" (any case) is True.
    raw = req.GET.get('raw', 'false').lower() == 'true'
    statistics = req.GET.get('statistics', 'true').lower() == 'true'
    sync_with_zvm = req.GET.get('sync_with_zvm', 'false').lower() == 'true'
    result = get_action().get_fcp_templates_details(
        req, template_id_list, raw, statistics, sync_with_zvm)
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    req.response.body = utils.to_utf8(json.dumps(result))
    return req.response
@util.SdkWsgify
@tokens.validate
def delete_fcp_template(req):
    """Handler: delete the FCP template named in the URL path."""
    template_id = util.wsgi_path_item(req.environ, 'template_id')
    result = get_action().delete_fcp_template(template_id)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    return req.response
"""Handler for the image of the sdk API."""
import json
from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk import utils
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.schemas import image
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi import validation
# Lazily-created ImageAction singleton; see get_action().
_IMAGEACTION = None
# Shared SDK configuration and logger handles.
CONF = config.CONF
LOG = log.LOG
class ImageAction(object):
    """Forwards image REST operations to the SDK server over a socket."""

    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
                                             ip_addr=CONF.sdkserver.bind_addr,
                                             port=CONF.sdkserver.bind_port)

    @validation.schema(image.create)
    def create(self, body):
        """Import an image from a URL, optionally from a remote host."""
        # Named 'spec' instead of 'image' to avoid shadowing the schema
        # module imported at file scope.
        spec = body['image']
        return self.client.send_request('image_import',
                                        spec['image_name'],
                                        spec['url'],
                                        spec['image_meta'],
                                        spec.get('remote_host', None))

    @validation.query_schema(image.query)
    def get_root_disk_size(self, req, name):
        """Return the root disk size recorded for the named image."""
        # FIXME: this param needs the combined image name, e.g. the profile
        # name, not the given image name from the customer side
        return self.client.send_request('image_get_root_disk_size', name)

    def delete(self, name):
        """Delete the named image from the SDK image repository."""
        return self.client.send_request('image_delete', name)

    @validation.query_schema(image.query)
    def query(self, req, name):
        """Query image records; name may be None to list all images."""
        return self.client.send_request('image_query', name)

    @validation.schema(image.export)
    def export(self, name, body):
        """Export the named image to the destination URL in the body."""
        location = body['location']
        return self.client.send_request('image_export', name,
                                        location['dest_url'],
                                        location.get('remote_host', None))
def get_action():
    """Return the module-level ImageAction, constructing it on first use."""
    global _IMAGEACTION
    _IMAGEACTION = _IMAGEACTION or ImageAction()
    return _IMAGEACTION
@util.SdkWsgify
@tokens.validate
def image_create(req):
    """Handler: import a new image described by the JSON body."""
    payload = util.extract_json(req.body)
    result = get_action().create(body=payload)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_already_exists)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def image_get_root_disk_size(req):
    """Handler: report the root disk size of the image named in the path."""
    name = util.wsgi_path_item(req.environ, 'name')
    result = get_action().get_root_disk_size(req, name)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.content_type = 'application/json'
    req.response.status = util.get_http_code_from_sdk_return(result)
    return req.response
@util.SdkWsgify
@tokens.validate
def image_delete(req):
    """Handler: delete the image named in the URL path."""
    name = util.wsgi_path_item(req.environ, 'name')
    result = get_action().delete(name)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(result,
                                                             default=200)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def image_export(req):
    """Handler: export the named image to a destination location."""
    name = util.wsgi_path_item(req.environ, 'name')
    payload = util.extract_json(req.body)
    result = get_action().export(name, body=payload)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    return req.response
@util.SdkWsgify
@tokens.validate
def image_query(req):
    """Handler: query image records, optionally filtered by ?imagename=."""
    imagename = req.GET.get('imagename')  # None when not supplied
    result = get_action().query(req, imagename)
    req.response.body = utils.to_utf8(json.dumps(result))
    req.response.status = util.get_http_code_from_sdk_return(
        result, additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    return req.response
from zvmsdk.sdkwsgi.validation import parameter_types
# Request body schema for guest creation (POST /guests).
create = {
    'type': 'object',
    'properties': {
        'guest': {
            'type': 'object',
            'properties': {
                'userid': parameter_types.userid,
                'vcpus': parameter_types.positive_integer,
                'memory': parameter_types.positive_integer,
                # profile is similar to userid
                'user_profile': parameter_types.userid_or_None,
                'disk_list': parameter_types.disk_list,
                'max_cpu': parameter_types.max_cpu,
                'max_mem': parameter_types.max_mem,
                'ipl_from': parameter_types.ipl_from,
                'ipl_param': parameter_types.ipl_param,
                'ipl_loadparam': parameter_types.ipl_loadparam,
                'dedicate_vdevs': parameter_types.dedicate_vdevs,
                'loaddev': parameter_types.loaddev,
                'account': parameter_types.account,
                'comments': parameter_types.comment_list,
                'cschedule': parameter_types.cpupool,
                'cshare': parameter_types.share,
                'rdomain': parameter_types.rdomain,
                'pcif': parameter_types.pcif
            },
            # Only identity and sizing are mandatory; the rest have
            # server-side defaults.
            'required': ['userid', 'vcpus', 'memory'],
            'additionalProperties': False,
        },
        # NOTE(review): this entry sits inside 'properties' and therefore
        # declares a property literally named 'additionalProperties'; it
        # was probably meant for the enclosing object (which already has
        # one below) — confirm and clean up.
        'additionalProperties': False,
    },
    'required': ['guest'],
    'additionalProperties': False,
}
# Body schema for live guest relocation (VMRELOCATE).
live_migrate_vm = {
    'type': 'object',
    'properties': {
        'dest_zcc_userid': parameter_types.userid_or_None,
        'destination': parameter_types.userid,
        'parms': parameter_types.live_migrate_parms,
        'operation': parameter_types.name,
    },
    'required': ['destination', 'operation'],
    'additionalProperties': False,
}
# Body schema for creating a virtual NIC on a guest.
create_nic = {
    'type': 'object',
    'properties': {
        'nic': {
            'type': 'object',
            'properties': {
                'vdev': parameter_types.vdev_or_None,
                'nic_id': parameter_types.nic_id,
                'mac_addr': parameter_types.mac_address,
                'active': parameter_types.boolean,
            },
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' (declares a property
        # named 'additionalProperties') — likely meant for the enclosing
        # object, which already carries one below.
        'additionalProperties': False,
    },
    'required': ['nic'],
    'additionalProperties': False,
}
# Body schema for creating/configuring a network interface in the guest OS.
create_network_interface = {
    'type': 'object',
    'properties': {
        'interface': {
            'type': 'object',
            'properties': {
                'os_version': parameter_types.os_version,
                'guest_networks': parameter_types.network_list,
                'active': parameter_types.boolean,
            },
            'required': ['os_version', 'guest_networks'],
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' — see note above.
        'additionalProperties': False,
    },
    'required': ['interface'],
    'additionalProperties': False,
}
# Body schema for removing a network interface from the guest OS.
delete_network_interface = {
    'type': 'object',
    'properties': {
        'interface': {
            'type': 'object',
            'properties': {
                'os_version': parameter_types.os_version,
                'vdev': parameter_types.vdev,
                'active': parameter_types.boolean,
            },
            'required': ['os_version', 'vdev'],
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' (declares a property
        # named 'additionalProperties') — confirm and clean up.
        'additionalProperties': False,
    },
    'required': ['interface'],
    'additionalProperties': False,
}
# Body schema for (re)configuring a guest's minidisks.
config_minidisks = {
    'type': 'object',
    'properties': {
        'disk_info': {
            'type': 'object',
            'properties': {
                'disk_list': parameter_types.disk_conf,
            },
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' — see note above.
        'additionalProperties': False,
    },
    'required': ['disk_info'],
    'additionalProperties': False,
}
# Body schema for growing the root volume of a deployed guest.
grow_root_volume = {
    'type': 'object',
    'properties': {
        'os_version': parameter_types.os_version,
    },
    'required': ['os_version'],
    'additionalProperties': False,
}
# Body schema for adding disks to a guest.
create_disks = {
    'type': 'object',
    'properties': {
        'disk_info': {
            'type': 'object',
            'properties': {
                'disk_list': parameter_types.disk_list,
            },
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' — see note above.
        'additionalProperties': False,
    },
    'required': ['disk_info'],
    'additionalProperties': False,
}
# Body schema for removing disks (by vdev) from a guest.
delete_disks = {
    'type': 'object',
    'properties': {
        'vdev_info': {
            'type': 'object',
            'properties': {
                'vdev_list': parameter_types.vdev_list,
            },
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' — see note above.
        'additionalProperties': False,
    },
    'required': ['vdev_info'],
    'additionalProperties': False,
}
# Body schema for coupling/uncoupling a NIC to/from a vswitch.
nic_couple_uncouple = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'couple': parameter_types.boolean,
                'active': parameter_types.boolean,
                'vswitch': parameter_types.vswitch_name,
                'vlan_id': parameter_types.vlan_id_or_minus_1,
            },
            # FIXME: vswitch should be required when it's couple
            'required': ['couple'],
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' (declares a property
        # named 'additionalProperties') — confirm and clean up.
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}
# Body schema for deploying an image onto a guest.
deploy = {
    'type': 'object',
    'properties': {
        'image': parameter_types.name,
        'transportfiles': {'type': ['string']},
        'remotehost': parameter_types.remotehost,
        'vdev': parameter_types.vdev_or_None,
        'hostname': parameter_types.hostname,
        'skipdiskcopy': parameter_types.boolean,
    },
    'required': ['image'],
    'additionalProperties': False,
}
# Body schema for capturing a guest into an image.
capture = {
    'type': 'object',
    'properties': {
        'image': parameter_types.name,
        'capture_type': parameter_types.capture_type,
        'compress_level': parameter_types.compress_level,
    },
    'required': ['image'],
    'additionalProperties': False,
}
# Body schema for live-resizing the number of CPUs.
resize_cpus = {
    'type': 'object',
    'properties': {
        'cpu_cnt': parameter_types.max_cpu,
    },
    'required': ['cpu_cnt'],
    'additionalProperties': False,
}
# Body schema for live-resizing memory.
resize_mem = {
    'type': 'object',
    'properties': {
        'size': parameter_types.max_mem,
    },
    'required': ['size'],
    'additionalProperties': False,
}
# Query schema: comma-separated userid list in a single parameter.
userid_list_query = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list,
    },
    'additionalProperties': False
}
# Body schema for registering an existing VM with the cloud connector.
# additionalProperties is deliberately True: extra metadata is accepted.
register_vm = {
    'type': 'object',
    'properties': {
        'meta': {'type': ['string']},
        'net_set': {'type': ['string']},
        'port': {'type': ['string']},
    },
    'required': ['meta', 'net_set'],
    'additionalProperties': True
}
# Body schema for deregistering a VM.
deregister_vm = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
    },
    'additionalProperties': False
}
# Query schema: userid list supplied as an array of strings.
userid_list_array_query = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list_array,
    },
    'additionalProperties': False
}
# Query schema for NIC database lookups.
nic_DB_info = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'nic_id': parameter_types.nic_id,
        'vswitch': parameter_types.vswitch_name,
    },
    'additionalProperties': False,
}
# Body schema for guest power-on.
start = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}
# Body schema for guest power-off.
stop = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
        'poll_interval': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}
# Body schema for graceful guest shutdown (OS shutdown, then logoff).
softstop = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
        'poll_interval': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}
from zvmsdk.sdkwsgi.validation import parameter_types
# Body schema for volume attach: {'info': {'connection': {...}}}.
attach = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'connection': parameter_types.connection_info,
            },
            'required': ['connection'],
            'additionalProperties': False,
        },
        # NOTE(review): this entry sits inside 'properties' and therefore
        # declares a property named 'additionalProperties'; it was likely
        # meant for the enclosing object (which already has one below).
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}
# Body schema for volume detach; same shape as attach.
detach = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'connection': parameter_types.connection_info,
            },
            'required': ['connection'],
            'additionalProperties': False,
        },
        # NOTE(review): misnested inside 'properties' — see note above.
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}
# Query schema: single FCP device id from the URL path.
get_fcp_usage = {
    'type': 'object',
    'properties': {
        'fcp_id': parameter_types.fcp_id,
    },
    'required': ['fcp_id'],
    'additionalProperties': False,
}
# Query schema for listing FCP templates with optional filters.
get_fcp_templates = {
    'type': 'object',
    'properties': {
        'template_id_list': parameter_types.fcp_template_id_list,
        'assigner_id': parameter_types.single_param(parameter_types.userid),
        'host_default': parameter_types.single_param(parameter_types.boolean),
        'storage_providers': {
            'type': 'array'
        }
    },
    'additionalProperties': False,
}
# Query schema for detailed FCP template queries (raw/statistics flags).
get_fcp_templates_details = {
    'type': 'object',
    'properties': {
        'template_id_list': parameter_types.fcp_template_id_list,
        'raw': parameter_types.single_param(parameter_types.boolean),
        'statistics': parameter_types.single_param(parameter_types.boolean),
        'sync_with_zvm': parameter_types.single_param(parameter_types.boolean),
    },
    'additionalProperties': False,
}
# Body schema for updating an FCP usage record.
# Note: 'fcp_template_id' is optional (not in 'required').
set_fcp_usage = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'userid': parameter_types.userid,
                # reserved is a 0/1 flag, not a boolean string.
                'reserved': {
                    'type': ['integer'],
                    'minimum': 0,
                    'maximum': 1,
                },
                'connections': {
                    'type': ['integer'],
                },
                'fcp_template_id': parameter_types.fcp_template_id,
            },
            'required': ['reserved', 'connections'],
            'additionalProperties': False,
        }
    },
    'required': ['info'],
    'additionalProperties': False,
}
# Schema for get_volume_connector.
#
# Bug fix: previously 'required': ['info'] sat *inside* the 'info'
# sub-schema (demanding an 'info' key inside itself), and an
# 'additionalProperties' entry was misnested inside 'properties'.
# The handler reads body['info']['reserve'] unconditionally, so
# 'reserve' is the field that must be required within 'info'.
get_volume_connector = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list,
        'info': {
            'type': 'object',
            'properties': {
                'reserve': parameter_types.boolean,
                'fcp_template_id': parameter_types.fcp_template_id,
                'storage_provider': parameter_types.name
            },
            'required': ['reserve'],
            'additionalProperties': False,
        },
    },
    'additionalProperties': False,
}
# Body schema for creating an FCP template; only 'name' is mandatory.
create_fcp_template = {
    'type': 'object',
    'properties': {
        'name': parameter_types.name,
        'description': {
            'type': 'string'
        },
        # Textual spec of FCP device ranges, e.g. "1A00-1A0F;1B00".
        'fcp_devices': {
            'type': 'string'
        },
        'host_default': parameter_types.boolean,
        'storage_providers': {
            'type': 'array'
        },
        'min_fcp_paths_count': parameter_types.positive_integer
    },
    'required': ['name'],
    'additionalProperties': False,
}
# Body schema for editing an FCP template; omitted fields stay unchanged.
edit_fcp_template = {
    'type': 'object',
    'properties': {
        'fcp_template_id': parameter_types.fcp_template_id,
        'name': parameter_types.name,
        'description': {
            'type': 'string'
        },
        'fcp_devices': {
            'type': 'string'
        },
        'host_default': parameter_types.boolean,
        'storage_providers': {
            'type': 'array'
        },
        'min_fcp_paths_count': parameter_types.positive_integer
    },
    'required': ['fcp_template_id'],
    'additionalProperties': False
}
import functools
import re
import jsonschema
from jsonschema import exceptions as jsonschema_exc
import six
from zvmsdk import exception
def _schema_validation_helper(schema, target, args, kwargs, is_body=True):
    """Validate target against schema; return True on success.

    Raises exception.ValidationError (via _SchemaValidator) on failure.
    Bug fix: this previously returned None, so query_schema()'s
    'if _schema_validation_helper(...)' guard was never true and the
    intended stripping of unexpected query parameters never ran.
    """
    schema_validator = _SchemaValidator(
        schema, is_body=is_body)
    schema_validator.validate(target)
    return True
def schema(request_body_schema):
    """Decorator factory: validate a handler's 'body' kwarg against a schema.

    The wrapped callable must receive the parsed JSON payload as the
    'body' keyword argument; failures raise exception.ValidationError.
    """
    def _decorator(func):
        @functools.wraps(func)
        def _wrapped(*args, **kwargs):
            _schema_validation_helper(request_body_schema, kwargs['body'],
                                      args, kwargs)
            return func(*args, **kwargs)
        return _wrapped
    return _decorator
class FormatChecker(jsonschema.FormatChecker):
    """FormatChecker variant that silently accepts unregistered formats."""

    def check(self, instance, format):
        """Check instance against the named format; raise FormatError on failure."""
        entry = self.checkers.get(format)
        if entry is None:
            # Unknown formats are not treated as an error.
            return
        func, raises = entry
        result = None
        cause = None
        try:
            result = func(instance)
        except raises as exc:
            cause = exc
        if not result:
            raise jsonschema_exc.FormatError(
                "%r is not a %r" % (instance, format), cause=cause)
class _SchemaValidator(object):
    """Wraps a jsonschema validator and converts failures to SDK errors.

    validator_org may be overridden by subclasses to target a different
    JSON-Schema draft.
    """
    validator = None
    validator_org = jsonschema.Draft4Validator

    def __init__(self, schema, relax_additional_properties=False,
                 is_body=True):
        # is_body selects the wording of validate()'s error message:
        # request body vs. query parameters.
        self.is_body = is_body
        # Extend the base draft-4 validator with a no-op 'dummy' keyword.
        validators = {
            'dummy': self._dummy
        }
        validator_cls = jsonschema.validators.extend(self.validator_org,
                                                     validators)
        format_checker = FormatChecker()
        self.validator = validator_cls(schema, format_checker=format_checker)

    def _dummy(self, validator, minimum, instance, schema):
        # Intentionally empty placeholder keyword validator.
        pass

    def validate(self, *args, **kwargs):
        """Validate the target, translating jsonschema failures.

        Raises:
            exception.ValidationError: on any schema validation failure,
                with a human-readable detail string.
        """
        try:
            self.validator.validate(*args, **kwargs)
        except jsonschema.ValidationError as ex:
            if isinstance(ex.cause, exception.InvalidName):
                detail = ex.cause.format_message()
            elif len(ex.path) > 0:
                # ex.path locates the failing element; include it so API
                # callers can see which field was rejected.
                if self.is_body:
                    detail = ("Invalid input for field/attribute %(path)s. "
                              "Value: %(value)s. %(message)s")
                else:
                    detail = ("Invalid input for query parameters %(path)s. "
                              "Value: %(value)s. %(message)s")
                detail = detail % {
                    'path': ex.path.pop(), 'value': ex.instance,
                    'message': ex.message
                }
            else:
                detail = ex.message
            raise exception.ValidationError(detail=detail)
        except TypeError as ex:
            # e.g. an unexpected type deep inside the input document.
            detail = six.text_type(ex)
            raise exception.ValidationError(detail=detail)
def _remove_unexpected_query_parameters(schema, req):
"""Remove unexpected properties from the req.GET."""
additional_properties = schema.get('addtionalProperties', True)
if additional_properties:
pattern_regexes = []
patterns = schema.get('patternProperties', None)
if patterns:
for regex in patterns:
pattern_regexes.append(re.compile(regex))
for param in set(req.GET.keys()):
if param not in schema['properties'].keys():
if not (list(regex for regex in pattern_regexes if
regex.match(param))):
del req.GET[param]
def query_schema(query_params_schema, min_version=None,
                 max_version=None):
    """Register a schema to validate request query parameters.

    The request is taken from the 'req' kwarg or the second positional
    argument.  Path (routing) parameters take precedence; when none were
    matched the raw query string is validated instead.  On success,
    unexpected query parameters are stripped from req.GET.

    Improvement: the two identical validate-then-strip branches were
    collapsed into one by selecting the validation target first.
    """
    def add_validator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if 'req' in kwargs:
                req = kwargs['req']
            else:
                req = args[1]
            target = (req.environ['wsgiorg.routing_args'][1] or
                      req.GET.dict_of_lists())
            if _schema_validation_helper(query_params_schema, target,
                                         args, kwargs, is_body=False):
                _remove_unexpected_query_parameters(query_params_schema,
                                                    req)
            return func(*args, **kwargs)
        return wrapper
    return add_validator
import re
import unicodedata
import six
def single_param(schema):
    """Schema for a query parameter that may appear at most once."""
    return {'type': 'array', 'items': schema, 'maxItems': 1}
def multi_params(schema):
    """Schema for a repeatable query parameter (array of *schema*)."""
    return dict(type='array', items=schema)
class ValidationRegex(object):
    """Pairs a regex with a human-readable reason for error messages."""

    def __init__(self, regex, reason):
        # regex: the pattern string; reason: text shown when it fails.
        self.regex = regex
        self.reason = reason
def _is_printable(char):
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
    """Yield each Basic-Multilingual-Plane character below U+FFFF."""
    return (six.unichr(code) for code in range(0xFFFF))
def _build_regex_range(ws=True, invert=False, exclude=None):
    """Build the body of a regex character class over printable chars.

    Scans the BMP and compresses consecutive valid characters into
    'a-b' style ranges.  The result is meant to be embedded as
    '[%s]' % _build_regex_range(...).

    Args:
        ws: when True, ordinary spaces (category 'Zs') count as valid.
        invert: when True, build the complement set instead.
        exclude: characters that are never considered valid.
    """
    if exclude is None:
        exclude = []
    regex = ""
    # Range-compression state: in_range tracks whether we are inside a
    # run of valid chars; last is the previous char scanned; last_added
    # is the char that opened the current run.
    in_range = False
    last = None
    last_added = None
    def valid_char(char):
        # Membership test honouring the exclude/ws/invert flags.
        if char in exclude:
            result = False
        elif ws:
            result = _is_printable(char)
        else:
            # Zs is the unicode class for space characters, of which
            # there are about 10 in this range.
            result = (_is_printable(char) and
                      unicodedata.category(char) != "Zs")
        if invert is True:
            return not result
        return result
    # iterate through the entire character range. in_
    for c in _get_all_chars():
        if valid_char(c):
            if not in_range:
                regex += re.escape(c)
                last_added = c
            in_range = True
        else:
            # Close an open run with '-<last>' unless it was one char.
            if in_range and last != last_added:
                regex += "-" + re.escape(last)
            in_range = False
        last = c
    else:
        # for/else: close a run that extends to the end of the scan.
        if in_range:
            regex += "-" + re.escape(c)
    return regex
# Template for the 'name' format checker: the value may not start or end
# with a non-printable/whitespace character.
valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
valid_name_regex = ValidationRegex(
    valid_name_regex_base % (
        _build_regex_range(ws=False, invert=True),
        _build_regex_range(),
        _build_regex_range(ws=False, invert=True)),
    "printable characters. Can not start or end with whitespace.")
# Generic display name: printable, no leading/trailing whitespace
# (enforced by the 'name' format checker above).
name = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
    'format': 'name'
}
# z/VM ACCOUNT directory statement value.
account = {
    'type': 'string', 'minLength': 0, 'maxLength': 128,
}
# IPL source device or keyword.
ipl_from = {
    'type': 'string', 'minLength': 0, 'maxLength': 255,
}
ipl_param = {
    'type': 'string', 'minLength': 0, 'maxLength': 255,
}
ipl_loadparam = {
    'type': 'string', 'minLength': 0, 'maxLength': 255,
}
# SCSI load device: FCP port name and LUN, up to 16 hex digits each.
loaddev = {
    'type': 'object',
    'properties': {
        'portname': {'type': 'string',
                     'minLength': 1,
                     'maxLength': 16,
                     'pattern': '^[0-9a-fA-F]{,16}$'},
        'lun': {'type': 'string',
                'minLength': 1,
                'maxLength': 16,
                'pattern': '^[0-9a-fA-F]{,16}$'},
    },
    'additionalProperties': False
}
# Unique list of device numbers (up to 4 hex digits) to dedicate.
dedicate_vdevs = {
    'type': 'array',
    'minItems': 0,
    'items': {
        'type': 'string',
        'pattern': '^[0-9a-fA-F]{,4}$'
    },
    'uniqueItems': True
}
# Integers accepted either natively or as digit strings.
positive_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 1
}
non_negative_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 0
}
ipv4 = {
    'type': 'string', 'format': 'ipv4'
}
nic_info = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'nic_id': {'type': 'string'},
            'mac_addr': {'type': 'string'}
        },
        'additionalProperties': False
    }
}
# Boolean accepted in many textual spellings; handlers normalise to bool.
boolean = {
    'type': ['boolean', 'string'],
    'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
             'YES', 'Yes', 'yes',
             False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
             'NO', 'No', 'no']
}
# Fix: regex literals below used '\s', '\w', '\-', '\@' inside plain
# strings — invalid Python escape sequences (SyntaxWarning on modern
# Python, lint W605).  Converted to raw strings; the values are
# byte-identical.
# Up to three real device numbers separated by whitespace, or None.
rdev_list = {
    'oneOf': [
        {'type': 'null'},
        {'type': 'string',
         'pattern': r'^([0-9a-fA-F]{,4})(\s+[0-9a-fA-F]{,4}){,2}$'}
    ]
}
# Single real device number: 1-4 hex digits.
rdev = {
    'type': ['string'], 'minLength': 1, 'maxLength': 4,
    'pattern': '^[0-9a-fA-F]{,4}$'
}
# Virtual device number or None (let the server pick).
vdev_or_None = {
    'oneOf': [
        {'type': 'null'},
        {'type': ['string'], 'minLength': 1, 'maxLength': 4,
         'pattern': '^[0-9a-fA-F]{,4}$'}
    ]
}
vdev = {
    'type': ['string'], 'minLength': 1, 'maxLength': 4,
    'pattern': '^[0-9a-fA-F]{,4}$'
}
vdev_list = {
    'type': 'array',
    'minItems': 1,
    'items': {
        'type': 'string',
        'pattern': '^[0-9a-fA-F]{,4}$'
    },
    'uniqueItems': True
}
image_list = {
    'maxItems': 1,
    'items': {
        'format': 'name',
        'maxLength': 255,
        'minLength': 1,
        'type': 'string'
    },
    'type': 'array'
}
# http(s):// or file:/// URL.
url = {
    'type': ['string'],
    'pattern': r'^https?:/{2}|^file:/{3}\w.+$'
}
mac_address = {
    'type': 'string',
    'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
# user@host where host is a dotted quad or a hostname.
remotehost = {
    'type': ['string'],
    'pattern': r'^[a-zA-Z0-9\-]+\@([0-9]{1,3}(.[0-9]{1,3}){3}$|'
               r'[a-zA-Z0-9\-]+(.[a-zA-Z0-9\-]){1,}$)'
}
# Fix: '\w', '\s', '\*' in plain strings are invalid Python escapes
# (SyntaxWarning on modern Python, lint W605); converted to raw strings
# with byte-identical values.
# z/VM user id: up to eight word characters.
userid = {
    'type': ['string'],
    'minLength': 1,
    'maxLength': 8,
    'pattern': r'^(\w{,8})$'
}
cpupool = {
    'type': ['string'],
    'minLength': 1,
    'maxLength': 8,
    'pattern': r'^(\w{,8})$'
}
share = {
    'type': ['string'],
    'minLength': 1,
    'maxLength': 64,
    'pattern': r'^(\w{,64})$'
}
rdomain = {
    'type': ['string'],
    'minLength': 1,
    'maxLength': 8,
    'pattern': r'^(\w{,8})$'
}
pcif = {
    'type': ['string'],
    'minLength': 1,
    'maxLength': 9,
    'pattern': r'^(\w{,9})$'
}
userid_or_None = {
    'oneOf': [
        {'type': 'null'},
        {'type': ['string'], 'minLength': 1,
         'maxLength': 8, 'pattern': r'^(\w{,8})$'}
    ]
}
vswitch_name = {
    'type': ['string'], 'minLength': 1, 'maxLength': 8
}
# Either the literal '*' (system default) or a 1-8 char controller name.
controller = {
    'type': ['string'],
    'anyOf': [
        {'pattern': r'\*'},
        {'minLength': 1, 'maxLength': 8}
    ]
}
nic_id = {
    'type': ['string']
}
cidr = {
    'type': ['string'],
    'format': 'cidr'
}
# Comma-separated list of user ids in a single string.
userid_list = {
    'type': ['string'],
    # TODO:validate userid_list in inspect APIs
    'pattern': r'^(\s*\w{1,8}\s*)(,\s*\w{1,8}\s*){0,}$'
}
userid_list_array = {
    'items': {
        'type': ['string'],
        'minLength': 1,
        'pattern': r'^(\s*\w{1,8}\s*)(,\s*\w{1,8}\s*){0,}$'
    },
    'type': 'array'
}
# FCP template id (UUID string up to 36 chars) or null.
fcp_template_id = {
    'oneOf': [
        {'type': 'null'},
        {'type': 'string', 'maxLength': 36}
    ]
}
# Array of FCP template ids.
fcp_template_id_list = {
    'items': {
        'type': 'string'
    },
    'type': 'array'
}
# Filesystem type used when formatting a disk.
file_type = {
    'type': 'string',
    'enum': ['ext2', 'ext3', 'ext4', 'xfs', 'swap', 'none']
}
# Array holding at most one volume name (raw string for '\w').
volume_list = {
    'maxItems': 1,
    'items': {
        'type': 'string',
        'minLength': 1,
        'pattern': r'^(\w{,6})$',
    },
    'type': 'array'
}
# Disk pool in 'type:name' form, e.g. 'ECKD:poolname' (raw string for '\w').
disk_pool = {
    'type': 'string',
    'pattern': r'^\w+:\w+$'
}
# Array holding at most one disk pool specification.
disk_pool_list = {
    'maxItems': 1,
    'items': {
        'type': 'string',
        'pattern': r'^\w+:\w+$',
    },
    'type': 'array'
}
# Array of disk definitions; only 'size' is mandatory.  Raw string for
# the '\w' pattern so it is not an invalid string escape.
disk_list = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'size': {'type': 'string'},
            'format': file_type,
            'is_boot_disk': boolean,
            'vdev': vdev,
            'disk_pool': {'type': 'string', 'pattern': r'^\w+:\w+$'}
        },
        'required': ['size'],
        'additionalProperties': False
    }
}
# Free-form array of comment strings.
comment_list = {
    'type': 'array',
    'items': {
        'type': 'string'
    }
}
# Optional tuning parameters accepted by live migration.
live_migrate_parms = {
    'type': 'object',
    'properties': {
        'maxtotal': {'type': 'integer'},
        'maxquiesce': {'type': 'integer'},
        'immediate': {'type': 'string'},
        'forcearch': {'type': 'string'},
        'forcedomain': {'type': 'string'},
        'forcestorage': {'type': 'string'}
    },
    'additionalProperties': False
}
# Per-disk configuration (format is mandatory, size/mount dir optional).
disk_conf = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'vdev': vdev,
            'format': file_type,
            'mntdir': {'type': 'string'},
            'size': {'type': 'string'}
        },
        'required': ['format'],
        'additionalProperties': False
    }
}
# For redhat linux, it will match rhelX, rhelX.Y, redhatX, redhatX.Y,
# where X is 6 or 7, Y is 0 to 9, all case insensitive
# For suse linux, it will match slesX, slesX.Y, slesXspY, suseX,
# suseX.Y, suseXspY, where X is 11 or 12, Y is 0 to 9,
# all case insensitive
# For ubuntu linux, it will match ubuntuX, ubuntuX.Y, ubuntuX.Y.Z,
# where X is 16, Y is 01 to 10, Z is 0 to 9, such as ubuntu16.04.3,
# all case insensitive
# For red hat cores linux, it will match rhcosX, rhcosX.Y and rhcosX.Y.Z,
# where X is 4, such as rhcos4, rhcos4.6, rhcos4.6.8,
# all case insensitive
os_version = {
    'oneOf': [
        {'type': 'string',
         'pattern':
             '^((r|R)(h|H)(e|E)(l|L))(6|7|8|9){1}([.][0-9])?$'},
        {'type': 'string',
         'pattern':
             '^((r|R)(e|E)(d|D)(h|H)(a|A)(t|T))(6|7){1}([.][0-9])?$'},
        {'type': 'string',
         'pattern':
         '^((s|S)(l|L)(e|E)(s|S))(11|12|15){1}(([.]|((s|S)(p|P)))[0-9])?$'},
        {'type': 'string',
         'pattern':
         '^((s|S)(u|U)(s|S)(e|E))(11|12|15){1}(([.]|((s|S)(p|P)))[0-9])?$'},
        {'type': 'string',
         'pattern':
'^((u|U)(b|B)(u|U)(n|N)(t|T)(u|U))(16|20|22){1}([.][0-9]{2})?([.][0-9])?$'},
        {'type': 'string',
         'pattern':
'^((r|R)(h|H)(c|C)(o|O)(s|S))(4){1}([.][0-9]{1,2})?([.][0-9]{1,2})?$'}
    ]
}
# Disk technology backing an image.
disk_type = {
    'type': 'string',
    'enum': ['DASD', 'dasd', 'SCSI', 'scsi']
}
# Metadata supplied when importing an image.
image_meta = {
    'type': 'object',
    'properties': {
        'os_version': os_version,
        # md5 should be 32 hexadecimal digits
        'md5sum': {'type': 'string', 'pattern': '^[0-9a-fA-F]{32}$'},
        'disk_type': disk_type
    },
    'required': ['os_version'],
    'additionalProperties': False
}
# Arbitrary command string executed on the guest.
command = {
    'type': 'string'
}
# Guest hostname (RFC-ish charset) or null.
hostname = {
    'oneOf': [
        {'type': 'null'},
        {'type': 'string', 'minLength': 1, 'maxLength': 255,
         'pattern': '^[a-zA-Z0-9-._]*$'}
    ]
}
# Array of per-NIC network definitions; ip_addr requires cidr.
# NOTE(review): 'additionalProperties' sits on the array schema, where
# JSON Schema ignores it; it was probably meant inside 'items' —
# confirm against callers before moving it.
network_list = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'ip_addr': ipv4,
            'dns_addr': {'type': 'array',
                         'items': ipv4},
            'gateway_addr': ipv4,
            'mac_addr': mac_address,
            'cidr': cidr,
            'nic_vdev': vdev,
            'nic_id': {'type': 'string'},
            'osa_device': vdev,
            'hostname': hostname},
        'dependencies': {
            'ip_addr': ['cidr']
        }
    },
    'additionalProperties': False
}
# Scope of a guest capture: root disk only, or every disk.
capture_type = {
    'type': 'string',
    'enum': ['rootonly', 'alldisks']
}
# Compression level 0-9.
# NOTE(review): 'pattern' has no effect on integer instances in JSON
# Schema — confirm whether string values were also intended here.
compress_level = {
    'type': ['integer'],
    'pattern': '^[0-9]$'
}
# Pair of userid and VLAN id (1-4094).
user_vlan_id = {
    'type': 'object',
    'properties': {
        'userid': userid,
        'vlanid': {'type': ['integer'],
                   'minimum': 1,
                   'maximum': 4094,
                   }
    },
    'required': ['userid', "vlanid"],
    'additionalProperties': False
}
# Array of FCP device numbers (exactly 4 hex digits each).
fcp = {
    'type': 'array',
    'items': {
        'type': 'string',
        'minLength': 4,
        'maxLength': 4,
        'pattern': '^[0-9a-fA-F]{4}$'
    }
}
# Single FCP device number (exactly 4 hex digits).
fcp_id = {
    'type': 'string',
    'minLength': 4,
    'maxLength': 4,
    'pattern': '^[0-9a-fA-F]{4}$'
}
# Array of target WWPNs in 0x-prefixed 16-hex-digit form.
wwpn = {
    'type': 'array',
    'items': {
        'type': 'string',
        'minLength': 18,
        'maxLength': 18,
        'pattern': '^0x[0-9a-fA-F]{16}$'
    }
}
# Target LUN in 0x-prefixed 16-hex-digit form.
lun = {
    'type': ['string'], 'minLength': 18, 'maxLength': 18,
    'pattern': '^0x[0-9a-fA-F]{16}$'
}
# Volume attach/detach connection description.
connection_info = {
    'type': 'object',
    'properties': {
        'assigner_id': userid,
        'zvm_fcp': fcp,
        'target_wwpn': wwpn,
        'target_lun': lun,
        'os_version': os_version,
        'multipath': boolean,
        'mount_point': {'type': 'string'},
        'is_root_volume': boolean,
        'update_connections_only': boolean,
    },
    'required': ['assigner_id', 'zvm_fcp', 'target_wwpn',
                 'target_lun', 'multipath', 'os_version',
                 'mount_point'],
    'additionalProperties': False
}
# Vswitch connection option (CP-style case variants accepted).
connection_type = {
    'type': 'string',
    'enum': ['CONnect', 'CONNECT', 'connect',
             'DISCONnect', 'DISCONNECT', 'disconnect',
             'NOUPLINK', 'nouplink']
}
# Vswitch router role.
router_type = {
    'type': 'string',
    'enum': ['NONrouter', 'NONROUTER', 'nonrouter',
             'PRIrouter', 'PRIROUTER', 'prirouter']
}
# Vswitch transport type.
network_type = {
    'type': 'string',
    'enum': ['IP', 'ip', 'ETHernet', 'ethernet', 'ETHERNET']
}
# VLAN awareness: UNAWARE/AWARE keyword or a numeric VLAN id.
vid_type = {
    'oneOf': [
        {'type': 'string', 'enum': ['UNAWARE', 'unaware', 'AWARE', 'aware']},
        {'type': 'integer', 'minimum': 1, 'maximum': 4094}
    ]
}
# Vswitch port type.
port_type = {
    'type': 'string',
    'enum': ['ACCESS', 'access', 'TRUNK', 'trunk']
}
# GVRP participation.
gvrp_type = {
    'type': 'string',
    'enum': ['GVRP', 'gvrp', 'NOGVRP', 'nogvrp']
}
# Native VLAN id (1-4094) or null.
native_vid_type = {
    'oneOf': [
        {'type': 'null'},
        {'type': 'integer', 'minimum': 1, 'maximum': 4094}
    ]
}
# Maximum virtual CPU count.
max_cpu = {
    'type': 'integer',
    'minimum': 1,
    'maximum': 64
}
# Maximum memory, e.g. '4g' or '2048m'.
max_mem = {
    'type': 'string',
    'pattern': '^[1-9][0-9]{0,3}[m|M|g|G]$'
}
vlan_id_or_minus_1 = {
'type': 'integer',
'minimum': -1,
'maximum': 4094,
} | zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/validation/parameter_types.py | parameter_types.py |
import json
import six
import socket
# Module id used in error structures produced by this client.
SDKCLIENT_MODID = 110
# Socket error template: [base result dict, {rs: message format}, summary].
SOCKET_ERROR = [{'overallRC': 101, 'modID': SDKCLIENT_MODID, 'rc': 101},
                {1: "Failed to create client socket, error: %(error)s",
                 2: ("Failed to connect SDK server %(addr)s:%(port)s, "
                     "error: %(error)s"),
                 3: ("Failed to send all API call data to SDK server, "
                     "only %(sent)d bytes sent. API call: %(api)s"),
                 4: "Client receive empty data from SDK server",
                 5: ("Client got socket error when sending API call to "
                     "SDK server, error: %(error)s"),
                 6: ("Client got socket error when receiving response "
                     "from SDK server, error: %(error)s")},
                "SDK client or server get socket error",
                ]
# Invalid-API-name error template, same [base, messages, summary] shape.
INVALID_API_ERROR = [{'overallRC': 400, 'modID': SDKCLIENT_MODID, 'rc': 400},
                     {1: "Invalid API name, '%(msg)s'"},
                     "Invalid API name"
                     ]
class SDKSocketClient(object):
    """Socket client that forwards API calls to the z/VM SDK server.

    Each call() opens a fresh TCP connection, sends the JSON-encoded
    (api name, args, kwargs) tuple and reads the JSON response until
    the server closes the connection.
    """

    def __init__(self, addr='127.0.0.1', port=2000, request_timeout=3600):
        self.addr = addr
        self.port = port
        # request_timeout is used to set the client socket timeout when
        # waiting results returned from server.
        self.timeout = request_timeout

    def _construct_api_name_error(self, msg):
        """Build an 'invalid API name' result dict."""
        # Copy the template: updating the shared module-level dict in
        # place would leak 'rs'/'errmsg' state into every later error
        # (and race under concurrent use).
        results = INVALID_API_ERROR[0].copy()
        results.update({'rs': 1,
                        'errmsg': INVALID_API_ERROR[1][1] % {'msg': msg},
                        'output': ''})
        return results

    def _construct_socket_error(self, rs, **kwargs):
        """Build a socket-error result dict for reason code rs."""
        # Copy for the same reason as _construct_api_name_error.
        results = SOCKET_ERROR[0].copy()
        results.update({'rs': rs,
                        'errmsg': SOCKET_ERROR[1][rs] % kwargs,
                        'output': ''})
        return results

    def call(self, func, *api_args, **api_kwargs):
        """Send API call to SDK server and return results"""
        if not isinstance(func, str) or (func == ''):
            msg = ('Invalid input for API name, should be a'
                   'string, type: %s specified.') % type(func)
            return self._construct_api_name_error(msg)
        # Create client socket
        try:
            cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error as err:
            return self._construct_socket_error(1, error=six.text_type(err))
        try:
            # Set socket timeout
            cs.settimeout(self.timeout)
            # Connect SDK server
            try:
                cs.connect((self.addr, self.port))
            except socket.error as err:
                return self._construct_socket_error(2, addr=self.addr,
                                                    port=self.port,
                                                    error=six.text_type(err))
            # Prepare the data to be sent and switch to bytes if needed
            api_data = json.dumps((func, api_args, api_kwargs))
            api_data = api_data.encode()
            # Send the API call data to SDK server
            sent = 0
            total_len = len(api_data)
            got_error = False
            try:
                while (sent < total_len):
                    this_sent = cs.send(api_data[sent:])
                    if this_sent == 0:
                        # send() returning 0 means the connection is gone
                        got_error = True
                        break
                    sent += this_sent
            except socket.error as err:
                return self._construct_socket_error(5,
                                                    error=six.text_type(err))
            if got_error or sent != total_len:
                return self._construct_socket_error(3, sent=sent,
                                                    api=api_data)
            # Receive data from server until it closes the connection
            return_blocks = []
            try:
                while True:
                    block = cs.recv(4096)
                    if not block:
                        break
                    block = bytes.decode(block)
                    return_blocks.append(block)
            except socket.error as err:
                # When the sdkserver can't handle all the client requests,
                # some client requests would be rejected.
                # Under this case, the client socket can successfully
                # connect/send, but would get exception in recv with error:
                # "error: [Errno 104] Connection reset by peer"
                return self._construct_socket_error(6,
                                                    error=six.text_type(err))
        finally:
            # Always close the client socket to avoid too many hanging
            # socket left.
            cs.close()
        # Transform the received stream to standard result form
        # This client assumes that the server would return result in
        # the standard result form, so client just return the received
        # data
        if return_blocks:
            results = json.loads(''.join(return_blocks))
        else:
            results = self._construct_socket_error(4)
        return results
from zvmconnector import socketclient
from zvmconnector import restclient
# Supported transport types for talking to the SDK server.
CONN_TYPE_SOCKET = 'socket'
CONN_TYPE_REST = 'rest'
class baseConnection(object):
    """Common interface for SDK server connections."""

    def request(self, api_name, *api_args, **api_kwargs):
        # Subclasses override this; the base implementation does nothing.
        return None
class socketConnection(baseConnection):
    """Connection that talks to the SDK server over a raw TCP socket."""

    def __init__(self, ip_addr='127.0.0.1', port=2000, timeout=3600):
        self.client = socketclient.SDKSocketClient(ip_addr, port, timeout)

    def request(self, api_name, *api_args, **api_kwargs):
        # Delegate directly to the socket client.
        return self.client.call(api_name, *api_args, **api_kwargs)
class restConnection(baseConnection):
    """Connection that talks to the SDK server over its REST API."""

    def __init__(self, ip_addr='127.0.0.1', port=8080, ssl_enabled=False,
                 verify=False, token_path=None):
        self.client = restclient.RESTClient(ip_addr, port, ssl_enabled, verify,
                                            token_path)

    def request(self, api_name, *api_args, **api_kwargs):
        # Delegate directly to the REST client.
        return self.client.call(api_name, *api_args, **api_kwargs)
class ZVMConnector(object):
    """Facade that picks a transport and forwards SDK API requests."""

    def __init__(self, ip_addr=None, port=None, timeout=3600,
                 connection_type=None, ssl_enabled=False, verify=False,
                 token_path=None):
        """
        :param str ip_addr: IP address of SDK server
        :param int port: Port of SDK server daemon
        :param int timeout: Wait timeout if request no response
        :param str connection_type: The value should be 'socket' or 'rest'
        :param boolean ssl_enabled: Whether SSL enabled or not. If enabled,
                                    use HTTPS instead of HTTP. The https
                                    server should enable SSL to support this.
        :param boolean/str verify: Either a boolean, in which case it
                                   controls whether we verify the server's
                                   TLS certificate, or a string, in which
                                   case it must be a path to a CA bundle
                                   to use. Default to False.
        :param str token_path: The path of token file.
        """
        # Anything other than an explicit (case-insensitive) 'socket'
        # selects the REST transport.
        wants_socket = (connection_type is not None and
                        connection_type.lower() == CONN_TYPE_SOCKET)
        connection_type = CONN_TYPE_SOCKET if wants_socket else CONN_TYPE_REST
        self.conn = self._get_connection(ip_addr, port, timeout,
                                         connection_type, ssl_enabled, verify,
                                         token_path)

    def _get_connection(self, ip_addr, port, timeout,
                        connection_type, ssl_enabled, verify,
                        token_path):
        """Build the concrete connection, defaulting host and port."""
        host = ip_addr or '127.0.0.1'
        if connection_type == CONN_TYPE_SOCKET:
            return socketConnection(host, port or 2000, timeout)
        return restConnection(host, port or 8080,
                              ssl_enabled=ssl_enabled, verify=verify,
                              token_path=token_path)

    def send_request(self, api_name, *api_args, **api_kwargs):
        """Refer to SDK API documentation.

        :param api_name:     SDK API name
        :param *api_args:    SDK API sequence parameters
        :param **api_kwargs: SDK API keyword parameters
        """
        return self.conn.request(api_name, *api_args, **api_kwargs)
import json
import os
import requests
import six
import tempfile
import threading
import uuid
from zvmsdk import config
CONF = config.CONF
# TODO:set up configuration file only for RESTClient and configure this value
# Guards token (re)acquisition so only one thread refreshes at a time.
TOKEN_LOCK = threading.Lock()
# Chunk size (bytes) for streaming file transfers.
CHUNKSIZE = 4096
# Error templates: [base result dict, {rs: message format}, summary].
REST_REQUEST_ERROR = [{'overallRC': 101, 'modID': 110, 'rc': 101},
                      {1: "Request to zVM Cloud Connector failed: %(error)s",
                       2: "Token file not found: %(error)s",
                       3: "Request to url: %(url)s got unexpected response: "
                       "status_code: %(status)s, reason: %(reason)s, "
                       "text: %(text)s",
                       4: "Get Token failed: %(error)s"},
                      "zVM Cloud Connector request failed",
                      ]
SERVICE_UNAVAILABLE_ERROR = [{'overallRC': 503, 'modID': 110, 'rc': 503},
                             {2: "Service is unavailable. reason: %(reason)s,"
                              " text: %(text)s"},
                             "Service is unavailable",
                             ]
INVALID_API_ERROR = [{'overallRC': 400, 'modID': 110, 'rc': 400},
                     {1: "Invalid API name, '%(msg)s'"},
                     "Invalid API name",
                     ]
class UnexpectedResponse(Exception):
    # Carries the raw HTTP response that did not match expectations.
    def __init__(self, resp):
        self.resp = resp


class ServiceUnavailable(Exception):
    # Raised on HTTP 503; carries the raw response.
    def __init__(self, resp):
        self.resp = resp


class TokenNotFound(Exception):
    # Raised when the admin token file cannot be located.
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
class TokenFileOpenError(Exception):
    """Raised when the token file exists but cannot be opened."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        # Added for consistency with the other message-carrying
        # exceptions in this module (TokenNotFound, CACertNotFound, ...),
        # which all render as repr(msg); previously str() was empty.
        return repr(self.msg)
class CACertNotFound(Exception):
    # Raised when the CA bundle given for TLS verification is missing.
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class APINameNotFound(Exception):
    # Raised when the requested API name is not in the DATABASE table.
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class ArgsFormatError(Exception):
    # Raised when the positional arguments do not match the API spec.
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
def fill_kwargs_in_body(body, **kwargs):
    """Merge all keyword arguments into the request body dict in place.

    :param dict body: request body to update (mutated)
    :param kwargs: extra fields to copy into body
    """
    # dict.update is the idiomatic equivalent of the former key-by-key loop.
    body.update(kwargs)
# Each req_* builder below returns (url, body) for one SDK REST API.
# '%s' placeholders in the url are filled in later from the positional
# args preceding start_index — presumably by the caller that dispatches
# through DATABASE; confirm against _get_url_body_headers.

# GET server version.
def req_version(start_index, *args, **kwargs):
    url = '/'
    body = None
    return url, body


# GET list of all guests.
def req_guest_list(start_index, *args, **kwargs):
    url = '/guests'
    body = None
    return url, body


# DELETE a guest (userid filled into the url).
def req_guest_delete(start_index, *args, **kwargs):
    url = '/guests/%s'
    body = None
    return url, body


# GET a guest's definition info.
def req_guest_get_definition_info(start_index, *args, **kwargs):
    url = '/guests/%s'
    body = None
    return url, body


# POST a new guest: userid, vcpus, memory plus optional kwargs.
def req_guest_create(start_index, *args, **kwargs):
    url = '/guests'
    body = {'guest': {'userid': args[start_index],
                      'vcpus': args[start_index + 1],
                      'memory': args[start_index + 2]}}
    fill_kwargs_in_body(body['guest'], **kwargs)
    return url, body


# GET stats for one userid (str) or several (iterable of str).
def req_guest_inspect_stats(start_index, *args, **kwargs):
    if type(args[start_index]) is str:
        url = '/guests/stats?userid=%s' % args[start_index]
    else:
        userids = ','.join(args[start_index])
        url = '/guests/stats?userid=%s' % userids
    body = None
    return url, body


# GET vNIC stats for one userid (str) or several (iterable of str).
def req_guest_inspect_vnics(start_index, *args, **kwargs):
    if type(args[start_index]) is str:
        url = '/guests/interfacestats?userid=%s' % args[start_index]
    else:
        userids = ','.join(args[start_index])
        url = '/guests/interfacestats?userid=%s' % userids
    body = None
    return url, body


# GET NIC info, optionally filtered by userid / nic_id / vswitch.
def req_guests_get_nic_info(start_index, *args, **kwargs):
    url = '/guests/nics'
    # process appends in GET method
    userid = kwargs.get('userid', None)
    nic_id = kwargs.get('nic_id', None)
    vswitch = kwargs.get('vswitch', None)
    if ((userid is None) and
        (nic_id is None) and
        (vswitch is None)):
        append = ''
    else:
        append = "?"
        if userid is not None:
            append += 'userid=%s&' % userid
        if nic_id is not None:
            append += 'nic_id=%s&' % nic_id
        if vswitch is not None:
            append += 'vswitch=%s&' % vswitch
        # drop the trailing '&' left by the last parameter
        append = append.strip('&')
    url = url + append
    body = None
    return url, body
# FIXME: the order of args needs adjusting
# POST power-on action for a guest.
def req_guest_start(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'start'}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# POST power-off action for a guest.
def req_guest_stop(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'stop'}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# POST graceful shutdown action for a guest.
def req_guest_softstop(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'softstop'}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# POST pause action for a guest.
def req_guest_pause(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'pause'}
    return url, body


# POST unpause action for a guest.
def req_guest_unpause(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'unpause'}
    return url, body


# POST reboot (OS-level restart) action for a guest.
def req_guest_reboot(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'reboot'}
    return url, body


# POST reset (power-cycle) action for a guest.
def req_guest_reset(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'reset'}
    return url, body


# POST action retrieving a guest's console output.
def req_guest_get_console_output(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'get_console_output'}
    return url, body


# POST live-migration action: target zcc userid, destination, parms, op.
def req_guest_live_migrate(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'live_migrate_vm',
            'dest_zcc_userid': args[start_index],
            'destination': args[start_index + 1],
            'parms': args[start_index + 2],
            'operation': args[start_index + 3]}
    return url, body


# POST register action; port_macs is an optional trailing argument.
def req_guest_register(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'register_vm',
            'meta': args[start_index],
            'net_set': args[start_index + 1]}
    if len(args) - start_index == 3:
        body['port_macs'] = args[start_index + 2]
    return url, body


# POST deregister action for a guest.
def req_guest_deregister(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'deregister_vm'}
    return url, body


# POST live CPU resize action.
def req_guest_live_resize_cpus(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'live_resize_cpus',
            'cpu_cnt': args[start_index]}
    return url, body


# POST (offline) CPU resize action.
def req_guest_resize_cpus(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'resize_cpus',
            'cpu_cnt': args[start_index]}
    return url, body


# POST (offline) memory resize action.
def req_guest_resize_mem(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'resize_mem',
            'size': args[start_index]}
    return url, body


# POST live memory resize action.
def req_guest_live_resize_mem(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'live_resize_mem',
            'size': args[start_index]}
    return url, body


# POST root-volume grow action for the given OS version.
def req_guest_grow_root_volume(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'grow_root_volume',
            'os_version': args[start_index]}
    return url, body


# POST capture-to-image action.
def req_guest_capture(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'capture',
            'image': args[start_index]}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# POST deploy-from-image action.
def req_guest_deploy(start_index, *args, **kwargs):
    url = '/guests/%s/action'
    body = {'action': 'deploy',
            'image': args[start_index]}
    fill_kwargs_in_body(body, **kwargs)
    return url, body
# GET the guest's real (hypervisor-reported) power state.
def req_guest_get_power_state_real(start_index, *args, **kwargs):
    url = '/guests/%s/power_state_real'
    body = None
    return url, body


# GET basic guest info.
def req_guest_get_info(start_index, *args, **kwargs):
    url = '/guests/%s/info'
    body = None
    return url, body


# GET the guest's user directory entry.
def req_guest_get_user_direct(start_index, *args, **kwargs):
    url = '/guests/%s/user_direct'
    body = None
    return url, body


# GET the guest's network adapters info.
def req_guest_get_adapters_info(start_index, *args, **kwargs):
    url = '/guests/%s/adapters'
    body = None
    return url, body


# POST creation of a NIC; all parameters come through kwargs.
def req_guest_create_nic(start_index, *args, **kwargs):
    url = '/guests/%s/nic'
    body = {'nic': {}}
    fill_kwargs_in_body(body['nic'], **kwargs)
    return url, body


# DELETE a NIC (userid and vdev filled into the url).
def req_guest_delete_nic(start_index, *args, **kwargs):
    url = '/guests/%s/nic/%s'
    body = {}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# PUT: couple a NIC to the named vswitch.
def req_guest_nic_couple_to_vswitch(start_index, *args, **kwargs):
    url = '/guests/%s/nic/%s'
    body = {'info': {'couple': True,
                     'vswitch': args[start_index]}}
    fill_kwargs_in_body(body['info'], **kwargs)
    return url, body


# PUT: uncouple a NIC from its vswitch.
def req_guest_nic_uncouple_from_vswitch(start_index, *args, **kwargs):
    url = '/guests/%s/nic/%s'
    body = {'info': {'couple': False}}
    fill_kwargs_in_body(body['info'], **kwargs)
    return url, body


# POST creation of a network interface inside the guest OS.
def req_guest_create_network_interface(start_index, *args, **kwargs):
    url = '/guests/%s/interface'
    body = {'interface': {'os_version': args[start_index],
                          'guest_networks': args[start_index + 1]}}
    fill_kwargs_in_body(body['interface'], **kwargs)
    return url, body


# DELETE a network interface from the guest OS.
def req_guest_delete_network_interface(start_index, *args, **kwargs):
    url = '/guests/%s/interface'
    body = {'interface': {'os_version': args[start_index],
                          'vdev': args[start_index + 1]}}
    fill_kwargs_in_body(body['interface'], **kwargs)
    return url, body


# GET the guest's (cached) power state.
def req_guest_get_power_state(start_index, *args, **kwargs):
    url = '/guests/%s/power_state'
    body = None
    return url, body


# POST creation of additional disks.
def req_guest_create_disks(start_index, *args, **kwargs):
    url = '/guests/%s/disks'
    body = {'disk_info': {'disk_list': args[start_index]}}
    return url, body


# DELETE disks by vdev list.
def req_guest_delete_disks(start_index, *args, **kwargs):
    url = '/guests/%s/disks'
    body = {'vdev_info': {'vdev_list': args[start_index]}}
    return url, body


# PUT minidisk configuration (format/mount) for the guest.
def req_guest_config_minidisks(start_index, *args, **kwargs):
    url = '/guests/%s/disks'
    body = {'disk_info': {'disk_list': args[start_index]}}
    fill_kwargs_in_body(body['disk_info'], **kwargs)
    return url, body
# POST volume attach with a connection_info payload.
def req_volume_attach(start_index, *args, **kwargs):
    url = '/guests/volumes'
    body = {'info': {'connection': args[start_index]}}
    return url, body


# DELETE volume detach with a connection_info payload.
def req_volume_detach(start_index, *args, **kwargs):
    url = '/guests/volumes'
    body = {'info': {'connection': args[start_index]}}
    return url, body


# PUT bootmap refresh; most fields are optional and default to empty.
def req_volume_refresh_bootmap(start_index, *args, **kwargs):
    url = '/volumes/volume_refresh_bootmap'
    fcpchannel = kwargs.get('fcpchannels', None)
    wwpn = kwargs.get('wwpn', None)
    lun = kwargs.get('lun', None)
    wwid = kwargs.get('wwid', '')
    transportfiles = kwargs.get('transportfiles', '')
    guest_networks = kwargs.get('guest_networks', [])
    fcp_template_id = kwargs.get('fcp_template_id', None)
    body = {'info':
            {
                "fcpchannel": fcpchannel,
                "wwpn": wwpn,
                "lun": lun,
                "wwid": wwid,
                "transportfiles": transportfiles,
                "guest_networks": guest_networks,
                "fcp_template_id": fcp_template_id
            }
            }
    fill_kwargs_in_body(body['info'], **kwargs)
    return url, body


# GET/POST a volume connector for the userid in the url.
def req_get_volume_connector(start_index, *args, **kwargs):
    url = '/volumes/conn/%s'
    reserve = kwargs.get('reserve', False)
    fcp_template_id = kwargs.get('fcp_template_id', None)
    sp_name = kwargs.get('storage_provider', None)
    body = {'info':
            {
                "reserve": reserve,
                "fcp_template_id": fcp_template_id,
                "storage_provider": sp_name
            }
            }
    fill_kwargs_in_body(body['info'], **kwargs)
    return url, body


# GET FCP templates; only the first matching filter is applied, in the
# priority order template_id_list > assigner_id > storage_providers >
# host_default.
def req_get_fcp_templates(start_index, *args, **kwargs):
    url = '/volumes/fcptemplates'
    template_id_list = kwargs.get('template_id_list', None)
    assigner_id = kwargs.get('assigner_id', None)
    default_sp_list = kwargs.get('storage_providers', None)
    host_default = kwargs.get('host_default', False)
    if template_id_list:
        url += "?template_id_list=%s" % template_id_list
    elif assigner_id:
        url += "?assigner_id=%s" % assigner_id
    elif default_sp_list:
        url += "?storage_providers=%s" % default_sp_list
    elif host_default:
        url += "?host_default=%s" % host_default
    body = None
    return url, body


# GET FCP template details, with raw/statistics/sync flags as query args.
def req_get_fcp_templates_details(start_index, *args, **kwargs):
    url = '/volumes/fcptemplates/detail'
    template_id_list = kwargs.get('template_id_list', None)
    raw = kwargs.get('raw', False)
    statistics = kwargs.get('statistics', True)
    sync_with_zvm = kwargs.get('sync_with_zvm', False)
    if template_id_list:
        url += "?template_id_list=%s&" % template_id_list
    else:
        url += "?"
    url += "raw=%s&" % raw
    url += "statistics=%s&" % statistics
    url += "sync_with_zvm=%s" % sync_with_zvm
    body = None
    return url, body


# DELETE an FCP template by id.
def req_delete_fcp_template(start_index, *args, **kwargs):
    url = '/volumes/fcptemplates/%s'
    body = None
    return url, body


# GET usage info for one FCP device.
def req_get_fcp_usage(start_index, *args, **kwargs):
    url = '/volumes/fcp/%s'
    body = None
    return url, body


# PUT usage info (userid, reserved, connections, template) for an FCP.
def req_set_fcp_usage(start_index, *args, **kwargs):
    url = '/volumes/fcp/%s'
    body = {'info': {'userid': args[start_index],
                     'reserved': args[start_index + 1],
                     'connections': args[start_index + 2],
                     'fcp_template_id': args[start_index + 3]}}
    fill_kwargs_in_body(body['info'], **kwargs)
    return url, body


# POST creation of an FCP template named args[start_index].
def req_create_fcp_template(start_index, *args, **kwargs):
    url = '/volumes/fcptemplates'
    body = {'name': args[start_index]}
    fill_kwargs_in_body(body, **kwargs)
    return url, body


# PUT edits to an existing FCP template.
def req_edit_fcp_template(start_index, *args, **kwargs):
    # the function is called by _get_url_body_headers()
    url = '/volumes/fcptemplates/%s'
    body = dict()
    # no other args except fcp_templated_id in url path,
    # param in url path is set by _get_url_body_headers().
    # hence, only save kwargs in body
    fill_kwargs_in_body(body, **kwargs)
    return url, body
# GET host-level info.
def req_host_get_info(start_index, *args, **kwargs):
    url = '/host'
    body = None
    return url, body


# GET list of guests on the host.
def req_host_get_guest_list(start_index, *args, **kwargs):
    url = '/host/guests'
    body = None
    return url, body


# GET volumes of a disk pool (optional poolname filter).
def req_host_get_diskpool_volumes(start_index, *args, **kwargs):
    url = '/host/diskpool_volumes'
    poolname = kwargs.get('disk_pool', None)
    append = ''
    if poolname is not None:
        append += "?poolname=%s" % poolname
    url += append
    body = None
    return url, body


# GET disk pool info (optional poolname filter).
def req_host_diskpool_get_info(start_index, *args, **kwargs):
    url = '/host/diskpool'
    poolname = kwargs.get('disk_pool', None)
    append = ''
    if poolname is not None:
        append += "?poolname=%s" % poolname
    url += append
    body = None
    return url, body


# GET info of one volume (optional volumename filter).
def req_host_get_volume_info(start_index, *args, **kwargs):
    url = '/host/volume'
    volumename = kwargs.get('volume', None)
    append = ''
    if volumename is not None:
        append += "?volumename=%s" % volumename
    url += append
    body = None
    return url, body


# GET SSI cluster info.
def req_host_get_ssi_info(start_index, *args, **kwargs):
    url = '/host/ssi'
    body = None
    return url, body
# POST image import: name, source url, metadata, plus optional kwargs.
def req_image_import(start_index, *args, **kwargs):
    url = '/images'
    body = {'image': {'image_name': args[start_index],
                      'url': args[start_index + 1],
                      'image_meta': args[start_index + 2]}}
    fill_kwargs_in_body(body['image'], **kwargs)
    return url, body


# GET images, optionally filtered by imagename.
def req_image_query(start_index, *args, **kwargs):
    url = '/images'
    image_name = kwargs.get('imagename', None)
    if image_name is None:
        append = ''
    else:
        append = "?"
        append += "imagename=%s" % image_name
    url += append
    body = None
    return url, body


# DELETE an image by name.
def req_image_delete(start_index, *args, **kwargs):
    url = '/images/%s'
    body = None
    return url, body


# PUT image export to dest_url.
def req_image_export(start_index, *args, **kwargs):
    url = '/images/%s'
    body = {'location': {'dest_url': args[start_index]}}
    fill_kwargs_in_body(body['location'], **kwargs)
    return url, body


# GET the root disk size of an image.
def req_image_get_root_disk_size(start_index, *args, **kwargs):
    url = '/images/%s/root_disk_size'
    body = None
    return url, body


# PUT file import; body is the file content produced by get_data_file
# (defined elsewhere in this module).
def req_file_import(start_index, *args, **kwargs):
    url = '/files'
    file_spath = args[start_index]
    body = get_data_file(file_spath)
    return url, body


# GET file export of the named source file.
def req_file_export(start_index, *args, **kwargs):
    url = '/files'
    body = {'source_file': args[start_index]}
    return url, body


# POST token creation (credentials travel in headers, not the body).
def req_token_create(start_index, *args, **kwargs):
    url = '/token'
    body = None
    return url, body
def req_vswitch_get_list(start_index, *args, **kwargs):
url = '/vswitches'
body = None
return url, body
def req_vswitch_create(start_index, *args, **kwargs):
url = '/vswitches'
body = {'vswitch': {'name': args[start_index]}}
fill_kwargs_in_body(body['vswitch'], **kwargs)
return url, body
def req_vswitch_delete(start_index, *args, **kwargs):
url = '/vswitches/%s'
body = None
return url, body
def req_vswitch_query(start_index, *args, **kwargs):
url = '/vswitches/%s'
body = None
return url, body
def req_vswitch_grant_user(start_index, *args, **kwargs):
url = '/vswitches/%s'
body = {'vswitch': {'grant_userid': args[start_index]}}
fill_kwargs_in_body(body['vswitch'], **kwargs)
return url, body
def req_vswitch_revoke_user(start_index, *args, **kwargs):
url = '/vswitches/%s'
body = {'vswitch': {'revoke_userid': args[start_index]}}
fill_kwargs_in_body(body['vswitch'], **kwargs)
return url, body
def req_vswitch_set_vlan_id_for_user(start_index, *args, **kwargs):
    """Compose URL and body for setting a user's VLAN id on a vswitch."""
    user_vlan = {'userid': args[start_index],
                 'vlanid': args[start_index + 1]}
    body = {'vswitch': {'user_vlan_id': user_vlan}}
    fill_kwargs_in_body(body['vswitch'], **kwargs)
    return '/vswitches/%s', body
# Save data used for composing a RESTful request.
# Each entry maps an SDK API name to:
#   method: HTTP request type (GET/POST/PUT/DELETE)
#   args_required: arguments in args are required, record the count here.
#                  if len(args) not equal to this number, raise exception
#   args_optional: (optional key) count of additional optional args allowed
#   params_path: parameters amount in url path
#   request: function that provides url and body for composing a request
DATABASE = {
    'version': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_version},
    'guest_create': {
        'method': 'POST',
        'args_required': 3,
        'params_path': 0,
        'request': req_guest_create},
    'guest_list': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_guest_list},
    'guest_inspect_stats': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 0,
        'request': req_guest_inspect_stats},
    'guest_inspect_vnics': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 0,
        'request': req_guest_inspect_vnics},
    'guests_get_nic_info': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_guests_get_nic_info},
    'guest_delete': {
        'method': 'DELETE',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_delete},
    'guest_get_definition_info': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_definition_info},
    'guest_start': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_start},
    'guest_stop': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_stop},
    'guest_softstop': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_softstop},
    'guest_pause': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_pause},
    'guest_unpause': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_unpause},
    'guest_reboot': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_reboot},
    'guest_reset': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_reset},
    'guest_get_console_output': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_console_output},
    'guest_register': {
        'method': 'POST',
        'args_required': 3,
        'args_optional': 1,
        'params_path': 1,
        'request': req_guest_register},
    'guest_deregister': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_deregister},
    'guest_live_migrate': {
        'method': 'POST',
        'args_required': 5,
        'params_path': 1,
        'request': req_guest_live_migrate},
    'guest_live_resize_cpus': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_live_resize_cpus},
    'guest_resize_cpus': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_resize_cpus},
    'guest_live_resize_mem': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_live_resize_mem},
    'guest_resize_mem': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_resize_mem},
    'guest_grow_root_volume': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_grow_root_volume},
    'guest_capture': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_capture},
    'guest_deploy': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_deploy},
    'guest_get_power_state_real': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_power_state_real},
    'guest_get_info': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_info},
    'guest_get_user_direct': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_user_direct},
    'guest_get_adapters_info': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_adapters_info},
    'guest_create_nic': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_create_nic},
    'guest_delete_nic': {
        'method': 'DELETE',
        'args_required': 2,
        'params_path': 2,
        'request': req_guest_delete_nic},
    'guest_nic_couple_to_vswitch': {
        'method': 'PUT',
        'args_required': 3,
        'params_path': 2,
        'request': req_guest_nic_couple_to_vswitch},
    'guest_nic_uncouple_from_vswitch': {
        'method': 'PUT',
        'args_required': 2,
        'params_path': 2,
        'request': req_guest_nic_uncouple_from_vswitch},
    'guest_create_network_interface': {
        'method': 'POST',
        'args_required': 3,
        'params_path': 1,
        'request': req_guest_create_network_interface},
    'guest_delete_network_interface': {
        'method': 'DELETE',
        'args_required': 3,
        'params_path': 1,
        'request': req_guest_delete_network_interface},
    'guest_get_power_state': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_guest_get_power_state},
    'guest_create_disks': {
        'method': 'POST',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_create_disks},
    'guest_delete_disks': {
        'method': 'DELETE',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_delete_disks},
    'guest_config_minidisks': {
        'method': 'PUT',
        'args_required': 2,
        'params_path': 1,
        'request': req_guest_config_minidisks},
    'volume_attach': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 0,
        'request': req_volume_attach},
    'volume_detach': {
        'method': 'DELETE',
        'args_required': 1,
        'params_path': 0,
        'request': req_volume_detach},
    'volume_refresh_bootmap': {
        'method': 'PUT',
        'args_required': 0,
        'params_path': 0,
        'request': req_volume_refresh_bootmap},
    'get_volume_connector': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_get_volume_connector},
    'get_fcp_templates': {
        'method': 'GET',
        'args_required': 0,
        'args_optional': 1,
        'params_path': 0,
        'request': req_get_fcp_templates},
    'get_fcp_templates_details': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_get_fcp_templates_details},
    'delete_fcp_template': {
        'method': 'DELETE',
        'args_required': 1,
        'params_path': 1,
        'request': req_delete_fcp_template},
    'get_fcp_usage': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_get_fcp_usage},
    'set_fcp_usage': {
        'method': 'PUT',
        'args_required': 5,
        'params_path': 1,
        'request': req_set_fcp_usage},
    'create_fcp_template': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 0,
        'request': req_create_fcp_template},
    'edit_fcp_template': {
        'method': 'PUT',
        # args_required and args_optional are used for args rather than kwargs,
        # refer to 'def _check_arguments' for details.
        # In total,
        # 1 args: fcp_template_id
        # 5 kwargs: name, desc, fcp_devices, host_default, storage_providers
        # args_required : 1
        #   fcp_template_id
        'args_required': 1,
        # params_path is the count of params in url path,
        # url path is '/volumes/fcptemplates/%s'
        # %s is for fcp_template_id
        # %s is from args
        'params_path': 1,
        'request': req_edit_fcp_template},
    'host_get_info': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_get_info},
    'host_get_guest_list': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_get_guest_list},
    'host_get_diskpool_volumes': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_get_diskpool_volumes},
    'host_diskpool_get_info': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_diskpool_get_info},
    'host_get_volume_info': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_get_volume_info},
    'host_get_ssi_info': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_host_get_ssi_info},
    'image_import': {
        'method': 'POST',
        'args_required': 3,
        'params_path': 0,
        'request': req_image_import},
    'image_query': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_image_query},
    'image_delete': {
        'method': 'DELETE',
        'args_required': 1,
        'params_path': 1,
        'request': req_image_delete},
    'image_export': {
        'method': 'PUT',
        'args_required': 2,
        'params_path': 1,
        'request': req_image_export},
    'image_get_root_disk_size': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_image_get_root_disk_size},
    'file_import': {
        'method': 'PUT',
        'args_required': 1,
        'params_path': 0,
        'request': req_file_import},
    'file_export': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 0,
        'request': req_file_export},
    'token_create': {
        'method': 'POST',
        'args_required': 0,
        'params_path': 0,
        'request': req_token_create},
    'vswitch_get_list': {
        'method': 'GET',
        'args_required': 0,
        'params_path': 0,
        'request': req_vswitch_get_list},
    'vswitch_create': {
        'method': 'POST',
        'args_required': 1,
        'params_path': 0,
        'request': req_vswitch_create},
    'vswitch_delete': {
        'method': 'DELETE',
        'args_required': 1,
        'params_path': 1,
        'request': req_vswitch_delete},
    'vswitch_grant_user': {
        'method': 'PUT',
        'args_required': 2,
        'params_path': 1,
        'request': req_vswitch_grant_user},
    'vswitch_query': {
        'method': 'GET',
        'args_required': 1,
        'params_path': 1,
        'request': req_vswitch_query},
    'vswitch_revoke_user': {
        'method': 'PUT',
        'args_required': 2,
        'params_path': 1,
        'request': req_vswitch_revoke_user},
    'vswitch_set_vlan_id_for_user': {
        'method': 'PUT',
        'args_required': 3,
        'params_path': 1,
        'request': req_vswitch_set_vlan_id_for_user},
    }
def get_data_file(fpath):
    """Open the file at *fpath* for binary reading.

    The returned file object is handed to ``requests`` as a streamed
    request body; the caller (or requests itself) is responsible for
    closing it.

    :param fpath: path of the file to upload
    :returns: an open binary file object, or None when fpath is
              empty/None (explicit, instead of the original implicit
              fall-through return)
    """
    if fpath:
        return open(fpath, 'rb')
    return None
class RESTClient(object):
    """REST client for the z/VM Cloud Connector (sdkserver) HTTP API.

    Each SDK API is described by an entry in DATABASE; ``call`` validates
    the arguments, composes URL/body/headers from that table and issues
    the HTTP request with ``requests``, optionally authenticating with a
    token obtained from the admin-token file.
    """

    def __init__(self, ip='127.0.0.1', port=8888,
                 ssl_enabled=False, verify=False,
                 token_path=None, auth=None):
        """Initialize the client.

        :param ip: host name or IP address of the SDK server
        :param port: port the SDK server listens on
        :param ssl_enabled: use https when True
        :param verify: False to skip certificate verification, True to
                       verify with default CAs, or a str path to a CA file
        :param token_path: path of the admin-token file (token auth only)
        :param auth: 'token' to send X-Auth-Token with each request
        :raises CACertNotFound: when verify names a non-existent CA file
        """
        # SSL enable or not
        if ssl_enabled:
            self.base_url = "https://" + ip + ":" + str(port)
        else:
            self.base_url = "http://" + ip + ":" + str(port)
        # if value of verify is a str, it is the path of the CA
        # certificate (isinstance is the idiomatic type check).
        if isinstance(verify, str):
            if not os.path.exists(verify):
                raise CACertNotFound('CA certificate file not found.')
        self.verify = verify
        self.token_path = token_path
        # need send token to validate
        # This is client, so must NOT use zvmsdk.conf file setting
        self.auth = auth

    def _check_arguments(self, api_name, *args, **kwargs):
        """Validate the API name and the positional-argument count.

        :raises APINameNotFound: unknown api_name
        :raises ArgsFormatError: too few or too many positional args
        """
        # check api_name exist or not
        if api_name not in DATABASE.keys():
            msg = "API name %s not exist." % api_name
            raise APINameNotFound(msg)
        # check args count is valid
        count = DATABASE[api_name]['args_required']
        optional = 0
        if 'args_optional' in DATABASE[api_name].keys():
            optional = DATABASE[api_name]['args_optional']
        if len(args) < count:
            msg = "Missing some args,please check:%s." % str(args)
            raise ArgsFormatError(msg)
        if len(args) > count + optional:
            msg = "Too many args,please check:%s." % str(args)
            raise ArgsFormatError(msg)

    def _get_admin_token(self, path):
        """Read and return the admin token stored in the file at *path*.

        :raises TokenNotFound: the token file does not exist
        :raises TokenFileOpenError: the token file could not be read
        """
        if os.path.exists(path):
            # Serialize readers against whoever rewrites the token file.
            TOKEN_LOCK.acquire()
            try:
                with open(path, 'r') as fd:
                    token = fd.read().strip()
            except Exception:
                raise TokenFileOpenError('token file open failed.')
            finally:
                TOKEN_LOCK.release()
        else:
            raise TokenNotFound('token file not found.')
        return token

    def _get_token(self):
        """Exchange the admin token for a request token (X-Auth-Token).

        :raises ServiceUnavailable: server answered 503
        :raises UnexpectedResponse: no X-Auth-Token header in the reply
        """
        _headers = {'Content-Type': 'application/json'}
        admin_token = self._get_admin_token(self.token_path)
        _headers['X-Admin-Token'] = admin_token

        url = self.base_url + '/token'
        method = 'POST'
        response = requests.request(method, url, headers=_headers,
                                    verify=self.verify)
        if response.status_code == 503:
            # service unavailable
            raise ServiceUnavailable(response)
        else:
            try:
                token = response.headers['X-Auth-Token']
            except KeyError:
                raise UnexpectedResponse(response)
        return token

    def _get_url_body_headers(self, api_name, *args, **kwargs):
        """Compose the full URL, request body and headers for an API call."""
        headers = {}
        headers['Content-Type'] = 'application/json'
        count_params_in_path = DATABASE[api_name]['params_path']
        func = DATABASE[api_name]['request']
        url, body = func(count_params_in_path, *args, **kwargs)
        if api_name in ['file_import']:
            # file uploads stream raw bytes, not JSON
            headers['Content-Type'] = 'application/octet-stream'
        if count_params_in_path > 0:
            # the leading args fill the %s placeholders in the URL path
            url = url % tuple(args[0:count_params_in_path])
        full_url = '%s%s' % (self.base_url, url)
        return full_url, body, headers

    def _process_rest_response(self, response):
        """Validate the response content type; stream octet-stream bodies.

        :returns: (response, body_iter) where body_iter is a chunk
                  generator for octet-stream replies, else None
        :raises UnexpectedResponse: content type is neither JSON nor
                  octet-stream (e.g. an HTTP-server error page)
        """
        # Default to '' so a missing Content-Type header raises
        # UnexpectedResponse below instead of TypeError ('in None').
        content_type = response.headers.get('Content-Type', '')
        if ('application/json' not in content_type) and (
            'application/octet-stream' not in content_type):
            # Currently, all the response content from zvmsdk wsgi are
            # 'application/json' or application/octet-stream type.
            # If it is not, the response may be sent by HTTP server due
            # to internal server error or time out,
            # it is an unexpected response to the rest client.
            # If new content-type is added to the response by sdkwsgi, the
            # parsing function here is also required to change.
            raise UnexpectedResponse(response)
        # Read body into string if it isn't obviously image data
        if 'application/octet-stream' in content_type:
            # Do not read the whole response into memory when
            # downloading a file; hand back a closing chunk iterator.
            body_iter = self._close_after_stream(response, CHUNKSIZE)
        else:
            body_iter = None
        return response, body_iter

    def api_request(self, url, method='GET', body=None, headers=None,
                    **kwargs):
        """Send one HTTP request; stream when the body is binary."""
        _headers = {}
        _headers.update(headers or {})
        if body is not None and not isinstance(body, six.string_types):
            try:
                body = json.dumps(body)
            except TypeError:
                # if data is a file-like object, pass it through unchanged
                body = body
        if self.auth == 'token' and self.token_path is not None:
            _headers['X-Auth-Token'] = self._get_token()
        # Read the merged headers: the original indexed the raw
        # ``headers`` parameter, which raised TypeError when the
        # default (None) was used.
        content_type = _headers.get('Content-Type')
        stream = content_type == 'application/octet-stream'
        if stream:
            response = requests.request(method, url, data=body,
                                        headers=_headers,
                                        verify=self.verify,
                                        stream=stream)
        else:
            response = requests.request(method, url, data=body,
                                        headers=_headers,
                                        verify=self.verify)
        return response

    def call(self, api_name, *args, **kwargs):
        """Invoke an SDK API by name and return SDK-format results.

        Failures are converted into the SDK error-dictionary format
        instead of being raised to the caller.
        """
        try:
            # check validation of arguments
            self._check_arguments(api_name, *args, **kwargs)
            # get method by api_name
            method = DATABASE[api_name]['method']
            # get url,body with api_name and method
            url, body, headers = self._get_url_body_headers(api_name,
                                                            *args, **kwargs)
            response = self.api_request(url, method, body=body,
                                        headers=headers)
            # change response to SDK format
            resp, body_iter = self._process_rest_response(response)

            if api_name == 'file_export' and resp.status_code == 200:
                # Save the file in a temporary path
                return self._save_exported_file(body_iter)

            results = json.loads(resp.content)
        # In each handler below, copy() the shared module-level error
        # template before update(): the original mutated the template
        # itself, corrupting it for every later caller.
        except TokenFileOpenError as err:
            errmsg = REST_REQUEST_ERROR[1][4] % {'error': err.msg}
            results = REST_REQUEST_ERROR[0].copy()
            results.update({'rs': 4, 'errmsg': errmsg, 'output': ''})
        except TokenNotFound as err:
            errmsg = REST_REQUEST_ERROR[1][2] % {'error': err.msg}
            results = REST_REQUEST_ERROR[0].copy()
            results.update({'rs': 2, 'errmsg': errmsg, 'output': ''})
        except UnexpectedResponse as err:
            errmsg = REST_REQUEST_ERROR[1][3] % ({
                'url': err.resp.url, 'status': err.resp.status_code,
                'reason': err.resp.reason, 'text': err.resp.text})
            results = REST_REQUEST_ERROR[0].copy()
            results.update({'rs': 3, 'errmsg': errmsg, 'output': ''})
        except ServiceUnavailable as err:
            errmsg = SERVICE_UNAVAILABLE_ERROR[1][2] % {
                'reason': err.resp.reason, 'text': err.resp.text}
            results = SERVICE_UNAVAILABLE_ERROR[0].copy()
            results.update({'rs': 2, 'errmsg': errmsg, 'output': ''})
        except Exception as err:
            errmsg = REST_REQUEST_ERROR[1][1] % {'error': six.text_type(err)}
            results = REST_REQUEST_ERROR[0].copy()
            results.update({'rs': 1, 'errmsg': errmsg, 'output': ''})

        return results

    def _save_exported_file(self, body_iter):
        """Stream an exported file to a world-writable temp dir and
        return SDK-format results describing it."""
        fname = str(uuid.uuid1())
        tempDir = tempfile.mkdtemp()
        # 0777 so the (possibly different) consuming user can read it
        os.chmod(tempDir, 0o777)
        target_file = '/'.join([tempDir, fname])
        self._save_file(body_iter, target_file)
        file_size = os.path.getsize(target_file)
        output = {'filesize_in_bytes': file_size,
                  'dest_url': target_file}
        results = {'overallRC': 0, 'modID': None, 'rc': 0,
                   'output': output, 'rs': 0, 'errmsg': ''}
        return results

    def _close_after_stream(self, response, chunk_size):
        """Iterate over the content and ensure the response is closed after."""
        # Yield each chunk in the response body
        for chunk in response.iter_content(chunk_size=chunk_size):
            yield chunk
        # Once we're done streaming the body, ensure everything is closed.
        response.close()

    def _save_file(self, data, path):
        """Save a file to the specified path.

        :param data: iterable of binary chunks of the file
        :param path: path to save the file to
        """
        with open(path, 'wb') as tfile:
            for chunk in data:
                tfile.write(chunk)
import math
from smtLayer import msgs
fiveGigSize = (1024 * 5)    # 5 gigabytes expressed in megabytes
modId = 'GUT'               # module id used when composing message text
def cvtToBlocks(rh, diskSize):
    """
    Convert a disk storage value to a number of 512-byte blocks.

    Input:
       Request Handle
       Size of disk, either a plain integer string (already blocks) or a
       number suffixed with 'M' or 'G'

    Output:
       Results structure:
          overallRC - 0 on success, 4 on input validation error
          rc        - Return code causing the return. Same as overallRC.
          rs        - Reason code causing the return.
          errno     - Errno value causing the return. Always zero.
       Converted value in blocks (a string); on conversion failure the
       original (normalized) input string is returned unchanged.
    """
    rh.printSysLog("Enter generalUtils.cvtToBlocks")
    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}

    blocks = diskSize.strip().upper()
    magnitude = blocks[-1]
    if magnitude in ('G', 'M'):
        # Strip the magnitude and convert the byte count to blocks.
        numPart = blocks[:-1]
        if not numPart:
            # The size of the disk is not valid.
            msg = msgs.msg['0200'][1] % (modId, blocks)
            rh.printLn("ES", msg)
            results = msgs.msg['0200'][0]
        else:
            try:
                bytesPerUnit = (1024 * 1024 if magnitude == 'M'
                                else 1024 * 1024 * 1024)
                blocks = str(int(math.ceil(
                    (float(numPart) * bytesPerUnit) / 512)))
            except Exception:
                # Failed to convert to a number of blocks.
                msg = msgs.msg['0201'][1] % (modId, numPart)
                rh.printLn("ES", msg)
                results = msgs.msg['0201'][0]
    elif blocks.strip('1234567890'):
        # Size is not an integer count of blocks.
        msg = msgs.msg['0202'][1] % (modId, blocks)
        rh.printLn("ES", msg)
        results = msgs.msg['0202'][0]

    rh.printSysLog("Exit generalUtils.cvtToBlocks, rc: " +
                   str(results['overallRC']))
    return results, blocks
def cvtToCyl(rh, diskSize):
    """
    Convert a disk storage value to a number of cylinders.

    Input:
       Request Handle
       Size of disk in bytes

    Output:
       Results structure:
          overallRC - Overall return code for the function:
                      0 - Everything went ok
                      4 - Input validation error
          rc        - Return code causing the return. Same as overallRC.
          rs        - Reason code causing the return.
          errno     - Errno value causing the return. Always zero.
       Converted value in cylinders
    """
    rh.printSysLog("Enter generalUtils.cvtToCyl")
    cyl = 0
    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}

    cyl = diskSize.strip().upper()
    lastChar = cyl[-1]
    if lastChar == 'G' or lastChar == 'M':
        # Convert the bytes to cylinders
        byteSize = cyl[:-1]
        if byteSize == '':
            # The size of the disk is not valid.
            # Bug fix: report the full size string, not just the
            # magnitude character (matches cvtToBlocks).
            msg = msgs.msg['0200'][1] % (modId, cyl)
            rh.printLn("ES", msg)
            results = msgs.msg['0200'][0]
        else:
            try:
                if lastChar == 'M':
                    # 737280 = bytes per 3390 cylinder (15 tracks * 48 KB)
                    cyl = (float(byteSize) * 1024 * 1024) / 737280
                elif lastChar == 'G':
                    cyl = (float(byteSize) * 1024 * 1024 * 1024) / 737280
                cyl = str(int(math.ceil(cyl)))
            except Exception:
                # Failed to convert to a number of cylinders.
                msg = msgs.msg['0203'][1] % (modId, byteSize)
                rh.printLn("ES", msg)
                results = msgs.msg['0203'][0]
    elif cyl.strip('1234567890'):
        # Size is not an integer value.
        msg = msgs.msg['0204'][1] % (modId, cyl)
        rh.printLn("ES", msg)
        # Bug fix: store the results for msg 0204, the message actually
        # printed above (was msgs.msg['0202'][0]).
        results = msgs.msg['0204'][0]

    rh.printSysLog("Exit generalUtils.cvtToCyl, rc: " +
                   str(results['overallRC']))
    return results, cyl
def cvtToMag(rh, size):
    """
    Convert a size in bytes to a short string with an M or G magnitude.

    Input:
       Request Handle
       Size in bytes

    Output:
       Converted value with a magnitude, e.g. '6.0G' or '100.0M'
    """
    rh.printSysLog("Enter generalUtils.cvtToMag")

    sizeInM = size / (1024 * 1024)
    if sizeInM > (1024 * 5):
        # More than 5G worth of megabytes: report in gigabytes.
        magSize = "%.1fG" % (sizeInM / 1024)
    else:
        # 5G or less: report in megabytes.
        magSize = "%.1fM" % sizeInM

    rh.printSysLog("Exit generalUtils.cvtToMag, magSize: " + magSize)
    return magSize
def getSizeFromPage(rh, page):
    """
    Convert a count of 4K pages into a size string with a magnitude.

    Input:
       Request Handle
       Size in pages

    Output:
       Converted value with a magnitude
    """
    rh.printSysLog("Enter generalUtils.getSizeFromPage")

    # Each page is 4096 bytes; delegate the formatting to cvtToMag.
    magSize = cvtToMag(rh, float(page) * 4096)

    rh.printSysLog("Exit generalUtils.getSizeFromPage, magSize: " + magSize)
    return magSize
def parseCmdline(rh, posOpsList, keyOpsList):
    """
    Parse the request command input.

    Input:
       Request Handle
       Positional Operands List.  This is a dictionary that contains
       an array for each subfunction.  The array contains a entry
       (itself an array) for each positional operand.
       That array contains:
          - Human readable name of the operand,
          - Property in the parms dictionary to hold the value,
          - Is it required (True) or optional (False),
          - Type of data (1: int, 2: string).
       Keyword Operands List.  This is a dictionary that contains
       an item for each subfunction.  The value for the subfunction is a
       dictionary that contains a key for each recognized operand.
       The value associated with the key is an array that contains
       the following:
          - the related ReqHandle.parms item that stores the value,
          - how many values follow the keyword, and
          - the type of data for those values (1: int, 2: string)

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter generalUtils.parseCmdline")

    # Handle any positional operands on the line.
    # Positional operands are consumed in order; parsing stops at the
    # first operand that fails integer conversion.
    if rh.results['overallRC'] == 0 and rh.subfunction in posOpsList:
        ops = posOpsList[rh.subfunction]
        currOp = 0
        # While we have operands on the command line AND
        # we have more operands in the positional operand list.
        while rh.argPos < rh.totalParms and currOp < len(ops):
            key = ops[currOp][1]            # key for rh.parms[]
            opType = ops[currOp][3]         # data type
            if opType == 1:
                # Handle an integer data type
                try:
                    rh.parms[key] = int(rh.request[rh.argPos])
                except ValueError:
                    # keyword is not an integer
                    msg = msgs.msg['0001'][1] % (modId, rh.function,
                        rh.subfunction, (currOp + 1),
                        ops[currOp][0], rh.request[rh.argPos])
                    rh.printLn("ES", msg)
                    rh.updateResults(msgs.msg['0001'][0])
                    break
            else:
                # Non-integer types are stored as the raw string.
                rh.parms[key] = rh.request[rh.argPos]
            currOp += 1
            rh.argPos += 1

        if (rh.argPos >= rh.totalParms and currOp < len(ops) and
            ops[currOp][2] is True):
            # Check for missing required operands.
            msg = msgs.msg['0002'][1] % (modId, rh.function,
                rh.subfunction, ops[currOp][0], (currOp + 1))
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0002'][0])

    # Handle any keyword operands on the line.
    # Each keyword may be a flag (0 values), take a fixed number of
    # values, or (negative count) consume all remaining operands.
    if rh.results['overallRC'] == 0 and rh.subfunction in keyOpsList:
        while rh.argPos < rh.totalParms:
            if rh.request[rh.argPos] in keyOpsList[rh.subfunction]:
                keyword = rh.request[rh.argPos]
                rh.argPos += 1
                ops = keyOpsList[rh.subfunction]
                if keyword in ops:
                    key = ops[keyword][0]
                    opCnt = ops[keyword][1]
                    opType = ops[keyword][2]
                    if opCnt == 0:
                        # Keyword has no additional value
                        rh.parms[key] = True
                    else:
                        # Keyword has values following it.
                        storeIntoArray = False  # Assume single word
                        if opCnt < 0:
                            storeIntoArray = True
                            # Property is a list all of the rest of the parms.
                            opCnt = rh.totalParms - rh.argPos
                            if opCnt == 0:
                                # Need at least 1 operand value
                                opCnt = 1
                        elif opCnt > 1:
                            storeIntoArray = True
                        if opCnt + rh.argPos > rh.totalParms:
                            # keyword is missing its related value operand
                            msg = msgs.msg['0003'][1] % (modId, rh.function,
                                rh.subfunction, keyword)
                            rh.printLn("ES", msg)
                            rh.updateResults(msgs.msg['0003'][0])
                            break

                        """
                        Add the expected value to the property.
                        Take into account if there are more than 1.
                        """
                        if storeIntoArray:
                            # Initialize the list.
                            rh.parms[key] = []
                        for i in range(0, opCnt):
                            if opType == 1:
                                # convert from string to int and save it.
                                try:
                                    if not storeIntoArray:
                                        rh.parms[key] = (
                                            int(rh.request[rh.argPos]))
                                    else:
                                        rh.parms[key].append(int(
                                            rh.request[rh.argPos]))
                                except ValueError:
                                    # keyword is not an integer
                                    msg = (msgs.msg['0004'][1] %
                                        (modId, rh.function, rh.subfunction,
                                        keyword, rh.request[rh.argPos]))
                                    rh.printLn("ES", msg)
                                    rh.updateResults(msgs.msg['0004'][0])
                                    break
                            else:
                                # Value is a string, save it.
                                if not storeIntoArray:
                                    rh.parms[key] = rh.request[rh.argPos]
                                else:
                                    rh.parms[key].append(rh.request[rh.argPos])
                            rh.argPos += 1
                        if rh.results['overallRC'] != 0:
                            # Upper loop had an error break from loops.
                            break
                else:
                    # keyword is not in the subfunction's keyword list
                    msg = msgs.msg['0005'][1] % (modId, rh.function,
                        rh.subfunction, keyword)
                    rh.printLn("ES", msg)
                    rh.updateResults(msgs.msg['0005'][0])
                    break
            else:
                # Subfunction does not support keywords
                msg = (msgs.msg['0006'][1] % (modId, rh.function,
                    rh.subfunction, rh.request[rh.argPos]))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0006'][0])
                break

    rh.printSysLog("Exit generalUtils.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
import argparse
import datetime
import os
import re
import six
from six import string_types
import sys
import subprocess
from subprocess import CalledProcessError
from tempfile import NamedTemporaryFile
from smtLayer.smt import SMT
from smtLayer.ReqHandle import ReqHandle
version = '1.0.0'                # Version of this script
longstring = '1' * 4096          # Oversized value used in boundary tests

"""
The following dictionary contains keys and values used as substitution
in the requests that are processed. Replaceable values are identified
in the requests by '<<<' and '>>>'. The key within the '<<<' and '>>>'
is in the subs dictionary.
"""
subs = {
    '<<<safeID>>>': 'someid',           # An existing userid that can be
                                        # started and stopped
    '<<<unsafePre>>>': 'STUS',          # A prefix for a userid that gets
                                        # created and destroyed.  Tests
                                        # add to the prefix to get an id.
    '<<<horribleID1>>>': 'g[][325$$$',  # A userid that makes SMAPI cry
                                        # and beg for a swift death
    '<<<migrID>>>': '',                 # An existing userid that can be
                                        # migrated or empty to bypass tests.
    '<<<unmigrID>>>': '',               # An existing userid that cannot be
                                        # migrated or empty to bypass tests.
    '<<<migrDest>>>': 'zvmhost',        # A z/VM host for migration into it
    '<<<pw>>>': 'password',             # password
    '<<<vmSize>>>': '2G',               # Virtual machine size
    '<<<pool3390>>>': 'POOL1',          # 3390 disk pool (keep this in
                                        # uppercase for smtTest ease of use)
    '<<<size3390>>>': '1100',           # Size of a 3390 for system deploys
    '<<<pool9336>>>': 'POOL4',          # 9336 disk pool (keep this in
                                        # uppercase for smtTest ease of use)
    '<<<setupDisk>>>': '/opt/xcat/share/xcat/scripts/setupDisk',  # SetupDisk
    '<<<SimpleCfgFile>>>': '/install/zvm/POC/testImages/cfgdrive.tgz',
                                        # Simple tar file for the config drive
    '<<<simpleImage>>>': '/install/zvm/POC/testImages/' +
        'rhel67eckd_small_1100cyl.img',  # Small image file
    '<<<unpackScript>>>': '/opt/zthin/bin/unpackdiskimage',
                                        # Location of unpackdiskimage
    '<<<longString>>>': longstring,
    '<<<makevmWait>>>': '0',            # Wait time for makeVM to fully
                                        # complete
    }
# Add a substitution key for the userid of this system.
cmd = ["sudo", "/sbin/vmcp", "query userid"]
try:
    queryOut = subprocess.check_output(
        cmd,
        close_fds=True,
        stderr=subprocess.STDOUT)
    # check_output returns bytes on Python 3; decode before parsing.
    # Bug fix: keep only the first word (the userid) of the response --
    # the original computed split()[0] but discarded the result, leaving
    # the whole "query userid" output in the substitution value.
    subs['<<<localUserid>>>'] = bytes.decode(queryOut).split()[0]
except Exception:
    print("Could not find the userid of this system.")
    subs['<<<localUserid>>>'] = 'unknownUserid'
# Add a substitution key for the name of the aemod script that
# sets the /etc/iucv_authorized_userid file to use our userid
# and create the script.
modFile = NamedTemporaryFile(delete=False)
subs['<<<aeModScript>>>'] = modFile.name

# Use a context manager so the handle is always closed, and avoid
# shadowing the 'file' builtin as the original did.
with open(modFile.name, 'w') as scriptFile:
    scriptFile.write("#!/usr/bin/env bash\n")
    scriptFile.write("echo -n $1 > /etc/iucv_authorized_userid\n")
"""
A dictionary contains the elements needed to process a test.
This includes the following keys:
   description - Descriptive information to show when running the test.
request - Request to be passed to SMT.
out - Input to grep to validate the output from a test.
                 Normally, this is a regular expression. The regular
expression is input to grep which scans and validates the
output. If output is an empty string then the test
is assumed to have passed the output check.
overallRC - A single return code or a list of return codes to compare
against the overallRC property in the results.
If the test returns an overallRC value that matches one of
the specified values then it has passed the overallRC check.
rc - A single return code or a list of return codes.
If the test returns a return code that matches one of the
specified return codes then it has passed the
return code check.
rs - A single return code or a list of return codes.
If the test returns a return code that matches one of the
specified return codes then it has passed the
return code check.
Note: A test must pass all specified tests (e.g. output, rc, etc.)
in order for the test to pass.
"""
deployTests = [
{
'description': "Create a simple system: <<<unsafePre>>>1",
'request': "MakeVM <<<unsafePre>>>1 directory <<<pw>>> " +
"<<<vmSize>>> g --ipl 100 --profile OSDFLT",
'out': "",
'overallRC': [0],
},
{
'description': "Purge the reader",
'request': "ChangeVM <<<unsafePre>>>1 purgerdr",
'out': "",
'overallRC': [0],
},
{
'description': "Add a 3390 disk to <<<unsafePre>>>1 as 100",
'request': "ChangeVM <<<unsafePre>>>1 add3390 <<<pool3390>>> 100 " +
"<<<size3390>>>",
'out': "",
'overallRC': [0],
},
{
'description': "Check out the user entry",
'request': "GetVM <<<unsafePre>>>1 directory",
'out': "",
'overallRC': [0],
},
{
'description': "Unpack the image into the disk.",
'request': "SHELL_TEST <<<unpackScript>>> <<<unsafePre>>>1 100 " +
"<<<simpleImage>>>",
'out': "",
'overallRC': [0],
},
{
'description': "Punch the config drive tar file to the system.",
'request': "ChangeVM <<<unsafePre>>>1 punchfile " +
"<<<SimpleCfgFile>>> --class x",
'out': "",
'overallRC': [0],
},
{
'description': "Send an aemod to allow IUCV access by this system.",
'request': "ChangeVM <<<unsafePre>>>1 aemod <<<aeModScript>>> " +
"--invparms <<<localUserid>>>",
'out': "",
'overallRC': [0],
},
{
'description': "Power on the system and wait for to OS to come up.",
'request': "PowerVM <<<unsafePre>>>1 on --wait --state up",
'out': "",
'overallRC': [0],
},
{
'description': "Send a commmand to a system.",
'request': "CmdVM <<<unsafePre>>>1 cmd pwd",
'out': "",
'overallRC': [0],
},
{
'description': "Delete a system: <<<unsafePre>>>1",
'request': "DeleteVM <<<unsafePre>>>1 directory",
'out': "",
'overallRC': [0],
},
]
generalTests = [
{
'description': "Test Help Function",
'request': "help",
'overallRC': [0],
},
{
'description': "Test no operands => error",
'request': "", # Request with no parms
'overallRC': [4],
'rc': [4],
'rs': [9],
},
{
'description': "Test Version",
'request': "version",
'out': "^Version:",
'overallRC': [0],
},
{
'description': "Test unrecognized operands",
'request': "Steve is great",
'overallRC': [4],
'rc': [4],
'rs': [7],
},
]
# Guest (GetVM/PowerVM) tests that are not covered by the other
# function-specific test sets: power the guest on, query its status
# while up, power it off and query again.
guestTests = [
    {
        'description': "Power on a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> on",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Get the status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --all",
        'out': "CPU Used Time:",
        'overallRC': [0],
    },
    {
        'description': "Get the power status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --power",
        'out': "Power state: on",
        'overallRC': [0],
    },
    {
        'description': "Get the memory status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --memory",
        'out': "Total Memory:",
        'overallRC': [0],
    },
    {
        'description': "Get the cpu status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --cpu",
        'out': "Processors:",
        'overallRC': [0],
    },
    {
        'description': "Power off the system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> off",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Get the status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status",
        'out': "CPU Used Time: 0 sec",
        'overallRC': [0],
    },
    {
        'description': "Get the power status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --power",
        'out': "Power state: off",
        'overallRC': [0],
    },
    {
        'description': "Get the memory status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --memory",
        'out': "Total Memory: 0M",
        'overallRC': [0],
    },
    {
        'description': "Get the cpu status of the system: <<<safeID>>>",
        'request': "getvm <<<safeID>>> status --cpu",
        'out': "Processors: 0",
        'overallRC': [0],
    },
]
# Host-related (GetHost) tests: disk pool names/space, FCP devices and
# general host information.
hostTests = [
    {
        'description': "Get the list of disk pools.",
        'request': "GetHost diskpoolnames",
        'overallRC': [0],
    },
    {
        'description': "Get the space for all disk pools.",
        'request': "GetHost diskpoolspace",
        'out': "Total",
        'overallRC': [0],
    },
    {
        'description': "Get the space for a specific 3390 disk pool: " +
            "<<<pool3390>>>",
        'request': "GetHost diskpoolspace <<<pool3390>>>",
        'out': "^<<<pool3390>>> Total",
        'overallRC': [0],
    },
    {
        'description': "Get the space for a specific 9336 disk pool: " +
            "<<<pool9336>>>",
        'request': "GetHost diskpoolspace <<<pool9336>>>",
        'out': "^<<<pool9336>>> Total",
        'overallRC': [0],
    },
    {
        'description': "Get the FCP Device information.",
        'request': "GetHost fcpdevices",
        'out': "^FCP device number",
        'overallRC': [0],
    },
    {
        'description': "Get the general information.",
        'request': "GetHost general",
        'out': "",
        'overallRC': [0],
    },
]
# IUCV tests: send commands to a running VM over IUCV (CmdVM) and verify
# both successful and failing command return/reason codes.
iucvTests = [
    {
        'description': "Power on a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> on --wait --state up",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Send a commmand to a system.",
        'request': "CmdVM <<<safeID>>> cmd pwd",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Send an failing commmand to a system.",
        'request': "CmdVM <<<safeID>>> cmd \"echo 'bob'|grep /john/\"",
        'out': "",
        'overallRC': [2],
        'rc': [8],
        'rs': [1],
    },
    {
        'description': "Send an unknown commmand to a system.",
        'request': "CmdVM <<<safeID>>> cmd SteveIsGreat",
        'out': "",
        'overallRC': [2],
        'rc': [8],
        'rs': [127],
    },
]
# VM life-cycle tests: create a simple system, verify it exists via
# SMAPI, then delete it.
lifecycleTests = [
    {
        'description': "Create a simple system: <<<unsafePre>>>2",
        'request': "makevm <<<unsafePre>>>2 directory smapi 2g g",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Verify system exists: <<<unsafePre>>>2",
        'request': "smapi <<<unsafePre>>>2 api Image_Query_DM",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Delete a system: <<<unsafePre>>>2",
        'request': "deletevm <<<unsafePre>>>2 directory",
        'out': "",
        'overallRC': [0],
    },
    # We used to verify that system no longer exists but dirmaint was slower
    # and test case sometimes fails.
]
# VM migration tests.  These run only when a non-migratable userid
# (<<<unmigrID>>>) is configured, and expect the SMAPI migration errors
# (rc 99, rs 418/419) rather than successful migrations.
migrateTests = [
    {
        'description': "Get status for a specific userid that " +
            "cannot be migrated: <<<unmigrID>>>",
        'doIf': "'<<<unmigrID>>>' != ''",
        'request': "migrateVM <<<unmigrID>>> status",
        'overallRC': [99],
        'rc': [99],
        'rs': [419],
    },
    {
        'description': "Get all migration status for a host with " +
            "no active migrations.",
        'doIf': "'<<<unmigrID>>>' != ''",
        'request': "migrateVM <<<unmigrID>>> status --all",
        'overallRC': [99],
        'rc': [99],
        'rs': [419],
    },
    {
        'description': ("Get incoming migration status for a host " +
            "with no active migrations."),
        'doIf': "'<<<unmigrID>>>' != ''",
        'request': "migrateVM <<<unmigrID>>> status --incoming",
        'overallRC': [99],
        'rc': [99],
        'rs': [419],
    },
    {
        'description': "Get outgoing migration status for a host " +
            "with no active migrations.",
        'doIf': "'<<<unmigrID>>>' != ''",
        'request': "migrateVM <<<unmigrID>>> status --outgoing",
        'overallRC': [99],
        'rc': [99],
        'rs': [419],
    },
    {
        'description': "Test a system for migration: <<<unmigrID>>>",
        'doIf': "'<<<unmigrID>>>' != ''",
        'request': "migrateVM <<<unmigrID>>> test --destination " +
            "<<<migrDest>>>",
        'overallRC': [99],
        'rc': [99],
        'rs': [418],
    },
    {
        'description': "Cancel a migration",
        'doIf': "'<<<migrID>>>' != ''",
        'request': "migrateVM <<<migrID>>> cancel",
        'overallRC': [99],
        'rc': [99],
        'rs': [419],
    },
]
# ChangeVM tests.  Creates a scratch system (<<<unsafePre>>>3), exercises
# aemod/disk/punchfile/ipl/loaddev modifications against it both logged
# off and running, then deletes the system and purges its reader files.
modifyTests = [
    # >>>>>>>>> Create a simple system for logged off tests.
    {
        'description': "Create a simple system: <<<unsafePre>>>3",
        'request': "MakeVM <<<unsafePre>>>3 directory <<<pw>>> " +
            "<<<vmSize>>> g --ipl 100 --profile OSDFLT",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Verify no console log is available: <<<unsafePre>>>3",
        'request': "getvm <<<unsafePre>>>3 consoleoutput",
        'out': "",
        'overallRC': [8],
        'rc': [8],
        'rs': [8]
    },
    {
        'description': "Wait <<<makevmWait>>> seconds for source " +
            "directory to be updated.",
        'request': "SHELL echo 'Sleeping for <<<makevmWait>>> seconds " +
            "to allow source directory update to complete';sleep " +
            "<<<makevmWait>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add modifications to the activation engine",
        'request': 'ChangeVM <<<unsafePre>>>3 aemod <<<setupDisk>>> ' +
            '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' +
            'mntdir=/mnt/ephemeral/0.0.0101"',
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add unknown script mods to the activation engine",
        'request': 'ChangeVM <<<unsafePre>>>3 aemod BAD ' +
            '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' +
            'mntdir=/mnt/ephemeral/0.0.0101"',
        'out': "",
        'overallRC': [4],
        'rc': [4],
        'rs': [400],
    },
    {
        'description': "Add modifications to activation engine for bad id",
        'request': 'ChangeVM BADID aemod <<<setupDisk>>> ' +
            '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' +
            'mntdir=/mnt/ephemeral/0.0.0101"',
        'out': "",
        'overallRC': [4],
    },
    {
        'description': "Purge the reader: <<<unsafePre>>>3",
        'request': "ChangeVM <<<unsafePre>>>3 purgerdr",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with ext4: " +
            "<<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "101 100m --mode w --filesystem ext4 " +
            "--readpw readpw --writepw writepw --multipw multipw",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 3390 disk with ext4: <<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 removedisk 101",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with xfs: " +
            "<<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "102 100m --mode w --filesystem xfs",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 3390 disk with xfs: <<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 removedisk 102",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with swap: " +
            "<<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "103 100m --mode w --filesystem swap",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 3390 disk with swap: <<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 removedisk 103",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove a disk that does not exist: <<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 removedisk 104",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to the system with ext4.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "120 100m --mode w --filesystem ext4 " +
            "--readpw readpw --writepw writepw --multipw multipw",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with ext4.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 120",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to the system with xfs.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "121 100m --mode w --filesystem xfs",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with xfs.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 121",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to the system with swap.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "122 100m --mode w --filesystem swap",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with swap.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 122",
        'out': "",
        'overallRC': [0],
    },
    # >>>>>>>>> Deploy an image for active system tests.
    {
        'description': "Add a 3390 disk for the root disk: <<<unsafePre>>>3",
        'request': "ChangeVM <<<unsafePre>>>3 add3390 <<<pool3390>>> 100 " +
            "<<<size3390>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Unpack the image into the disk: <<<unsafePre>>>3",
        'request': "SHELL_TEST <<<unpackScript>>> <<<unsafePre>>>3 100 " +
            "<<<simpleImage>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Punch the config drive tar file to the system: " +
            "<<<unsafePre>>>3",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile " +
            "<<<SimpleCfgFile>>> --class x",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Send an aemod to allow IUCV access by this system.",
        'request': "ChangeVM <<<unsafePre>>>3 aemod <<<aeModScript>>> " +
            "--invparms <<<localUserid>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Power on the system and wait for to OS to " +
            "come up: <<<unsafePre>>>3",
        'request': "PowerVM <<<unsafePre>>>3 on --wait --state up",
        'out': "",
        'overallRC': [0],
    },
    # >>>>>>>>> Tests that are related to active systems.
    {
        'description': "Start console spooling on the system: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd 'vmcp spool console " +
            "to <<<unsafePre>>>3 start'",
        'overallRC': [0],
    },
    {
        'description': "Enable tracing so we put stuff to the " +
            "console of <<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd 'vmcp trace diag run'",
        'overallRC': [0],
    },
    {
        'description': "Force more to the console of " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd 'vmcp query userid'",
        'overallRC': [0],
    },
    {
        'description': "Get the console log of the system: <<<unsafePre>>>3",
        'request': "getvm <<<unsafePre>>>3 consoleoutput",
        'out': "List of spool files containing console logs " +
            "from <<<unsafePre>>>3:",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with ext4: " +
            "<<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "110 100m --mode w --filesystem ext4",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Online the 110 ECKD disk with ext4: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd '/sbin/cio_ignore -r 110; " +
            "which udevadm &> /dev/null && udevadm settle || udevsettle ;" +
            "/sbin/chccwdev -e 110 2>&1'",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 3390 disk with ext4: <<<unsafePre>>>3 110",
        'request': "changevm <<<unsafePre>>>3 removedisk 110",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with xfs: " +
            "<<<unsafePre>>>3",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "111 100m --mode w --filesystem xfs",
        'out': "",
        'overallRC': [0],
    },
    # Don't online the disk. This makes the chccwdev fail but the
    # failure should be ignored.
    {
        'description': "Remove the 3390 disk with xfs: " +
            "<<<unsafePre>>>3 111",
        'request': "changevm <<<unsafePre>>>3 removedisk 111",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 3390 disk to the system with swap: " +
            "<<<unsafePre>>>3 112",
        'request': "changevm <<<unsafePre>>>3 add3390 <<<pool3390>>> " +
            "112 100m --mode w --filesystem swap",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Online the 112 ECKD disk with swap: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd '/sbin/cio_ignore -r 112; " +
            "which udevadm &> /dev/null && udevadm settle || udevsettle ;" +
            "/sbin/chccwdev -e 112 2>&1'",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 3390 disk with swap: " +
            "<<<unsafePre>>>3 112",
        'request': "changevm <<<unsafePre>>>3 removedisk 112",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to an active system with ext4.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "130 100m --mode w --filesystem ext4 " +
            "--readpw readpw --writepw writepw --multipw multipw",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Check out the user entry",
        'request': "GetVM <<<unsafePre>>>3 directory",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Online the 130 FBA disk with swap: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd '/sbin/cio_ignore -r 130; " +
            "which udevadm &> /dev/null && udevadm settle || udevsettle ;" +
            "/sbin/chccwdev -e 130 2>&1'",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with ext4.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 130",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to an active system with xfs.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "131 100m --mode w --filesystem xfs",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Online the 131 FBA disk with swap: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd '/sbin/cio_ignore -r 131; " +
            "which udevadm &> /dev/null && udevadm settle || udevsettle ;" +
            "/sbin/chccwdev -e 131 2>&1'",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with xfs.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 131",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add a 9336 disk to an active system with swap.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 add9336 <<<pool9336>>> " +
            "132 100m --mode w --filesystem swap",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Online the 132 FBA disk with swap: " +
            "<<<unsafePre>>>3",
        'request': "CmdVM <<<unsafePre>>>3 cmd '/sbin/cio_ignore -r 132; " +
            "which udevadm &> /dev/null && udevadm settle || udevsettle ;" +
            "/sbin/chccwdev -e 132 2>&1'",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Remove the 9336 disk with swap.",
        'doIf': "'<<<pool9336>>>' != ''",
        'request': "changevm <<<unsafePre>>>3 removedisk 132",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add/change an IPL statement",
        'request': "changevm <<<unsafePre>>>3 ipl 100",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add/change an IPL statement with loadparms",
        'request': "changevm <<<unsafePre>>>3 ipl 100 --loadparms cl",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add/change an IPL statement with loadparms",
        'request': "changevm <<<unsafePre>>>3 ipl 100 --loadparms lots",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add/change an IPL statement with parms",
        'request': "changevm <<<unsafePre>>>3 ipl cms --parms autocr",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Verify IPL statement exists.",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "IPL CMS PARM AUTOCR",
        'overallRC': [0],
    },
    {
        'description': "Remove an IPL statement",
        'request': "changevm <<<unsafePre>>>3 removeipl",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Add some loaddev statements",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot 0 " +
            "--addr 123411 --lun 12345678 --wwpn " +
            "5005076800aa0001 --scpDataType hex "
            "--scpData 1212",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No datatype loaddev statements",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot 0 " +
            "--addr 123411 --lun 12345678 --wwpn " +
            "5005076800aa0001 --scpData 1212",
        'out': "",
        'overallRC': [4],
        'rc': [4],
        'rs': [14],
    },
    {
        'description': "No data loaddev statements",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot 0 " +
            "--addr 123411 --lun 12345678 --wwpn " +
            "5005076800aa0001 --scpDataType hex",
        'out': "",
        'overallRC': [4],
        'rc': [4],
        'rs': [14],
    },
    {
        'description': "Bad datatype loaddev statements",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot 0 " +
            "--addr 123411 --lun 12345678 --wwpn " +
            "5005076800aa0001 --scpDataType BAD --scpData 1212",
        'out': "",
        'overallRC': [4],
        'rc': [4],
        'rs': [16],
    },
    {
        'description': "Really long scp data",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot 0 " +
            "--addr 123411 --lun 12345678 --wwpn " +
            "5005076800aa0001 --scpDataType hex " +
            "--scpData <<<longString>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No boot parm (keep old boot)",
        'request': "changevm <<<unsafePre>>>3 loaddev --addr 123411 " +
            "--lun 12345678 --wwpn 5005076800aa0001 " +
            "--scpDataType hex --scpData 1212",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No addr parm (keep old block address)",
        'request': "changevm <<<unsafePre>>>3 loaddev --lun " +
            "12345678 --wwpn 5005076800aa0001 " +
            "--scpDataType hex --scpData 1212",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No lun parm (keep old lun)",
        'request': "changevm <<<unsafePre>>>3 loaddev --wwpn " +
            "5005076800aa0001 --scpDataType hex --scpData 1212",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No wwpn parm (keep old wwpn)",
        'request': "changevm <<<unsafePre>>>3 loaddev --scpDataType " +
            "hex --scpData 1212",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "No parms (keep old parms)",
        'request': "changevm <<<unsafePre>>>3 loaddev",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev boot statements exist",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "LOADDEV BOOTPROG 0",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev addr statements exist",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "LOADDEV BR_LBA 0000000000123411",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev lun statements exist",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "LOADDEV LUN 0000000012345678",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev wwpn statements exist.",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "LOADDEV PORTNAME 5005076800AA0001",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev wwpn statements exist",
        'request': "smapi <<<unsafePre>>>3 api Image_Query_DM",
        'out': "LOADDEV SCPDATA HEX",
        'overallRC': [0],
    },
    {
        'description': "Delete statements",
        'request': "changevm <<<unsafePre>>>3 loaddev --boot DELETE " +
            "--addr DELETE --lun DELETE --wwpn DELETE " +
            "--scpDataType DELETE",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Verify loaddev statements are gone",
        'request': "SMAPI <<<unsafePre>>>3 API " +
            "Image_SCSI_Characteristics_Query_DM",
        'out': "",
        'overallRC': [8],
        'rc': [0],
        'rs': [28],
    },
    {
        'description': "Successfully purge the reader: <<<unsafePre>>>3",
        'request': "changeVM <<<unsafePre>>>3 purgeRDR ",
        'overallRC': [0],
    },
    {
        'description': "Try to purge read of a bad id: <<<horribleID1>>>",
        'request': "changeVM <<<horribleID1>>> purgeRDR ",
        'out': "Syntax error in function parameter 8",
        'overallRC': [8],
        'rc': [24],
        'rs': [813]
    },
    {
        'description': "Punch the config drive tar file to the system.",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile <<<SimpleCfgFile>>>",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with valid spool class.",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile <<<SimpleCfgFile>>>" +
            " --class b",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with an invalid userid and file.",
        'request': "ChangeVM <<<horribleID1>>> punchfile invalid.config",
        'out': "",
        'overallRC': [4],
        'rc': [7],
        'rs': [401],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with an invalid userid and spool class.",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile invalid.config" +
            " --class b*",
        'out': "",
        'overallRC': [4],
        'rc': [7],
        'rs': [401],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with an invalid userid.",
        'request': "ChangeVM <<<horribleID1>>> punchfile <<<SimpleCfgFile>>>" +
            " --class b",
        'out': "",
        'overallRC': [4],
        'rc': [4],
        'rs': [424],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with an invalid class.",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile <<<SimpleCfgFile>>>" +
            " --class b*",
        'out': "",
        'overallRC': [4],
        'rc': [8],
        'rs': [404],
    },
    {
        'description': "Punch the config drive tar file to the system" +
            " with an invalid file.",
        'request': "ChangeVM <<<unsafePre>>>3 punchfile invalid.config",
        'out': "",
        'overallRC': [4],
        'rc': [7],
        'rs': [401],
    },
    # >>>>>>>>> Clean up by destroying the system.
    {
        'description': "Delete the system: <<<unsafePre>>>3",
        'request': "deletevm <<<unsafePre>>>3 directory",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Clean up an reader files for <<<unsafePre>>>3.",
        'request': "CODE_SEG purgeRdr('<<<unsafePre>>>3')",
        'overallRC': [0],
    },
]
# PowerVM tests: on/off/softoff, pause/unpause, reset, reboot,
# status/isreachable/wait state queries against <<<safeID>>>.
powerTests = [
    {
        'description': "Test PowerVM VERSION.",
        'request': "PowerVM version",
        'out': "^Version:",
        'overallRC': [0],
    },
    {
        'description': "'PowerVM xxx JUNK' fails",
        'request': "PowerVM xxx junk",
        'out': "",
        'overallRC': [4],
    },
    {
        'description': "Power off a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> off --wait",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Check status of powered off system.",
        'request': "PowerVM <<<safeID>>> status",
        'out': "<<<safeID>>>: off",
        'overallRC': [0],
        'rc': [0],
        'rs': [1]
    },
    {
        'description': "Check isreachable of powered off system.",
        'request': "PowerVM <<<safeID>>> isreachable",
        'out': "<<<safeID>>>: unreachable",
        'overallRC': [0],
        'rs': [0]
    },
    {
        'description': "Power off an already powered off system.",
        'request': "PowerVM <<<safeID>>> off",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Power on a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> on",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Power off a system with softOff option: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> softoff",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Power on a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> on",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Power on a system that is on but not up: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> on --wait --state up",
        'out': "<<<safeID>>>: up",
        'overallRC': [0],
    },
    {
        'description': "Check status of powered on system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> status",
        'out': "<<<safeID>>>: on",
        'overallRC': [0],
        'rc': [0],
        'rs': [0]
    },
    {
        'description': "Check isreachable of powered on system: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> isreachable",
        'out': "<<<safeID>>>: reachable",
        'overallRC': [0],
        'rs': [1]
    },
    {
        'description': "Pause a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> pause",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Isreachable of a paused system is unreachable: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> isreachable",
        'out': "<<<safeID>>>: unreachable",
        'overallRC': [0],
        'rs': [0]
    },
    {
        'description': "Unpause a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> unpause",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Isreachable of an unpaused system is reachable: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> isreachable",
        'out': "<<<safeID>>>: reachable",
        'overallRC': [0],
        'rs': [1]
    },
    {
        'description': "Reset a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> reset --wait --state up",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Isreachable of an unpaused system is reachable: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> isreachable",
        'out': "<<<safeID>>>: reachable",
        'overallRC': [0],
        'rs': [1]
    },
    {
        'description': "Reboot a system: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> reboot --wait",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Reboot a system w/o waiting for the OS to be up: " +
            "<<<safeID>>>",
        'request': "PowerVM <<<safeID>>> reboot",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Wait for the OS to come up: <<<safeID>>>",
        'request': "PowerVM <<<safeID>>> wait --state up",
        'out': "<<<safeID>>>: up",
        'overallRC': [0],
        'rs': [0]
    },
]
# Direct SMAPI invocation tests, with and without operands, including an
# intentionally failing disk pool query.
smapiTests = [
    {
        'description': "Directory related query w/o operands.",
        'request': "smapi <<<safeID>>> api Image_Query_DM",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Disk pool query with operands.",
        'request': "smapi <<<safeID>>> api Image_Volume_Space_Query_DM " +
            "--operands '-q' 1 '-e' 1",
        'out': "",
        'overallRC': [0],
    },
    {
        'description': "Failing disk pool query with operands.",
        'request': "smapi <<<safeID>>> api Image_Volume_Space_Query_DM " +
            "--operands '-q' 4 '-e' 1",
        'out': "",
        'overallRC': [8],
        'rc': [24],
        'rs': [1018],
    },
]
# Master dictionary of runnable test sets.  Each entry maps a set id to
# its description, an optional 'doIf' guard expression, and its test list.
testSets = {
    'DEPLOY': {
        'description': 'ECKD deploy image tests',
        'doIf': "'<<<pool3390>>>' != ''",
        'tests': deployTests},
    'GENERAL': {
        'description': 'Tests that are not specific to a ' +
            'particular function.',
        'tests': generalTests},
    'GUEST': {
        'description': 'Guest tests that are not covered under ' +
            'other functions.',
        'tests': guestTests},
    'HOST': {
        'description': 'Host related tests',
        'tests': hostTests},
    'IUCV': {
        'description': 'Send commands to VM over IUCV',
        'tests': iucvTests},
    'LIFECYCLE': {
        'description': 'VM Life Cycle related tests',
        'tests': lifecycleTests},
    'MIGRATE': {
        'description': 'VM Migration related tests',
        'tests': migrateTests},
    'MODIFY': {
        'description': 'Modify a VM',
        'tests': modifyTests},
    'POWER': {
        'description': 'VM Power function tests',
        'tests': powerTests},
    'SMAPI': {
        'description': 'SMAP API invocation tests',
        'tests': smapiTests},
    }
def localize(localFile, subs, testSets):
    """
    Apply local overrides from a tailorization module.

    Imports the named module and merges its localSubs dictionary into
    subs and its localTestSets dictionary into testSets, so an invoker
    can extend or replace the built-in tests without editing this file.

    Input:
       localFile - Name of the local tailorization module (without .py),
                   e.g. smtTestLocal for the smtTestLocal.py file.
       subs      - Substitution dictionary to be updated in place.
       testSets  - Test set dictionary to be updated in place.

    Output:
       0 on success, 1 if the local module could not be imported.
    """
    try:
        localMod = __import__(localFile, fromlist=["*"])
    except Exception as exc:
        print(exc)
        return 1

    # Merge the substitution overrides, if any were supplied.
    if localMod.localSubs:
        print("Localizing localSubs dictionary.")
        for name, value in localMod.localSubs.items():
            print("Localizing " + name + ": " + value)
            subs[name] = value
    else:
        print("No local overrides exist for the subs dictionary.")

    # Merge the test set overrides.  The special key 'clear:testSets'
    # discards all built-in test sets before the local ones are added.
    if localMod.localTestSets:
        print("Localizing the test sets.")
        if 'clear:testSets' in localMod.localTestSets:
            print("Removing all original test sets.")
            testSets.clear()
        for setName, setDef in localMod.localTestSets.items():
            if setName == 'clear:testSets':
                continue
            print("Localizing test set: " + setName)
            testSets[setName] = setDef
    else:
        print("No local test sets exist.")

    return 0
def purgeRdr(userid):
    """
    Purge contents in this system's reader from a userid.

    Input:
       userid - Userid that originated the files we want to purge.

    Output:
       Return code - 0: no problems, 1: problem encountered.
    """
    subRC = 0
    userid = userid.upper()
    spoolList = []

    # Query the reader for spool file ids originating from the userid.
    queryCmd = ("sudo /sbin/vmcp query rdr userid '*' | " +
        "grep ^" + userid + " | awk '{print $2}'")
    try:
        qryRes = subprocess.check_output(
            queryCmd,
            close_fds=True,
            shell=True)
        qryRes = bytes.decode(qryRes)
        spoolList = qryRes.splitlines()
    except Exception as e:
        # All exceptions.
        print("Unable to purge reader files for in this " +
            "system's reader originally owned by: " + userid +
            ", exception: " + str(e))
        subRC = 1

    # Purge each spool file individually.  The spool id replaces the '0'
    # placeholder (index 4) of the command.  The original code assigned
    # the loop value into purgeCmd[3], clobbering the 'reader' keyword
    # and producing an invalid "vmcp purge <id> 0" command.
    purgeCmd = ['sudo', '/sbin/vmcp', 'purge', 'reader', '0']
    for spoolID in spoolList:
        purgeCmd[4] = spoolID
        try:
            subprocess.check_output(
                purgeCmd,
                close_fds=True)
        except Exception as e:
            # All exceptions.
            print("Unable to purge reader file " + spoolID +
                ", exception: " + str(e))
            subRC = 1

    return subRC
def runTest(smt, test):
    """
    Drive a test and validate the results.

    Input:
       smt  - SMT daemon object.
       test - Dictionary element for the test to drive.

    Output:
       Final test score - 0: failed, 1: passed,
    """
    global args

    if test['request'][0:10] != 'SHELL_TEST':
        # Normal request: parse and drive it through the request handler.
        reqHandle = ReqHandle(cmdName=sys.argv[0], captureLogs=True)
        results = reqHandle.parseCmdline(test['request'])
        if results['overallRC'] == 0:
            results = reqHandle.driveFunction()
    else:
        # Issue a function that is not considered a test.
        results = {
            'overallRC': 0,
            'rc': 0,
            'rs': 0,
            'errno': 0,
            'strError': '',
            'response': [],
            'logEntries': [],
            }
        shellCmd = test['request'][11:]
        try:
            results['response'] = subprocess.check_output(
                shellCmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            results['response'] = bytes.decode(results['response'])
        except CalledProcessError as e:
            results['response'] = e.output
            results['overallRC'] = e.returncode
        except Exception as e:
            # All other exceptions.
            # Use hasattr, not "in": a membership test on an exception
            # instance raises TypeError (exceptions are not containers).
            if hasattr(e, 'output'):
                results['response'] = e.output
            else:
                results['response'] = ('Exception encountered: ' +
                    "details: %s" % six.text_type(e))
            if hasattr(e, 'returncode'):
                results['overallRC'] = e.returncode
            else:
                results['overallRC'] = -9999999
        if isinstance(results['response'], string_types):
            results['response'] = [results['response']]

    print(" Overall rc: %s" % results['overallRC'])
    print(" rc: %s" % results['rc'])
    print(" rs: %s" % results['rs'])

    if len(results['response']) > 0:
        print(" Response:")
        for line in results['response']:
            print(" " + line)
    else:
        print(" Response: None returned")

    # Validate the response strings
    respScore = 1    # Assume the response tests passed.
    if 'out' in test.keys() and len(test['out']) > 0:
        # Expect a response let's test it.
        if len(results['response']) == 0:
            # No response returned when one was expected -> failed
            respScore = 0
        else:
            # Test the response to see it matches an expected response

            # Put the response into a file.  This avoids problems with
            # having special characters in the response that would
            # cause the shell to complain or get confused.
            tempFile = NamedTemporaryFile(delete=False)
            # Close the NamedTemporaryFile handle itself (the original
            # leaked it) and write via a managed text handle.
            tempFile.close()
            with open(tempFile.name, 'w') as respFile:
                for line in results['response']:
                    respFile.write(line + '\n')

            cmd = ['grep', ''.join(test['out']), tempFile.name]

            try:
                junk = subprocess.check_output(cmd, close_fds=True)
                junk = bytes.decode(junk)
                if junk == '':
                    respScore = 0
            except Exception:
                respScore = 0

            os.remove(tempFile.name)
    else:
        pass    # No responses listed, treat as a match

    # Validate the Overall return code
    orcScore = 0          # Assume RC is not a desired one
    if 'overallRC' not in test.keys():
        orcScore = 1      # No special value, assume it passed
    elif len(test['overallRC']) == 1:
        if test['overallRC'][0] == results['overallRC']:
            orcScore = 1
    else:
        for wanted in test['overallRC']:
            if results['overallRC'] == wanted:
                orcScore = 1
                break

    # Validate the failure return code
    rcScore = 0           # Assume RC is not a desired one
    if 'rc' not in test.keys():
        rcScore = 1       # No special value, assume it passed
    elif len(test['rc']) == 1:
        if test['rc'][0] == results['rc']:
            rcScore = 1
    else:
        for wanted in test['rc']:
            if results['rc'] == wanted:
                rcScore = 1
                break

    # Validate the failure reason code
    rsScore = 0           # Assume RC is not a desired one
    if 'rs' not in test.keys():
        rsScore = 1       # No special value, assume it passed
    elif len(test['rs']) == 1:
        if test['rs'][0] == results['rs']:
            rsScore = 1
    else:
        for wanted in test['rs']:
            if results['rs'] == wanted:
                rsScore = 1
                break

    # Determine the final score and show the success or failure of the test
    if respScore != 1 or orcScore != 1 or rcScore != 1 or rsScore != 1:
        testScore = 0
        if len(results['logEntries']) != 0:
            print(" Log Entries:")
            for line in results['logEntries']:
                print(" " + line)
        print(" Test Status: FAILED")
        if respScore != 1:
            print(" Response Validation: FAILED")
        if orcScore != 1:
            print(" Overall RC Validation: FAILED")
        if rcScore != 1:
            print(" rc Validation: FAILED")
        if rsScore != 1:
            print(" rs Validation: FAILED")
    else:
        testScore = 1
        if args.showLog is True and len(results['logEntries']) != 0:
            print(" Log Entries:")
            for line in results['logEntries']:
                print(" " + line)
        print(" Test Status: PASSED")

    return testScore
def driveTestSet(smt, setId, setToTest):
    """
    Drive a set of tests.

    Input:
       smt       - SMT daemon object
       setId     - Key of the test set, used when recording failures.
       setToTest - Dictionary element for the test set to drive.

    Global:
       args - Parsed command line arguments.
       cnts - Counts of total/attempted/passed/failed/bypassed tests and
              the list of failed tests.

    Output:
       Global values changed.
    """
    global args
    global cnts

    print(" ")
    print("******************************************************************")
    print("******************************************************************")
    print("Beginning Test Set: " + setToTest['description'])
    print("******************************************************************")
    print("******************************************************************")

    localTotal = 0
    localAttempted = 0
    localPassed = 0
    localFailed = 0
    localBypassed = 0
    failInfo = []
    startTime = datetime.datetime.now()

    for test in setToTest['tests']:
        if args.listParms is True:
            # Only want to list the requests.
            print(test['request'])
            continue

        # Produce Common Test/shell count info.
        print("")
        localTotal += 1
        cntInfo = "%i/%i" % (localTotal,
                             (cnts['total'] + localTotal))

        # NOTE: 'doIf' expressions come from trusted, checked-in test
        # definitions; eval() here is intentional.
        if 'doIf' in test and not eval(test['doIf']):
            print("Bypassing %s: %s" % (cntInfo, test['description']))
            localBypassed += 1
            continue

        if test['request'][0:6] == 'SHELL ':
            # Issue a function that is not considered a test.
            print("Shell %s: %s" % (cntInfo, test['description']))
            shellCmd = test['request'][6:]
            shellRC = 0
            try:
                out = subprocess.check_output(
                    shellCmd,
                    stderr=subprocess.STDOUT,
                    close_fds=True,
                    shell=True)
                out = bytes.decode(out)
                out = "".join(out)
            except CalledProcessError as e:
                out = e.output
                shellRC = e.returncode
            except Exception as e:
                # All other exceptions.
                # Bug fix: membership tests like 'output' in e raise
                # TypeError on exception instances; hasattr() is the
                # correct attribute probe.
                if hasattr(e, 'output'):
                    out = e.output
                else:
                    out = ('Exception encountered: ' +
                           "details: %s" % six.text_type(e))
                if hasattr(e, 'returncode'):
                    shellRC = e.returncode
                else:
                    shellRC = -9999999
            if isinstance(out, string_types):
                out = [out]
            # Shell invocations are not scored, but warn when the return
            # code is not one of the expected values.
            shellOk = 0
            if 'overallRC' in test:
                for testRC in test['overallRC']:
                    if shellRC == testRC:
                        shellOk = 1
                        break
            if shellOk == 0:
                print("***Warning*** A non test related shell function " +
                      "returned rc: " + str(shellRC) +
                      " out: " + ''.join(out))
        elif test['request'][0:9] == 'CODE_SEG ':
            # Execute an arbitrary (trusted) code segment from the test set.
            print("Code Segment: %s: %s" % (cntInfo, test['description']))
            codeSeg = test['request'][9:]
            exec(codeSeg)
        else:
            # Attempt the test.
            print("Test %s: %s" % (cntInfo, test['description']))
            localAttempted += 1
            testScore = runTest(smt, test)
            if testScore == 1:
                localPassed += 1
            else:
                localFailed += 1
                failInfo.append(cntInfo)

    endTime = datetime.datetime.now()

    # Roll the per-set counts into the global totals.
    cnts['total'] += localTotal
    cnts['attempted'] += localAttempted
    cnts['passed'] += localPassed
    cnts['failed'] += localFailed
    cnts['bypassed'] += localBypassed

    print(" ")
    print("Status of this set...")
    print("   Time:")
    print("      Started:  " + str(startTime))
    print("      Ended:    " + str(endTime))
    print("      Duration: " + str(endTime - startTime))
    print("   Total Requests: %i, Bypassed Requests: %i" %
          (localTotal, localBypassed))
    print("   Tests attempted: %i, passed: %i, failed: %i" %
          (localAttempted, localPassed, localFailed))
    if localFailed > 0:
        cnts['failedTests'].append(setId + ": " + " ".join(failInfo))
"""
******************************************************************************
main routine
******************************************************************************
"""
# Parse the input and assign it to the variables.
parser = argparse.ArgumentParser()
parser.add_argument('--listareas',
dest='listAreas',
action='store_true',
help='List names of the test set areas.')
parser.add_argument('--listparms',
dest='listParms',
action='store_true',
help='List the command being run.')
parser.add_argument('--local',
default='smtTestLocal',
dest='localFile',
help="Localization file or 'none'.")
parser.add_argument('--showlog',
dest='showLog',
action='store_true',
help='Show log entries for successful tests.')
parser.add_argument('setsToRun',
metavar='N',
nargs='*',
help='Test sets to run')
args = parser.parse_args()
if args.localFile != 'none':
# Perform the localization.
print("Localization file specified as: " + args.localFile)
print("Importing " + args.localFile)
rc = localize(args.localFile, subs, testSets)
if rc != 0:
exit(2)
else:
print("No localization will be performed.")
# The next lines produce the code that allows the regular expressions to work.
regSubs = dict((re.escape(k), v) for k, v in subs.iteritems())
pattern = re.compile("|".join(regSubs.keys()))
smt = SMT()
smt.enableLogCapture() # Capture request related logs
cnts = {}
cnts['total'] = 0
cnts['passed'] = 0
cnts['failed'] = 0
cnts['failedTests'] = []
cnts['attempted'] = 0
cnts['bypassed'] = 0
# Temporary Preparation for punchFile Test. Create a sample config file.
f = open("sample.config", "w+")
f.write("This is sample config file for punchFile Test")
f.close()
if args.listAreas is True:
for key in sorted(testSets):
print(key + ": " + testSets[key]['description'])
else:
# Initialize the environment. Online the punch.
cmd = "sudo /sbin/cio_ignore -r d; sudo /sbin/chccwdev -e d"
try:
subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
close_fds=True,
shell=True)
except CalledProcessError as e:
print("Warning: Failed to enable the punch, " +
"cmd: %s, rc: %i, out: %s" %
(cmd, e.returncode, e.output))
except Exception as e:
# All other exceptions.
if 'output' in e:
out = e.output
else:
out = ('Exception encountered: ' +
"details: %s" % six.text_type(e))
if 'returncode' in e:
eRC = e.returncode
else:
eRC = -9999999
print("Warning: Failed to enable the punch, " +
"cmd: %s, rc: %i, %s" %
(cmd, eRC, out))
# Perform the substitution change to all requests and responses
for key in testSets:
if 'doIf' in testSets[key]:
testSets[key]['doIf'] = pattern.sub(lambda m:
regSubs[re.escape(m.group(0))], testSets[key]['doIf'])
for test in testSets[key]['tests']:
test['description'] = pattern.sub(lambda m:
regSubs[re.escape(m.group(0))], test['description'])
test['request'] = pattern.sub(lambda m:
regSubs[re.escape(m.group(0))], test['request'])
if 'doIf' in test:
test['doIf'] = pattern.sub(lambda m:
regSubs[re.escape(m.group(0))], test['doIf'])
if 'out' in test:
test['out'] = pattern.sub(lambda m:
regSubs[re.escape(m.group(0))], test['out'])
# Apply testSet['doIf'] to the tests, if it exists.
if 'doIf' in testSets[key]:
if 'doIf' in test:
test['doIf'] = (testSets[key]['doIf'] + ' and ' +
test['doIf'])
else:
test['doIf'] = testSets[key]['doIf']
# Determine the tests to run based on the first argument.
tests = []
totalStartTime = datetime.datetime.now()
if len(args.setsToRun) > 0:
for key in args.setsToRun:
key = key.upper()
if key in testSets:
driveTestSet(smt, key, testSets[key])
else:
print("The following tests set was not recognized: " + key)
else:
for key in sorted(testSets):
driveTestSet(smt, key, testSets[key])
totalEndTime = datetime.datetime.now()
# Cleanup the work files.
if (os.path.exists("sample.config")):
os.remove("sample.config")
if (os.path.exists(subs['<<<aeModScript>>>'])):
os.remove(subs['<<<aeModScript>>>'])
print("")
print("******************************************************************")
print("Status of this run...")
print(" Time:")
print(" Started: " + str(totalStartTime))
print(" Ended: " + str(totalEndTime))
print(" Duration: " + str(totalEndTime - totalStartTime))
print(" Total Requests: %i, Bypassed Requests: %i" %
(cnts['total'], cnts['bypassed']))
print(" Tests attempted: %i, passed: %i, failed: %i" %
(cnts['attempted'], cnts['passed'], cnts['failed']))
print(" Failed Test(s): " + str(cnts['failedTests']))
print("******************************************************************")
if cnts['failed'] == 0:
exit(0)
else:
exit(1) | zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/smtLayer/smtTest.py | smtTest.py |
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import invokeSMCLI
modId = 'MIG'
version = "1.0.0"
"""
List of subfunction handlers.
Each subfunction contains a list that has:
Readable name of the routine that handles the subfunction,
Code for the function call.
"""
subfuncHandler = {
'CANCEL': ['cancel', lambda rh: cancelMigrate(rh)],
'HELP': ['help', lambda rh: help(rh)],
'MODIFY': ['modify', lambda rh: modifyMigrate(rh)],
'MOVE': ['move', lambda rh: moveVM(rh)],
'STATUS': ['status', lambda rh: getStatus(rh)],
'TEST': ['test', lambda rh: testMigrate(rh)],
'VERSION': ['getVersion', lambda rh: getVersion(rh)],
}
"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
- Human readable name of the operand,
- Property in the parms dictionary to hold the value,
- Is it required (True) or optional (False),
- Type of data (1: int, 2: string).
"""
posOpsList = {}
"""
List of additional operands/options supported by the various subfunctions.
The dictionary followng the subfunction name uses the keyword from the command
as a key. Each keyword has a dictionary that lists:
- the related parms item that stores the value,
- how many values follow the keyword, and
- the type of data for those values (1: int, 2: string)
For example, the 'WAITFOR' subfunction has two keyword operands 'poll'
and 'maxwait', and each of them take one additional operand (time in seconds)
which is an int.
"""
keyOpsList = {
'CANCEL': {'--showparms': ['showParms', 0, 0]},
'HELP': {},
'MODIFY': {
'--maxquiesce': ['maxQuiesce', 1, 1],
'--maxtotal': ['maxTotal', 1, 1],
'--showparms': ['showParms', 0, 0]},
'MOVE': {
'--destination': ['dest', 1, 2],
'--forcearch': ['forcearch', 0, 0],
'--forcedomain': ['forcedomain', 0, 0],
'--forcestorage': ['forcestorage', 0, 0],
'--immediate': ['immediate', 0, 0],
'--maxquiesce': ['maxQuiesce', 1, 1],
'--maxtotal': ['maxTotal', 1, 1],
'--showparms': ['showParms', 0, 0]},
'STATUS': {
'--all': ['all', 0, 0],
'--incoming': ['incoming', 0, 0],
'--outgoing': ['outgoing', 0, 0],
'--showparms': ['showParms', 0, 0]},
'TEST': {
'--destination': ['dest', 1, 2],
'--showparms': ['showParms', 0, 0]},
'VERSION': {},
}
def cancelMigrate(rh):
    """
    Cancel an existing VMRelocate request.

    Input:
       Request Handle with the following properties:
          function    - 'MIGRATEVM'
          subfunction - 'CANCEL'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.cancelMigrate")

    parms = ["-T", rh.userid, "-k", "action=CANCEL"]
    results = invokeSMCLI(rh, "VMRELOCATE", parms)

    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

        if results['rc'] == 8 and results['rs'] == 3000:
            if "1926" in results['response']:
                # No relocation in progress
                msg = msgs.msg['0419'][1] % (modId, rh.userid)
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0419'][0])
            else:
                # More details in message codes
                lines = results['response'].split("\n")
                for line in lines:
                    if "Details:" in line:
                        codes = line.split(' ', 1)[1]
                        # Bug fix: catalog key was '420'; the key used
                        # everywhere else in this module is '0420', so the
                        # old code raised KeyError on this path.
                        msg = msgs.msg['0420'][1] % (modId,
                                                     "VMRELOCATE Cancel",
                                                     rh.userid, codes)
                        rh.printLn("ES", msg)

    rh.printSysLog("Exit migrateVM.cancelMigrate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def doIt(rh):
    """
    Execute the subfunction named in the request handle.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms') is True:
        handlerName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: migrateVM." + handlerName +
            "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey != 'showParms':
                rh.printLn("N", "    " + parmKey + ": " +
                    str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit migrateVM.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getStatus(rh):
    """
    Get status of a VMRelocate request.

    Input:
       Request Handle with the following properties:
          function    - 'MIGRATEVM'
          subfunction - 'STATUS'
          userid      - userid of the virtual machine
          parms['all']      - If present, set status_target to ALL.
          parms['incoming'] - If present, set status_target to INCOMING.
          parms['outgoing'] - If present, set status_target to OUTGOING.
          If parms contains none of 'all', 'incoming' or 'outgoing', the
          status_target is set to 'USER <userid>'.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.getStatus")

    # Pick the status_target keyword value from the supplied options.
    if 'all' in rh.parms:
        statusTarget = "ALL"
    elif 'incoming' in rh.parms:
        statusTarget = "INCOMING"
    elif 'outgoing' in rh.parms:
        statusTarget = "OUTGOING"
    else:
        statusTarget = "USER " + rh.userid
    parms = ["-T", rh.userid, "-k", "status_target=" + statusTarget]

    results = invokeSMCLI(rh, "VMRELOCATE_Status", parms)
    if results['overallRC'] == 0:
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
        if results['rc'] == 4 and results['rs'] == 3001:
            # No relocation in progress
            msg = msgs.msg['0419'][1] % (modId, rh.userid)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0419'][0])

    rh.printSysLog("Exit migrateVM.getStatus, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printLn("N", "Version: %s" % version)
    return 0
def help(rh):
    """
    Produce help output specifically for MigrateVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Full help is the command synopsis followed by the operand descriptions.
    showInvLines(rh)
    showOperandLines(rh)
    return 0
def modifyMigrate(rh):
    """
    Modify an existing VMRelocate request.

    Input:
       Request Handle with the following properties:
          function    - 'MIGRATEVM'
          subfunction - 'MODIFY'
          userid      - userid of the virtual machine
          parms['maxQuiesce'] - maximum quiesce time in seconds,
                                or -1 to indicate no limit.
          parms['maxTotal']   - maximum total time in seconds,
                                or -1 to indicate no limit.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.modifyMigrate")

    parms = ["-T", rh.userid]

    # Translate each optional time limit into its SMAPI keyword;
    # -1 maps to NOLIMIT.
    for parmName, smapiKey in (('maxQuiesce', 'max_quiesce'),
                               ('maxTotal', 'max_total')):
        if parmName in rh.parms:
            limit = rh.parms[parmName]
            value = "NOLIMIT" if limit == -1 else str(limit)
            parms.extend(["-k", smapiKey + "=" + value])

    results = invokeSMCLI(rh, "VMRELOCATE_Modify", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

        if results['rc'] == 8 and results['rs'] == 3010:
            if "1926" in results['response']:
                # No relocations in progress
                msg = msgs.msg['0419'][1] % (modId, rh.userid)
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0419'][0])
            else:
                # More details in message codes
                for line in results['response'].split("\n"):
                    if "Details:" in line:
                        codes = line.split(' ', 1)[1]
                        msg = msgs.msg['0420'][1] % (modId,
                                                     "VMRELOCATE Modify",
                                                     rh.userid, codes)
                        rh.printLn("ES", msg)

    rh.printSysLog("Exit migrateVM.modifyMigrate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def moveVM(rh):
    """
    Initiate a VMRelocate request to move a userid.

    Input:
       Request Handle with the following properties:
          function    - 'MIGRATEVM'
          subfunction - 'MOVE'
          userid      - userid of the virtual machine
          parms['dest']         - target SSI member
          parms['forcearch']    - if present, force=ARCHITECTURE is set.
          parms['forcedomain']  - if present, force=DOMAIN is set.
          parms['forcestorage'] - if present, force=STORAGE is set.
          parms['immediate']    - if present, immediate=YES is set.
          parms['maxQuiesce']   - maximum quiesce time in seconds,
                                  or -1 to indicate no limit.
          parms['maxTotal']     - maximum total time in seconds,
                                  or -1 to indicate no limit.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.moveVM")

    parms = ["-T", rh.userid, "-k", "action=MOVE"]

    if 'dest' in rh.parms:
        parms.extend(["-k", "destination=" + rh.parms['dest']])

    # Build the force= keyword from whichever force options were given.
    # Each selected term carries a trailing blank, as in the original
    # concatenation-built value.
    forceTerms = [term for flag, term in (('forcearch', 'ARCHITECTURE'),
                                          ('forcedomain', 'DOMAIN'),
                                          ('forcestorage', 'STORAGE'))
                  if flag in rh.parms]
    if forceTerms:
        parms.extend(["-k",
                      "force=" + ''.join(t + " " for t in forceTerms)])

    if 'immediate' in rh.parms:
        parms.extend(["-k", "immediate=YES"])

    # Optional time limits; -1 maps to NOLIMIT.
    for parmName, smapiKey in (('maxQuiesce', 'max_quiesce'),
                               ('maxTotal', 'max_total')):
        if parmName in rh.parms:
            limit = rh.parms[parmName]
            value = "NOLIMIT" if limit == -1 else str(limit)
            parms.extend(["-k", smapiKey + "=" + value])

    results = invokeSMCLI(rh, "VMRELOCATE", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

        if results['rc'] == 8 and results['rs'] == 3000:
            if "0045" in results['response']:
                # User not logged on
                msg = msgs.msg['0418'][1] % (modId, rh.userid)
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0418'][0])
            else:
                # More details in message codes
                for line in results['response'].split("\n"):
                    if "Details:" in line:
                        codes = line.split(' ', 1)[1]
                        msg = msgs.msg['0420'][1] % (modId,
                                                     "VMRELOCATE Move",
                                                     rh.userid, codes)
                        rh.printLn("ES", msg)

    rh.printSysLog("Exit migrateVM.moveVM, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int; concatenating it directly to a str
        # raised TypeError and masked the real parse error.
        rh.printSysLog("Exit migrateVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit migrateVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")

    # Invocation prefix shared by every synopsis line.
    prefix = "  python " + rh.cmdName + " MigrateVM "
    synopsis = (
        prefix + "<userid> cancel",
        prefix + "help",
        prefix + "<userid> modify [--maxtotal <maxVal>]",
        "          [--maxquiesce <maxQVal>]",
        prefix + "<userid> move --destination <ssiMember>",
        "          [--immediate] [--forcearch] " +
        "[--forcedomain] [--forcestorage]",
        "          [--maxtotal <maxVal>] [--maxquiesce <maxQVal>]",
        prefix + "<userid> status [--all | --incoming | --outgoing]",
        prefix + "<userid> test --destination <ssiMember>",
        prefix + "version",
    )
    for synopsisLine in synopsis:
        rh.printLn("N", synopsisLine)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the MigrateVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")

    # One entry per output line; continuation lines carry no keyword.
    subfunctionHelp = (
        "      cancel        - cancels the relocation of the specified " +
        "virtual machine.",
        "      help          - Displays this help information.",
        "      modify        - modifies the time limits associated with " +
        "the relocation already",
        "                      in progress .",
        "      move          - moves the specified virtual machine, " +
        "while it continues to run,",
        "                      to the specified system within the SSI " +
        "cluster.",
        "      status        - requests information about relocations " +
        "currently in progress.",
        "      test          - tests the specified virtual machine and " +
        "reports whether or not",
        "                      it is eligible to be relocated to the " +
        "specified system.",
        "      version       - show the version of the power function",
    )
    for helpLine in subfunctionHelp:
        rh.printLn("N", helpLine)

    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
        operandHelp = (
            "      --all                 - All relocations",
            "      --destination <dest>  - Specifies the SSI name of the " +
            "target destination",
            "                              z/VM system ",
            "      --forcearch           - force relocations past " +
            "architecture checks.",
            "      --forcedomain         - force relocations past domain " +
            "checks.",
            "      --forcestorage        - force relocations past " +
            "storage checks.",
            "      --immediate           - causes the VMRELOCATE command " +
            "to do one early",
            "                              pass through virtual machine " +
            "storage and then go",
            "                              directly to the quiesce stage.",
            "      --incoming            - Incoming relocations",
            "      --maxquiesce <maxQVal> - indicates the maximum quiesce " +
            "time (in seconds)",
            "                               for this relocation.",
            "      --maxtotal <maxVal>   - indicates the maximum total " +
            "time (in seconds)",
            "                              for relocation to complete.",
            "      --outgoing            - Out-going relocations",
            "      <userid>              - Userid of the target virtual " +
            "machine",
        )
        for helpLine in operandHelp:
            rh.printLn("N", helpLine)

    return
def testMigrate(rh):
    """
    Test the ability to use VMRelocate on the target userid.

    Input:
       Request Handle with the following properties:
          function    - 'MIGRATEVM'
          subfunction - 'TEST'
          userid      - userid of the virtual machine
          parms['dest'] - Target SSI system.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter migrateVM.testMigrate")

    parms = ["-T", rh.userid, "-k", "action=TEST"]
    if 'dest' in rh.parms:
        parms.extend(["-k", "destination=" + rh.parms['dest']])

    results = invokeSMCLI(rh, "VMRELOCATE", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

        if results['rc'] == 4 and results['rs'] == 3000:
            if "0045" in results['response']:
                # User not logged on
                msg = msgs.msg['0418'][1] % (modId, rh.userid)
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0418'][0])
            else:
                # More details in message codes
                lines = results['response'].split("\n")
                for line in lines:
                    if "Details:" in line:
                        codes = line.split(' ', 1)[1]
                        # Bug fix: the label said "VMRELOCATE Move"
                        # (copy/paste from moveVM) and the message was
                        # built but never printed.
                        msg = msgs.msg['0420'][1] % (modId,
                                                     "VMRELOCATE Test",
                                                     rh.userid, codes)
                        rh.printLn("ES", msg)

    rh.printSysLog("Exit migrateVM.testMigrate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
import time
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import execCmdThruIUCV, invokeSMCLI
from smtLayer.vmUtils import isLoggedOn
from smtLayer.vmUtils import waitForOSState, waitForVMState
modId = 'PVM'
vmOSUpStates = ['on', 'up']
vmOSUpDownStates = ['down', 'off', 'on', 'up']
version = "1.0.0"
"""
List of subfunction handlers.
Each subfunction contains a list that has:
Readable name of the routine that handles the subfunction,
Code for the function call.
"""
subfuncHandler = {
'HELP': ['help', lambda rh: help(rh)],
'ISREACHABLE': ['checkIsReachable',
lambda rh: checkIsReachable(rh)],
'OFF': ['deactivate', lambda rh: deactivate(rh)],
'ON': ['activate', lambda rh: activate(rh)],
'PAUSE': ['pause', lambda rh: pause(rh)],
'REBOOT': ['reboot', lambda rh: reboot(rh)],
'RESET': ['reset', lambda rh: reset(rh)],
'SOFTOFF': ['softDeactivate', lambda rh: softDeactivate(rh)],
'STATUS': ['getStatus', lambda rh: getStatus(rh)],
'UNPAUSE': ['unpause', lambda rh: unpause(rh)],
'VERSION': ['getVersion', lambda rh: getVersion(rh)],
'WAIT': ['wait', lambda rh: wait(rh)],
}
"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
- Human readable name of the operand,
- Property in the parms dictionary to hold the value,
- Is it required (True) or optional (False),
- Type of data (1: int, 2: string).
"""
posOpsList = {}
"""
List of additional operands/options supported by the various subfunctions.
The dictionary followng the subfunction name uses the keyword from the
command as a key. Each keyword has a dictionary that lists:
- the related parms item that stores the value,
- how many values follow the keyword, and
- the type of data for those values (1: int, 2: string)
For example, the 'WAIT' subfunction has a 'poll' operand that takes
one additional operand (time in seconds) which is an int.
While the 'showparms' operand is just the keyword and has no additional
value portion.
"""
keyOpsList = {
'HELP': {},
'ISREACHABLE': {
'--showparms': ['showParms', 0, 0]},
'OFF': {
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--wait': ['wait', 0, 0]},
'ON': {
'--state': ['desiredState', 1, 2],
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--wait': ['wait', 0, 0]},
'PAUSE': {'--showparms': ['showParms', 0, 0]},
'REBOOT': {
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--wait': ['wait', 0, 0]},
'RESET': {
'--state': ['desiredState', 1, 2],
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--wait': ['wait', 0, 0]},
'SOFTOFF': {
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--wait': ['wait', 0, 0]},
'STATUS': {
'--showparms': ['showParms', 0, 0]
},
'UNPAUSE': {
'--showparms': ['showParms', 0, 0]},
'VERSION': {},
'WAIT': {
'--maxwait': ['maxWait', 1, 1],
'--poll': ['poll', 1, 1],
'--showparms': ['showParms', 0, 0],
'--state': ['desiredState', 1, 2]},
}
def activate(rh):
    """
    Activate a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'ON'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state. Optional,
                                  unless 'maxQueries' is specified.
          parms['maxQueries']   - Maximum number of queries to issue.
                                  Optional.
          parms['maxWait']      - Maximum time to wait in seconds.
                                  Optional, unless 'maxQueries' is specified.
          parms['poll']         - Polling interval in seconds. Optional,
                                  unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.activate, userid: " + rh.userid)

    smcliResults = invokeSMCLI(rh, "Image_Activate", ["-T", rh.userid])
    # rc 200 / rs 8 means the image is already active, which satisfies
    # the caller's intent; leave the ReqHandle results untouched.
    alreadyActive = (smcliResults['overallRC'] == 8 and
                     smcliResults['rc'] == 200 and
                     smcliResults['rs'] == 8)
    if smcliResults['overallRC'] != 0 and not alreadyActive:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to be in the desired state of:
        # OS is 'up' and reachable or VM is 'on'.
        if rh.parms['desiredState'] == 'up':
            waiter = waitForOSState
        else:
            waiter = waitForVMState
        results = waiter(
            rh,
            rh.userid,
            rh.parms['desiredState'],
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
        if results['overallRC'] == 0:
            rh.printLn("N", "%s: %s" %
                (rh.userid, rh.parms['desiredState']))
        else:
            rh.updateResults(results)

    rh.printSysLog("Exit powerVM.activate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def checkIsReachable(rh):
    """
    Check if a virtual machine is reachable.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'ISREACHABLE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       overallRC - 0: determined the status, non-zero: some weird failure
                   while trying to execute a command on the guest via IUCV
       rc - RC returned from execCmdThruIUCV
       rs - 0: not reachable, 1: reachable
    """
    rh.printSysLog("Enter powerVM.checkIsReachable, userid: " +
        rh.userid)

    # A failing execCmdThruIUCV is an acceptable way of determining the
    # system is unreachable; its error message is deliberately dropped.
    results = execCmdThruIUCV(rh, rh.userid, "echo 'ping'")
    reachable = 1 if results['overallRC'] == 0 else 0
    stateWord = "reachable" if reachable else "unreachable"
    rh.printLn("N", rh.userid + ": " + stateWord)

    rh.updateResults({"rs": reachable})
    rh.printSysLog("Exit powerVM.checkIsReachable, rc: 0")
    return 0
def deactivate(rh):
    """
    Deactivate a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'OFF'
          userid      - userid of the virtual machine
          parms['maxQueries'] - Maximum number of queries to issue.
                                Optional.
          parms['maxWait']    - Maximum time to wait in seconds. Optional,
                                unless 'maxQueries' is specified.
          parms['poll']       - Polling interval in seconds. Optional,
                                unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.deactivate, userid: " +
        rh.userid)

    results = invokeSMCLI(rh, "Image_Deactivate",
                          ["-T", rh.userid, "-f", "IMMED"])
    # rc 200 / rs 12 or 16 is tolerable: the machine is already in, or is
    # going into, the state we want it to enter.
    alreadyOff = (results['overallRC'] == 8 and results['rc'] == 200 and
                  results['rs'] in (12, 16))
    if results['overallRC'] == 0:
        pass
    elif alreadyOff:
        rh.printLn("N", rh.userid + ": off")
        rh.updateResults({}, reset=1)
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    # Optionally poll until the machine is actually logged off.
    if results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        results = waitForVMState(
            rh,
            rh.userid,
            'off',
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": off")
        else:
            rh.updateResults(results)

    rh.printSysLog("Exit powerVM.deactivate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def doIt(rh):
    """
    Execute the subfunction named in the request handle.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms') is True:
        handlerName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: powerVM." + handlerName +
            "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey != 'showParms':
                rh.printLn("N", "    " + parmKey + ": " +
                    str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit powerVM.doIt, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getStatus(rh):
    """
    Get the power (logon/off) status of a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'STATUS'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       results['overallRC'] - 0: ok, non-zero: error
       if ok:
          results['rc'] - 0: for both on and off cases
          results['rs'] - 0: powered on
          results['rs'] - 1: powered off
    """
    rh.printSysLog("Enter powerVM.getStatus, userid: " +
        rh.userid)

    state = isLoggedOn(rh, rh.userid)
    if state['overallRC'] == 0:
        # rs == 0 means the machine is logged on; anything else is off.
        powerState = "on" if state['rs'] == 0 else "off"
        rh.printLn("N", rh.userid + ": " + powerState)
    # On an unexpected error nothing is printed here; the failing
    # results still flow into the request handle below.
    rh.updateResults(state)

    rh.printSysLog("Exit powerVM.getStatus, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function handler.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printLn("N", "Version: %s" % version)
    return 0
def help(rh):
    """
    Produce help output specifically for PowerVM functions.

    Emits the command synopsis followed by the operand descriptions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Validates the userid and subfunction operands, parses the
    remaining positional and keyword operands, applies the
    desiredState defaults, and derives parms['maxQueries'] from
    maxWait/poll when the caller requested waiting.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Fix: overallRC is an int; convert before concatenating,
        # otherwise this error path raised a TypeError.
        rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3               # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    waiting = 0
    if rh.results['overallRC'] == 0:
        if rh.subfunction == 'WAIT':
            waiting = 1
            if rh.parms['desiredState'] not in vmOSUpDownStates:
                # Desired state is not: down, off, on or up.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms['desiredState'], ", ".join(vmOSUpDownStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

    if (rh.results['overallRC'] == 0 and 'wait' in rh.parms):
        waiting = 1
        if 'desiredState' not in rh.parms:
            if rh.subfunction in ['ON', 'RESET', 'REBOOT']:
                rh.parms['desiredState'] = 'up'
            else:
                # OFF and SOFTOFF default to 'off'.
                rh.parms['desiredState'] = 'off'

    if rh.results['overallRC'] == 0 and waiting == 1:
        if rh.subfunction == 'ON' or rh.subfunction == 'RESET':
            if ('desiredState' not in rh.parms or
                rh.parms['desiredState'] not in vmOSUpStates):
                # Desired state is not: on or up.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms['desiredState'], ", ".join(vmOSUpStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

        if rh.results['overallRC'] == 0:
            if 'maxWait' not in rh.parms:
                rh.parms['maxWait'] = 300
            if 'poll' not in rh.parms:
                rh.parms['poll'] = 15
            # Fix: use floor division so maxQueries stays an int under
            # Python 3 (true division would yield a float query count).
            rh.parms['maxQueries'] = ((rh.parms['maxWait'] +
                rh.parms['poll'] - 1) // rh.parms['poll'])
            # If we had to do some rounding, give a warning
            # out to the command line user that the wait
            # won't be what they expected.
            if rh.parms['maxWait'] % rh.parms['poll'] != 0:
                msg = msgs.msg['0017'][1] % (modId,
                    rh.parms['maxWait'], rh.parms['poll'],
                    rh.parms['maxQueries'] * rh.parms['poll'],
                    rh.parms['maxQueries'])
                rh.printLn("W", msg)

    rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def pause(rh):
    """
    Pause a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'PAUSE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.pause, userid: " + rh.userid)

    # Ask SMAPI to pause the virtual machine.
    smcliResults = invokeSMCLI(rh, "Image_Pause",
                               ["-T", rh.userid, "-k", "PAUSE=YES"])
    if smcliResults['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    rh.printSysLog("Exit powerVM.pause, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def reboot(rh):
    """
    Reboot a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'REBOOT'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state. Optional, unless
                                  'maxQueries' is specified.
          parms['maxQueries']   - Maximum number of queries to issue.
                                  Optional.
          parms['maxWait']      - Maximum time to wait in seconds.
                                  Optional, unless 'maxQueries' is
                                  specified.
          parms['poll']         - Polling interval in seconds. Optional,
                                  unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.reboot, userid: " + rh.userid)

    # Ask the guest OS to reboot itself over IUCV.
    cmdResults = execCmdThruIUCV(rh, rh.userid, "shutdown -r now",
                                 timeout=60)
    if cmdResults['overallRC'] != 0:
        # Command failed to execute using IUCV.
        rh.printLn("ES", cmdResults['response'])
        rh.updateResults(cmdResults)

    if rh.results['overallRC'] == 0:
        # Wait for the OS to go down.  A timeout here is not folded
        # into the request handle; processing continues below.
        downResults = waitForOSState(rh, rh.userid, "down",
                                     maxQueries=30, sleepSecs=10)
        if downResults['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": down (interim state)")

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Optionally wait for the OS to come back up.
        upResults = waitForOSState(rh,
                                   rh.userid,
                                   'up',
                                   maxQueries=rh.parms['maxQueries'],
                                   sleepSecs=rh.parms['poll'])
        if upResults['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": up")
        else:
            rh.updateResults(upResults)

    rh.printSysLog("Exit powerVM.reboot, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def reset(rh):
    """
    Reset a virtual machine: log it off z/VM, wait for the logoff to
    complete, log it back on, and optionally wait for it to reach the
    desired state.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'RESET'
          userid      - userid of the virtual machine
          parms['maxQueries']   - Maximum number of queries to issue.
                                  Optional.
          parms['maxWait']      - Maximum time to wait in seconds. Optional,
                                  unless 'maxQueries' is specified.
          parms['poll']         - Polling interval in seconds. Optional,
                                  unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.reset, userid: " + rh.userid)

    # Log off the user
    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_Deactivate", parms)
    if results['overallRC'] != 0:
        if results['rc'] == 200 and results['rs'] == 12:
            # Tolerated error. Machine is already in the desired state,
            # so clear the failure and continue with the logon phase.
            results['overallRC'] = 0
            results['rc'] = 0
            results['rs'] = 0
        else:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI

    # Wait for the logoff to complete
    if results['overallRC'] == 0:
        results = waitForVMState(rh, rh.userid, "off",
            maxQueries=30, sleepSecs=10)

    # Log the user back on
    if results['overallRC'] == 0:
        parms = ["-T", rh.userid]
        results = invokeSMCLI(rh, "Image_Activate", parms)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI

    # Optionally wait for the machine/OS to reach the desired state.
    # NOTE(review): assumes parms['desiredState'] is always present when
    # 'maxQueries' is set — parseCmdline appears to default it for
    # waiting subfunctions; confirm no other caller skips that.
    if results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        if rh.parms['desiredState'] == 'up':
            # 'up' refers to the guest OS, so poll OS reachability.
            results = waitForOSState(
                rh,
                rh.userid,
                rh.parms['desiredState'],
                maxQueries=rh.parms['maxQueries'],
                sleepSecs=rh.parms['poll'])
        else:
            # Otherwise poll the z/VM logon state.
            results = waitForVMState(
                rh,
                rh.userid,
                rh.parms['desiredState'],
                maxQueries=rh.parms['maxQueries'],
                sleepSecs=rh.parms['poll'])
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": " +
                rh.parms['desiredState'])
        else:
            rh.updateResults(results)

    rh.printSysLog("Exit powerVM.reset, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis

    Input:
       Request Handle
    """
    if rh.subfunction != '':
        # Full usage text is shown only when a subfunction was named.
        usageLines = [
            "Usage:",
            " python " + rh.cmdName + " PowerVM <userid>",
            " [isreachable | pause | status | unpause]",
            " python " + rh.cmdName + " PowerVM <userid>",
            " [on | reset] --wait --state [on | up] --maxwait <secs>",
            " --poll <secs>",
            " python " + rh.cmdName + " PowerVM <userid>",
            " [off | reboot | softoff] --maxwait <secs> --poll <secs>",
            " python " + rh.cmdName + " PowerVM <userid> wait",
            " --state [down | on | off | up] --maxwait <secs>",
            " --poll <secs>",
        ]
        for usageLine in usageLines:
            rh.printLn("N", usageLine)
    rh.printLn("N", " python " + rh.cmdName + " PowerVM help")
    rh.printLn("N", " python " + rh.cmdName + " PowerVM version")
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    # Header differs depending on whether we came in via HELP.
    if rh.function == 'HELP':
        rh.printLn("N", " For the PowerVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")

    subfunctionLines = [
        " help - Displays this help information.",
        " isreachable - Determine whether the virtual OS in a virtual machine",
        " is reachable",
        " on - Log on the virtual machine",
        " off - Log off the virtual machine",
        " pause - Pause a virtual machine",
        " reboot - Issue a shutdown command to reboot the OS in a virtual",
        " machine",
        " reset - Power a virtual machine off and then back on",
        " softoff - Issue a shutdown command to shutdown the OS in a virtual",
        " machine and then log the virtual machine off z/VM.",
        " status - show the log on/off status of the virtual machine",
        " unpause - Unpause a virtual machine",
        " wait - Wait for the virtual machine to go into the specified",
        " state of either:",
        " down: virtual machine's OS is not reachable with IUCV",
        " off: virtual machine is logged off",
        " on: virtual machine is logged on",
        " up: virtual machine's OS is reachable with IUCV",
        " version - show the version of the power function",
    ]
    for subfunctionLine in subfunctionLines:
        rh.printLn("N", subfunctionLine)

    if rh.subfunction != '':
        # Operand descriptions, shown only when a subfunction was named.
        operandLines = [
            " <userid> - Userid of the target virtual machine",
            " --maxwait <secs> - Maximum time in seconds to wait",
            " --poll <secs> - Seconds to wait between polling",
            " --state [down | off | on | up] - Desired state for virtual machine",
            " (on or off) or for the operating system (down or up).",
            " --wait - wait for the machine to go into the desired state.",
        ]
        rh.printLn("N", "Operand(s):")
        for operandLine in operandLines:
            rh.printLn("N", operandLine)
    return
def softDeactivate(rh):
    """
    Deactivate a virtual machine by first shutting down Linux and
    then log it off.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'SOFTOFF'
          userid      - userid of the virtual machine
          parms['maxQueries']   - Maximum number of queries to issue.
                                  Optional.
          parms['maxWait']      - Maximum time to wait in seconds.
                                  Optional, unless 'maxQueries' is
                                  specified.
          parms['poll']         - Polling interval in seconds. Optional,
                                  unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.softDeactivate, userid: " +
        rh.userid)

    # Probe the guest OS over IUCV before attempting a clean shutdown.
    strCmd = "echo 'ping'"
    iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)

    if iucvResults['overallRC'] == 0:
        # We could talk to the machine, tell it to shutdown nicely.
        strCmd = "shutdown -h now"
        iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd, timeout=60)
        if iucvResults['overallRC'] == 0:
            # Give the OS a moment to shut down before forcing logoff.
            time.sleep(15)
        else:
            # Shutdown failed.  Let CP take down the system
            # after we log the results.
            rh.printSysLog("powerVM.softDeactivate " + rh.userid +
                " is unreachable. Treating it as already shutdown.")
    else:
        # Could not ping the machine.  Treat it as a success
        # after we log the results.
        rh.printSysLog("powerVM.softDeactivate " + rh.userid +
            " is unreachable. Treating it as already shutdown.")

    # Tell z/VM to log off the system.
    parms = ["-T", rh.userid]
    smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
    if smcliResults['overallRC'] == 0:
        pass
    elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
        (smcliResults['rs'] == 12 or smcliResults['rs'] == 16)):
        # Fix: removed a stray unary '+' before the second comparison.
        # Tolerable error.
        # Machine is already logged off or is logging off.
        rh.printLn("N", rh.userid + " is already logged off.")
    else:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to log off.
        waitResults = waitForVMState(
            rh,
            rh.userid,
            'off',
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
        if waitResults['overallRC'] == 0:
            # Fix: close the quote around the userid in the message.
            rh.printLn("N", "Userid '" + rh.userid +
                "' is in the desired state: off")
        else:
            rh.updateResults(waitResults)

    rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def unpause(rh):
    """
    Unpause a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'UNPAUSE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.unpause, userid: " + rh.userid)

    # Ask SMAPI to lift the pause on the virtual machine.
    smcliResults = invokeSMCLI(rh, "Image_Pause",
                               ["-T", rh.userid, "-k", "PAUSE=NO"])
    if smcliResults['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    rh.printSysLog("Exit powerVM.unpause, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def wait(rh):
    """
    Wait for the virtual machine to go into the specified state.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'WAIT'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state
          parms['maxQueries']   - Maximum number of queries to issue
          parms['maxWait']      - Maximum time to wait in seconds
          parms['poll']         - Polling interval in seconds

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.wait, userid: " + rh.userid)

    if (rh.parms['desiredState'] == 'off' or
            rh.parms['desiredState'] == 'on'):
        # 'off' and 'on' are z/VM logon states: poll the VM state.
        results = waitForVMState(
            rh,
            rh.userid,
            rh.parms['desiredState'],
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
    else:
        # 'down' and 'up' describe the guest OS: poll OS reachability.
        results = waitForOSState(
            rh,
            rh.userid,
            rh.parms['desiredState'],
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])

    if results['overallRC'] == 0:
        rh.printLn("N", rh.userid + ": " + rh.parms['desiredState'])
    else:
        rh.updateResults(results)

    rh.printSysLog("Exit powerVM.wait, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
import re
import subprocess
from subprocess import CalledProcessError
import time
from smtLayer import msgs
from smtLayer import vmStatus
modId = 'VMU'
version = '1.0.0' # Version of this script
def disableEnableDisk(rh, userid, vaddr, option):
    """
    Disable or enable a disk.

    Input:
       Request Handle:
          owning userid
          virtual address
          option ('-e': enable, '-d': disable)

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - rc from the chccwdev command or IUCV transmission.
          rs        - rs from the chccwdev command or IUCV transmission.
          results   - possible error message from the IUCV transmission.
    """
    rh.printSysLog("Enter vmUtils.disableEnableDisk, userid: " + userid +
        " addr: " + vaddr + " option: " + option)

    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'response': ''}

    # Success of online/offline is not guaranteed on the first try, so
    # retry with increasing delays: the disk may be detached right
    # after '-d', or used right after '-e', and must really be done.
    strCmd = "sudo /sbin/chccwdev " + option + " " + vaddr + " 2>&1"
    retryDelays = [0.1, 0.4, 1, 1.5, 3, 7, 15, 32, 30, 30,
                   60, 60, 60, 60, 60]
    for delay in retryDelays:
        results = execCmdThruIUCV(rh, userid, strCmd)
        if results['overallRC'] == 0:
            break
        if (results['overallRC'] == 2 and results['rc'] == 8 and
                results['rs'] == 1 and option == '-d'):
            # Linux does not know about the disk being disabled.
            # Nothing to do; treat this as a success.
            results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'response': ''}
            break
        time.sleep(delay)

    rh.printSysLog("Exit vmUtils.disableEnableDisk, rc: " +
        str(results['overallRC']))
    return results
def execCmdThruIUCV(rh, userid, strCmd, hideInLog=[], timeout=None):
    """
    Send a command to a virtual machine using IUCV.

    Input:
       Request Handle
       Userid of the target virtual machine
       Command string to send
       (Optional) List of strCmd words (by index) to hide in
       sysLog by replacing the word with "<hidden>".
       (Optional) timeout value in seconds for executing this command.

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, 2: failure
          rc        - RC returned from iucvclnt if overallRC != 0.
          rs        - RS returned from iucvclnt if overallRC != 0.
          errno     - Errno returned from iucvclnt if overallRC != 0.
          response  - Output of the iucvclnt command or this routine.

    Notes:
       1) This routine does not use the Request Handle printLn function.
          This is because an error might be expected and we might desire
          to suppress it.  Instead, any error messages are put in the
          response dictionary element that is returned.
       2) hideInLog uses a mutable default argument; it is never mutated
          here, so the shared default is safe in this routine.
    """
    # Log the invocation, masking any words the caller flagged as
    # sensitive (e.g. passwords) before they reach syslog.
    if len(hideInLog) == 0:
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + strCmd +
                       " timeout: " + str(timeout))
    else:
        logCmd = strCmd.split(' ')
        for i in hideInLog:
            logCmd[i] = '<hidden>'
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + ' '.join(logCmd) +
                       " timeout: " + str(timeout))

    iucvpath = '/opt/zthin/bin/IUCV/'
    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
        'errno': 0,
        'response': [],
    }

    # The whole command string is passed as a single argv element to
    # iucvclnt, which forwards it to the guest for execution.
    cmd = ['sudo',
           iucvpath + "iucvclnt",
           userid,
           strCmd]
    try:
        results['response'] = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            close_fds=True,
            timeout=timeout)
        if isinstance(results['response'], bytes):
            results['response'] = bytes.decode(results['response'])
    except subprocess.TimeoutExpired as e:
        # Timeout exceptions from this system
        rh.printSysLog("Timeout exception in vmUtils.execCmdThruIUCV")
        results = msgs.msg['0501'][0]
        msg = msgs.msg['0501'][1] % (modId, strCmd,
                                     type(e).__name__, str(e))
        results['response'] = msg
    except CalledProcessError as e:
        # iucvclnt exited non-zero: mine its output for the real
        # return/reason codes and translate them to a message.
        msg = []
        results['overallRC'] = 2
        results['rc'] = e.returncode

        output = bytes.decode(e.output)

        # Pull the return code from the command output, if present.
        match = re.search('Return code (.+?),', output)
        if match:
            try:
                results['rc'] = int(match.group(1))
            except ValueError:
                # Return code in response from IUCVCLNT is not an int.
                msg = msgs.msg['0311'][1] % (modId, userid, strCmd,
                    results['rc'], match.group(1), output)

        if not msg:
            # We got the rc. Now, get the rs.
            match = re.search('Reason code (.+?)\.', output)
            if match:
                try:
                    results['rs'] = int(match.group(1))
                except ValueError:
                    # Reason code in response from IUCVCLNT is not an int.
                    msg = msgs.msg['0312'][1] % (modId, userid, strCmd,
                        results['rc'], match.group(1), output)

        # Map the iucvclnt return code to the matching error message.
        if msg:
            # Already produced an error message.
            pass
        elif results['rc'] == 1:
            # Command was not authorized or a generic Linux error.
            msg = msgs.msg['0313'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 2:
            # IUCV client parameter error.
            msg = msgs.msg['0314'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 4:
            # IUCV socket error
            msg = msgs.msg['0315'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 8:
            # Executed command failed
            msg = msgs.msg['0316'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 16:
            # File Transport failed
            msg = msgs.msg['0317'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 32:
            # IUCV server file was not found on this system.
            msg += msgs.msg['0318'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        else:
            # Unrecognized IUCV client error
            msg = msgs.msg['0319'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        results['response'] = msg
    except (subprocess.TimeoutExpired,
            PermissionError) as e:
        # NOTE(review): subprocess.TimeoutExpired is already caught by
        # the first handler above, so only PermissionError can actually
        # reach this branch — confirm before simplifying.
        results['overallRC'] = 3
        # return code
        results['rc'] = 64
        # reason code
        results['rs'] = 408
        output = str(e)
        msg = msgs.msg['0320'][1] % (modId, userid, strCmd,
            results['rc'], results['rs'], output)
        results['response'] = msg
    except Exception as e:
        # Other exceptions from this system (i.e. not the managed system).
        results = msgs.msg['0421'][0]
        msg = msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e))
        results['response'] = msg

    rh.printSysLog("Exit vmUtils.execCmdThruIUCV, rc: " +
        str(results['rc']))
    return results
def getPerfInfo(rh, useridlist):
    """
    Get the performance information for a userid

    Input:
       Request Handle
       Userid to query <- may change this to a list later.

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
          errno     - Errno returned from SMCLI if overallRC = 0.
          response  - Stripped and reformatted output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist)

    results = invokeSMCLI(rh, "Image_Performance_Query",
                          ["-T", rh.userid, "-c", "1"])
    if results['overallRC'] != 0:
        # SMCLI failed.
        rh.printLn("ES", results['response'])
        rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
            str(results['overallRC']))
        return results

    usedTime = 0
    totalCpu = 0
    totalMem = 0
    usedMem = 0
    try:
        # Pick the interesting fields out of the raw SMAPI response.
        for respLine in results['response'].split("\n"):
            if "Used CPU time:" in respLine:
                # Value is in us, need make it seconds
                usedTime = int(respLine.split()[3].strip('"')) / 1000000
            if "Guest CPUs:" in respLine:
                totalCpu = respLine.split()[2].strip('"')
            if "Max memory:" in respLine:
                # Value is in Kb, need to make it Mb
                totalMem = int(respLine.split()[2].strip('"')) / 1024
            if "Used memory:" in respLine:
                usedMem = int(respLine.split()[2].strip('"')) / 1024
    except Exception as e:
        msg = msgs.msg['0412'][1] % (modId, type(e).__name__,
            str(e), results['response'])
        rh.printLn("ES", msg)
        results['overallRC'] = 4
        results['rc'] = 4
        results['rs'] = 412

    if results['overallRC'] == 0:
        # Reformat the parsed values into the human-readable summary.
        results['response'] = ("Total Memory: %iM\n" % totalMem +
                               "Used Memory: %iM\n" % usedMem +
                               "Processors: %s\n" % totalCpu +
                               "CPU Used Time: %i sec\n" % usedTime)

    rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
        str(results['rc']))
    return results
def installFS(rh, vaddr, mode, fileSystem, diskType):
    """
    Install a filesystem on a virtual machine's dasd.

    Links the disk, formats/partitions it according to diskType
    (3390: dasdfmt + fdasd; 9336: fdisk), creates the requested
    file system on the first partition, then flushes and detaches
    the disk again.

    Input:
       Request Handle:
          userid - Userid that owns the disk
       Virtual address as known to the owning system.
       Access mode to use to get the disk.
       File system to create ('swap', 'xfs', or a mkfs -t type).
       Disk Type - 3390 or 9336

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
          errno     - Errno returned from SMCLI if overallRC = 0.
          response  - Output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.installFS, userid: " + rh.userid +
        ", vaddr: " + str(vaddr) + ", mode: " + mode + ", file system: " +
        fileSystem + ", disk type: " + diskType)
    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
        'errno': 0,
    }
    out = ''
    diskAccessed = False

    # Phase 1: Get access to the disk (link it and bring it online).
    cmd = ["sudo",
           "/opt/zthin/bin/linkdiskandbringonline",
           rh.userid,
           vaddr,
           mode]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        # Sometimes the disk is not ready: sleep and retry
        try_num = 0
        for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
            try_num += 1
            try:
                out = subprocess.check_output(cmd, close_fds=True)
                rh.printSysLog("Run `%s` successfully." % strCmd)
                diskAccessed = True
                break
            except CalledProcessError as e:
                if sleep_secs > 0:
                    rh.printSysLog("Num %d try `%s` failed ("
                                   "retry after %s seconds): "
                                   "rc=%d msg=%s" % (
                                       try_num, strCmd, sleep_secs,
                                       e.returncode, e.output))
                    time.sleep(sleep_secs)
                else:
                    # Retries exhausted (-1 sentinel): surface the error.
                    raise
        if isinstance(out, bytes):
            out = bytes.decode(out)
    except CalledProcessError as e:
        rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
            e.returncode, e.output))
        results = msgs.msg['0415'][0]
        results['rs'] = e.returncode
        rh.updateResults(results)
    except Exception as e:
        # All other exceptions.
        results = msgs.msg['0421'][0]
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    # Phase 2: Parse the Linux device name out of the link output.
    if results['overallRC'] == 0:
        """
        sample output:
        linkdiskandbringonline maint start time: 2017-03-03-16:20:48.011
        Success: Userid maint vdev 193 linked at ad35 device name dasdh
        linkdiskandbringonline exit time: 2017-03-03-16:20:52.150
        """
        match = re.search('Success:(.+?)\n', out)
        if match:
            parts = match.group(1).split()
            if len(parts) > 9:
                device = "/dev/" + parts[9]
            else:
                # 'Success:' line present but too short to hold a device.
                strCmd = ' '.join(cmd)
                rh.printLn("ES", msgs.msg['0416'][1] % (modId,
                    'Success:', 10, strCmd, out))
                results = msgs.msg['0416'][0]
                rh.updateResults(results)
        else:
            # No 'Success:' line at all in the link output.
            strCmd = ' '.join(cmd)
            rh.printLn("ES", msgs.msg['0417'][1] % (modId,
                'Success:', strCmd, out))
            results = msgs.msg['0417'][0]
            rh.updateResults(results)

    # Phase 3 (3390 only): low-level format the ECKD disk.
    if results['overallRC'] == 0 and diskType == "3390":
        # dasdfmt the disk
        cmd = ["sudo",
               "/sbin/dasdfmt",
               "-y",
               "-b", "4096",
               "-d", "cdl",
               "-v", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 4 (3390 only): let udev settle before partitioning.
    if results['overallRC'] == 0 and diskType == "3390":
        # Settle the devices so we can do the partition.
        strCmd = ("which udevadm &> /dev/null && " +
                  "udevadm settle || udevsettle")
        rh.printSysLog("Invoking: " + strCmd)
        try:
            subprocess.check_output(
                strCmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 5 (3390 only): create the single auto-sized partition.
    if results['overallRC'] == 0 and diskType == "3390":
        # Prepare the partition with fdasd
        cmd = ["sudo", "/sbin/fdasd", "-a", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd,
                stderr=subprocess.STDOUT, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 6 (9336 only): reset any existing partition table.
    if results['overallRC'] == 0 and diskType == "9336":
        # Delete the existing partition in case the disk already
        # has a partition in it.
        # NOTE(review): the here-doc sends 'g' but the syslog text
        # says 'd' — confirm which fdisk subcommand is intended.
        cmd = "sudo /sbin/fdisk " + device + " << EOF\ng\nw\nEOF"
        rh.printSysLog("Invoking: sudo /sbin/fdisk " + device +
                       " << EOF\\nd\\nw\\nEOF ")
        try:
            out = subprocess.check_output(cmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            rh.printSysLog("Run `%s` success with output: %s"
                           % (cmd, out))
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, cmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, cmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 7 (9336 only): create one primary partition with fdisk.
    if results['overallRC'] == 0 and diskType == "9336":
        # Prepare the partition with fdisk
        cmd = "sudo /sbin/fdisk " + device + " << EOF\nn\np\n1\n\n\nw\nEOF"
        rh.printSysLog("Invoking: sudo /sbin/fdisk " + device +
                       " << EOF\\nn\\np\\n1\\n\\n\\nw\\nEOF")
        try:
            # Sometimes the table is not ready: sleep and retry
            try_num = 0
            for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
                try_num += 1
                try:
                    out = subprocess.check_output(cmd,
                        stderr=subprocess.STDOUT,
                        close_fds=True,
                        shell=True)
                    rh.printSysLog("Run `%s` success with output: %s"
                                   % (cmd, out))
                    break
                except CalledProcessError as e:
                    if sleep_secs > 0:
                        rh.printSysLog("Num %d try `%s` failed ("
                                       "retry after %s seconds): "
                                       "rc=%d msg=%s" % (
                                           try_num, cmd, sleep_secs,
                                           e.returncode, e.output))
                        time.sleep(sleep_secs)
                    else:
                        raise
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, cmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, cmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 8: let udev settle again before making the file system.
    if results['overallRC'] == 0:
        # Settle the devices so we can do the partition.
        strCmd = ("which udevadm &> /dev/null && " +
                  "udevadm settle || udevsettle")
        rh.printSysLog("Invoking: " + strCmd)
        try:
            subprocess.check_output(
                strCmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 9: create the requested file system on partition 1.
    if results['overallRC'] == 0:
        # Install the file system into the disk.
        device = device + "1"       # Point to first partition
        if fileSystem == 'swap':
            cmd = ["sudo", "mkswap", device]
        elif fileSystem == 'xfs':
            cmd = ["sudo", "mkfs.xfs", "-f", device]
        else:
            cmd = ["sudo", "mkfs", "-F", "-t", fileSystem, device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            # Sometimes the device is not ready: sleep and retry
            try_num = 0
            for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
                try_num += 1
                try:
                    out = subprocess.check_output(cmd,
                        stderr=subprocess.STDOUT,
                        close_fds=True)
                    rh.printSysLog("Run `%s` successfully." % strCmd)
                    break
                except CalledProcessError as e:
                    if sleep_secs > 0:
                        rh.printSysLog("Num %d try `%s` failed ("
                                       "retry after %s seconds): "
                                       "rc=%d msg=%s" % (
                                           try_num, strCmd, sleep_secs,
                                           e.returncode, e.output))
                        time.sleep(sleep_secs)
                    else:
                        raise
            if isinstance(out, bytes):
                out = bytes.decode(out)
            rh.printLn("N", "File system: " + fileSystem +
                " is installed.")
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # Phase 10: flush and give back the disk, even after failures.
    # TODO: diskAccessed hard code to True, because if linkdiskandbringonline
    # failed, can not set diskAccessed. will leave DASD undetached.
    # So always try to disconnect the disk. If this fixed in the future, need
    # remove this.
    diskAccessed = True
    if diskAccessed:
        # NOTE(review): if the link step never yielded a device name,
        # 'device' is unbound here and this raises NameError — confirm
        # the intended cleanup behavior on early failure.
        # flush disk buffer before offline the disk.
        cmd = ["sudo", "/usr/sbin/blockdev", "--flushbufs", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except Exception as e:
            # log warning and ignore the exception
            wmesg = "Executing %(cmd)s failed: %(exp)s" % {'cmd': strCmd,
                'exp': str(e)}
            rh.printLn("WS", wmesg)

        # Give up the disk.
        cmd = ["sudo", "/opt/zthin/bin/offlinediskanddetach",
               rh.userid,
               vaddr]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    rh.printSysLog("Exit vmUtils.installFS, rc: " + str(results['rc']))
    return results
def invokeSMCLI(rh, api, parms, hideInLog=None):
    """
    Invoke SMCLI and parse the results.

    Input:
       Request Handle
       API name,
       SMCLI parms as an array
       (Optional) List of parms (by index) to hide in
       sysLog by replacing the parm with "<hidden>".

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
          errno     - Errno returned from SMCLI if overallRC = 0.
          response  - String output of the SMCLI command.

    Note:
       - If the first three words of the header returned from smcli
         do not contain words that represent valid integer
         values or contain too few words then one or more error
         messages are generated.  THIS SHOULD NEVER OCCUR !!!!
    """
    # Bug fix: default is None instead of a shared mutable list literal.
    if hideInLog is None:
        hideInLog = []

    if len(hideInLog) == 0:
        rh.printSysLog("Enter vmUtils.invokeSMCLI, userid: " +
            rh.userid + ", function: " + api +
            ", parms: " + str(parms))
    else:
        # Bug fix: log a *copy* of parms.  The original code aliased the
        # caller's list, so the '<hidden>' placeholders overwrote the real
        # parameter values that were later passed to smcli itself.
        logParms = list(parms)
        for i in hideInLog:
            logParms[i] = '<hidden>'
        rh.printSysLog("Enter vmUtils.invokeSMCLI, userid: " +
            rh.userid + ", function: " + api +
            ", parms: " + str(logParms))

    goodHeader = False

    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
        'errno': 0,
        'response': [],
        'strError': '',
        }

    cmd = []
    cmd.append('sudo')
    cmd.append('/opt/zthin/bin/smcli')
    cmd.append(api)
    cmd.append('--addRCheader')

    status = vmStatus.GetSMAPIStatus()

    try:
        smcliResp = subprocess.check_output(cmd + parms,
            close_fds=True)
        if isinstance(smcliResp, bytes):
            smcliResp = bytes.decode(smcliResp, errors='replace')
        # First line is the RC header; the rest is the real response.
        smcliResp = smcliResp.split('\n', 1)
        results['response'] = smcliResp[1]
        results['overallRC'] = 0
        results['rc'] = 0
        status.RecordSuccess()

    except CalledProcessError as e:
        status.RecordFail()
        strCmd = " ".join(cmd + parms)

        # Break up the RC header into its component parts.
        # Bug fix: decode e.output before comparing with '' -- the original
        # compared bytes to str, which is never equal under Python 3.
        outText = e.output
        if isinstance(outText, bytes):
            outText = bytes.decode(outText)
        if outText == '':
            smcliResp = ['']
        else:
            smcliResp = outText.split('\n', 1)
        if len(smcliResp) == 1:
            # No response body after the header line.  Add an empty body so
            # the later smcliResp[1] reference cannot raise IndexError.
            smcliResp.append('')

        # Split the header into its component pieces.
        rcHeader = smcliResp[0].split('(details)', 1)
        if len(rcHeader) == 0:
            rcHeader = ['', '']
        elif len(rcHeader) == 1:
            # No data after the details tag.  Add empty [1] value.
            rcHeader.append('')
        codes = rcHeader[0].split(' ')

        # Validate the rc, rs, and errno.
        if len(codes) < 3:
            # Unexpected number of codes.  Need at least 3.
            results = msgs.msg['0301'][0]
            results['response'] = msgs.msg['0301'][1] % (modId, api,
                strCmd, rcHeader[0], rcHeader[1])
        else:
            goodHeader = True
            # Convert the first word (overall rc from SMAPI) to an int
            # and set the SMT overall rc based on this value.
            orcError = False
            try:
                results['overallRC'] = int(codes[0])
                if results['overallRC'] not in [8, 24, 25]:
                    orcError = True
            except ValueError:
                goodHeader = False
                orcError = True
            if orcError:
                results['overallRC'] = 25    # SMCLI Internal Error
                results = msgs.msg['0302'][0]
                results['response'] = msgs.msg['0302'][1] % (modId,
                    api, codes[0], strCmd, rcHeader[0], rcHeader[1])

            # Convert the second word to an int and save as rc.
            try:
                results['rc'] = int(codes[1])
            except ValueError:
                goodHeader = False
                results = msgs.msg['0303'][0]
                results['response'] = msgs.msg['0303'][1] % (modId,
                    api, codes[1], strCmd, rcHeader[0], rcHeader[1])

            # Convert the third word to an int and save it as either
            # the rs or errno, depending on the overall rc.
            try:
                word3 = int(codes[2])
                if results['overallRC'] == 8:
                    results['rs'] = word3    # Must be an rs
                elif results['overallRC'] == 25:
                    results['errno'] = word3    # Must be the errno
                # We ignore word 3 for everyone else and default to 0.
            except ValueError:
                goodHeader = False
                results = msgs.msg['0304'][0]
                results['response'] = msgs.msg['0304'][1] % (modId,
                    api, codes[1], strCmd, rcHeader[0], rcHeader[1])

        results['strError'] = rcHeader[1].lstrip()

        if goodHeader:
            # Produce a message that provides the error info.
            results['response'] = msgs.msg['0300'][1] % (modId,
                api, results['overallRC'], results['rc'],
                results['rs'], results['errno'],
                strCmd, smcliResp[1])

    except Exception as e:
        # All other exceptions.
        strCmd = " ".join(cmd + parms)
        results = msgs.msg['0305'][0]
        results['response'] = msgs.msg['0305'][1] % (modId, strCmd,
            type(e).__name__, str(e))

    rh.printSysLog("Exit vmUtils.invokeSMCLI, rc: " +
        str(results['overallRC']))
    return results
def isLoggedOn(rh, userid):
    """
    Determine whether a virtual machine is logged on.

    Input:
       Request Handle
       userid being queried

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - 0: if we got status.  Otherwise, it is the
                      error return code from the commands issued.
          rs        - Based on rc value.  For rc==0, rs is:
                      0: if we determined it is logged on.
                      1: if we determined it is logged off.
    """
    rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)

    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
        }

    cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        # A zero return code means the userid is logged on.
        subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
    except CalledProcessError as e:
        # Bug fix: use a raw string so "\w" reaches the regex engine
        # instead of being treated as an invalid Python string escape.
        # HCPxxx045E/HCPxxx361E indicate the userid is not logged on.
        search_pattern = r'(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
        match = re.search(search_pattern, e.output)
        if match:
            # Not logged on
            results['rs'] = 1
        else:
            # Abnormal failure
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
    except Exception as e:
        # All other exceptions.
        results = msgs.msg['0421'][0]
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
        str(results['overallRC']) + " rc: " + str(results['rc']) +
        " rs: " + str(results['rs']))
    return results
def punch2reader(rh, userid, fileLoc, spoolClass):
    """
    Punch a file to a virtual reader of the specified virtual machine.

    The file is first punched to the *current* userid's reader, then its
    spool class is changed, and finally the spool file is transferred to
    the target userid's reader.  On any failure after the punch, the
    spool file is purged from the current userid so it is not leaked.

    Input:
       Request Handle - for general use and to hold the results
       userid         - userid of the virtual machine
       fileLoc        - File to send
       spoolClass     - Spool class

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter punch2reader.punchFile")
    results = {}

    # Setting rc to time out rc code as default and its changed during runtime
    results['rc'] = 9

    # Punch to the current user intially and then change the spool class.
    # Retry with increasing back-off while another vmur instance is active.
    cmd = ["sudo", "/usr/sbin/vmur", "punch", "-r", fileLoc]
    strCmd = ' '.join(cmd)
    for secs in [1, 2, 3, 5, 10]:
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                results['response'] = bytes.decode(results['response'])
            results['rc'] = 0
            rh.updateResults(results)
            break
        except CalledProcessError as e:
            results['response'] = e.output
            # Check if we have concurrent instance of vmur active
            to_find = "A concurrent instance of vmur is already active"
            to_find = to_find.encode()
            if results['response'].find(to_find) == -1:
                # Failure in VMUR punch update the rc
                results['rc'] = 7
                break
            else:
                # if concurrent vmur is active try after sometime
                rh.printSysLog("Punch in use. Retrying after " +
                    str(secs) + " seconds")
                time.sleep(secs)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['rc'] == 7:
        # Failure while issuing vmur command (For eg: invalid file given)
        msg = msgs.msg['0401'][1] % (modId, fileLoc, userid,
            results['response'])
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0401'][0])

    elif results['rc'] == 9:
        # Failure due to vmur timeout (every retry hit the concurrency lock)
        msg = msgs.msg['0406'][1] % (modId, fileLoc)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0406'][0])

    if rh.results['overallRC'] == 0:
        # On VMUR success change the class of the spool file
        # NOTE(review): assumes the vmur response contains at least one
        # number (the spool id); spoolId[0] would raise IndexError
        # otherwise -- confirm vmur's output format.
        spoolId = re.findall(r'\d+', str(results['response']))
        cmd = ["sudo", "vmcp", "change", "rdr", str(spoolId[0]), "class",
               spoolClass]
        strCmd = " ".join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                results['response'] = bytes.decode(results['response'])
            rh.updateResults(results)
        except CalledProcessError as e:
            msg = msgs.msg['0404'][1] % (modId,
                spoolClass,
                e.output)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0404'][0])
            # Class change failed
            # Delete the punched file from current userid
            cmd = ["sudo", "vmcp", "purge", "rdr", spoolId[0]]
            strCmd = " ".join(cmd)
            rh.printSysLog("Invoking: " + strCmd)
            try:
                results['response'] = subprocess.check_output(cmd,
                    close_fds=True,
                    stderr=subprocess.STDOUT)
                if isinstance(results['response'], bytes):
                    results['response'] = bytes.decode(results['response'])
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
            except CalledProcessError as e:
                msg = msgs.msg['0403'][1] % (modId,
                    spoolId[0],
                    e.output)
                rh.printLn("ES", msg)
            except Exception as e:
                # All other exceptions related to purge.
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
                rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                    type(e).__name__, str(e)))
        except Exception as e:
            # All other exceptions related to change rdr.
            results = msgs.msg['0421'][0]
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            rh.updateResults(msgs.msg['0421'][0])

    if rh.results['overallRC'] == 0:
        # Transfer the file from current user to specified user
        cmd = ["sudo", "vmcp", "transfer", "*", "rdr", str(spoolId[0]), "to",
               userid, "rdr"]
        strCmd = " ".join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                results['response'] = bytes.decode(results['response'])
            rh.updateResults(results)
        except CalledProcessError as e:
            msg = msgs.msg['0424'][1] % (modId,
                fileLoc,
                userid, e.output)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0424'][0])
            # Transfer failed so delete the punched file from current userid
            cmd = ["sudo", "vmcp", "purge", "rdr", spoolId[0]]
            strCmd = " ".join(cmd)
            rh.printSysLog("Invoking: " + strCmd)
            try:
                results['response'] = subprocess.check_output(cmd,
                    close_fds=True,
                    stderr=subprocess.STDOUT)
                if isinstance(results['response'], bytes):
                    results['response'] = bytes.decode(results['response'])
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
            except CalledProcessError as e:
                msg = msgs.msg['0403'][1] % (modId,
                    spoolId[0],
                    e.output)
                rh.printLn("ES", msg)
            except Exception as e:
                # All other exceptions related to purge.
                rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                    type(e).__name__, str(e)))
        except Exception as e:
            # All other exceptions related to transfer.
            results = msgs.msg['0421'][0]
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            rh.updateResults(msgs.msg['0421'][0])

    rh.printSysLog("Exit vmUtils.punch2reader, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def waitForOSState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
    """
    Wait for the operating system in the virtual machine to reach the
    indicated state.

    Input:
       Request Handle
       userid whose state is to be monitored
       Desired state, 'up' or 'down', case sensitive
       Maximum attempts to wait for desired state before giving up
       Sleep duration between waits

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from execCmdThruIUCV if overallRC = 0.
          rs        - RS returned from execCmdThruIUCV if overallRC = 0.
          errno     - Errno returned from execCmdThruIUCV if overallRC = 0.
          response  - Updated with an error message if wait times out.
    """
    rh.printSysLog("Enter vmUtils.waitForOSState, userid: " + userid +
        " state: " + desiredState +
        " maxWait: " + str(maxQueries) +
        " sleepSecs: " + str(sleepSecs))

    results = {}
    pingCmd = "echo 'ping'"
    reachedState = False

    for attempt in range(1, maxQueries + 1):
        # A successful IUCV echo means the guest OS is reachable ("up").
        # NOTE(review): this pings rh.userid rather than the userid
        # parameter being monitored -- confirm that is intentional.
        results = execCmdThruIUCV(rh, rh.userid, pingCmd)
        osIsUp = (results['overallRC'] == 0)
        if (osIsUp and desiredState == 'up') or (
                not osIsUp and desiredState == 'down'):
            reachedState = True
            break
        if attempt < maxQueries:
            time.sleep(sleepSecs)

    if reachedState:
        results = {
            'overallRC': 0,
            'rc': 0,
            'rs': 0,
            }
    else:
        maxWait = maxQueries * sleepSecs
        rh.printLn("ES", msgs.msg['0413'][1] % (modId, userid,
            desiredState, maxWait))
        results = msgs.msg['0413'][0]

    rh.printSysLog("Exit vmUtils.waitForOSState, rc: " +
        str(results['overallRC']))
    return results
def waitForVMState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
    """
    Wait for the virtual machine to go into the indicated state.

    Input:
       Request Handle
       userid whose state is to be monitored
       Desired state, 'on' or 'off', case sensitive
       Maximum attempts to wait for desired state before giving up
       Sleep duration between waits

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
    """
    rh.printSysLog("Enter vmUtils.waitForVMState, userid: " + userid +
        " state: " + desiredState +
        " maxWait: " + str(maxQueries) +
        " sleepSecs: " + str(sleepSecs))

    results = {}
    maxQueries = int(maxQueries)
    cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
    strCmd = " ".join(cmd)
    stateFnd = False

    for i in range(1, maxQueries + 1):
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(
                cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(out, bytes):
                out = bytes.decode(out)
            rh.printSysLog("Query user output: " + out)
            # "query user" succeeded, so the machine is logged on.
            if desiredState == 'on':
                stateFnd = True
                break
        except CalledProcessError as e:
            out = e.output
            if isinstance(out, bytes):
                out = bytes.decode(out)
            rh.printSysLog("Query user output: " + out)
            # Bug fix: use a raw string for the regex so the "\w" escapes
            # are not treated as (invalid) Python string escapes.
            # HCPxxx045E/HCPxxx361E indicate the userid is logged off.
            match = re.search(r'(^HCP\w\w\w045E|^HCP\w\w\w361E)', out)
            if match:
                # Logged off
                if desiredState == 'off':
                    stateFnd = True
                    break
            else:
                # Abnormal failure
                rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                    e.returncode, out))
                results = msgs.msg['0415'][0]
                results['rs'] = e.returncode
                break
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
        if i < maxQueries:
            # Sleep a bit before looping.
            time.sleep(sleepSecs)

    if stateFnd is True:
        results = {
            'overallRC': 0,
            'rc': 0,
            'rs': 0,
            }
    else:
        maxWait = maxQueries * sleepSecs
        rh.printLn("ES", msgs.msg['0414'][1] % (modId, userid,
            desiredState, maxWait))
        results = msgs.msg['0414'][0]

    rh.printSysLog("Exit vmUtils.waitForVMState, rc: " +
        str(results['overallRC']))
    return results
def purgeReader(rh):
    """
    Purge the reader of the userid in the request handle.

    Input:
       Request Handle

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
          errno     - Errno returned from SMCLI if overallRC = 0.
          response  - Updated with an error message.
    """
    rh.printSysLog("Enter vmUtils.purgeRDR, userid: " + rh.userid)
    # Dead-code fix: the original pre-initialized a results dictionary
    # here that was immediately overwritten by invokeSMCLI's return value.
    # 'spoolids=all' asks SMAPI to purge every spool file in the reader.
    parms = ['-T', rh.userid, '-k', 'spoolids=all']

    results = invokeSMCLI(rh, "System_RDR_File_Manage", parms)

    if results['overallRC'] != 0:
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    rh.printSysLog("Exit vmUtils.purgeReader, rc: " +
        str(results['overallRC']))
    return results
import logging
import logging.handlers
import shlex
from six import string_types
from smtLayer import changeVM
from smtLayer import cmdVM
from smtLayer import deleteVM
from smtLayer import getHost
from smtLayer import getVM
from smtLayer import makeVM
from smtLayer import migrateVM
from smtLayer import msgs
from smtLayer import smapi
from smtLayer import powerVM
from zvmsdk import log as zvmsdklog
modId = "RQH"        # Module identifier used in message/log formatting.
version = '1.0.0'    # Version of this script
class ReqHandle(object):
    """
    Systems Management Ultra Thin Layer Request Handle.
    This class contains all information related to a specific request.
    All functions are passed this request handle.
    """

    # Function dispatch table.  Each function name maps to four handlers,
    # indexed as: 0 - show invocation (usage) help lines,
    # 1 - show operand help lines, 2 - parse the command line,
    # 3 - perform the function ("doIt").
    funcHandler = {
        'CHANGEVM': [
            lambda rh: changeVM.showInvLines(rh),
            lambda rh: changeVM.showOperandLines(rh),
            lambda rh: changeVM.parseCmdline(rh),
            lambda rh: changeVM.doIt(rh)],
        'CMDVM': [
            lambda rh: cmdVM.showInvLines(rh),
            lambda rh: cmdVM.showOperandLines(rh),
            lambda rh: cmdVM.parseCmdline(rh),
            lambda rh: cmdVM.doIt(rh)],
        'DELETEVM': [
            lambda rh: deleteVM.showInvLines(rh),
            lambda rh: deleteVM.showOperandLines(rh),
            lambda rh: deleteVM.parseCmdline(rh),
            lambda rh: deleteVM.doIt(rh)],
        'GETHOST': [
            lambda rh: getHost.showInvLines(rh),
            lambda rh: getHost.showOperandLines(rh),
            lambda rh: getHost.parseCmdline(rh),
            lambda rh: getHost.doIt(rh)],
        'GETVM': [
            lambda rh: getVM.showInvLines(rh),
            lambda rh: getVM.showOperandLines(rh),
            lambda rh: getVM.parseCmdline(rh),
            lambda rh: getVM.doIt(rh)],
        'MAKEVM': [
            lambda rh: makeVM.showInvLines(rh),
            lambda rh: makeVM.showOperandLines(rh),
            lambda rh: makeVM.parseCmdline(rh),
            lambda rh: makeVM.doIt(rh)],
        'MIGRATEVM': [
            lambda rh: migrateVM.showInvLines(rh),
            lambda rh: migrateVM.showOperandLines(rh),
            lambda rh: migrateVM.parseCmdline(rh),
            lambda rh: migrateVM.doIt(rh)],
        'POWERVM': [
            lambda rh: powerVM.showInvLines(rh),
            lambda rh: powerVM.showOperandLines(rh),
            lambda rh: powerVM.parseCmdline(rh),
            lambda rh: powerVM.doIt(rh)],
        'SMAPI': [
            lambda rh: smapi.showInvLines(rh),
            lambda rh: smapi.showOperandLines(rh),
            lambda rh: smapi.parseCmdline(rh),
            lambda rh: smapi.doIt(rh)],
        }

    def __init__(self, **kwArgs):
        """
        Constructor

        Input:
           captureLogs=<True|False>
                       Enables or disables log capture for all requests.
           cmdName=<cmdName>
                       Name of the command that is using ReqHandle.
                       This is only used for the function help.
                       It defaults to "smtCmd.py".
           requestId=requestId
                       Optional request Id
           smt=<smtDaemon>
                       SMT daemon, if it exists.
        """
        self.results = {
            'overallRC': 0,    # Overall return code for the function, e.g.
                               #   0  - Everything went ok
                               #   2  - Something in the IUCVCLNT failed
                               #   3  - Something in a local vmcp failed
                               #   4  - Input validation error
                               #   5  - Miscellaneous processing error
                               #   8  - SMCLI - SMAPI failure
                               #   24 - SMCLI - Parsing failure
                               #   25 - SMCLI - Internal Processing Error
                               #   99 - Unexpected failure
            'rc': 0,           # Return code causing the return
            'rs': 0,           # Reason code causing the return
            'errno': 0,        # Errno value causing the return
            'strError': '',    # Error as a string value.
                               # Normally, this is the errno description.
            'response': [],    # Response strings
            'logEntries': [],  # Syslog entries related to this request
            }

        if 'smt' in kwArgs.keys():
            self.daemon = kwArgs['smt']    # SMT Daemon
            # Actual SysLog handling is done in SMT.
        else:
            self.daemon = ''
            # Set up SysLog handling to be done by ReqHandle
            self.logger = logging.getLogger(__name__)
            self.logger.setLevel(logging.DEBUG)
            self.handler = logging.handlers.SysLogHandler(address = '/dev/log')
            self.formatter = (
                logging.Formatter('%(module)s.%(funcName)s: %(message)s'))
            self.handler.setFormatter(self.formatter)
            self.logger.addHandler(self.handler)

        if 'cmdName' in kwArgs.keys():
            self.cmdName = kwArgs['cmdName']
        else:
            self.cmdName = 'smtCmd.py'

        if 'requestId' in kwArgs.keys():
            self.requestId = kwArgs['requestId']
        else:
            # Fall back to an id derived from this object's address.
            self.requestId = 'REQ_' + hex(id(self))[2:]
            # <todo> Need to generate a default request Id

        self.function = ''        # Function being processed
        self.subfunction = ''     # Subfunction be processed (optional)
        self.userid = ''          # Target userid
        self.parms = {}           # Dictionary of additional parms
        self.argPos = 0           # Prep to parse first command line arg

        # Capture & return Syslog entries
        if 'captureLogs' in kwArgs.keys():
            self.captureLogs = kwArgs['captureLogs']
        else:
            self.captureLogs = False

    def driveFunction(self):
        """
        Drive the function/subfunction call.

        Input:
           Self with request filled in.

        Output:
           Request Handle updated with the results.
           Overall return code - 0: successful, non-zero: error
        """
        if self.function == 'HELP':
            # General help for all functions.
            self.printLn("N", "")
            self.printLn("N", "Usage:")
            self.printLn("N", " python " + self.cmdName + " --help")
            # Emit each function's usage lines, then its operand lines.
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][0](self)
            self.printLn("N", "")
            self.printLn("N", "Operand(s):")
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][1](self)
            self.printLn("N", "")
            self.updateResults({}, reset=1)
        elif self.function == 'VERSION':
            # Version of ReqHandle.
            self.printLn("N", "Version: " + version)
            self.updateResults({}, reset=1)
        else:
            # Some type of function/subfunction invocation.
            if self.function in self.funcHandler:
                # Invoke the functions doIt routine to route to the
                # appropriate subfunction.
                self.funcHandler[self.function][3](self)
            else:
                # Unrecognized function
                msg = msgs.msg['0007'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0007'][0])

        return self.results

    def parseCmdline(self, requestData):
        """
        Parse the request command string.

        Input:
           Self with request filled in.
           requestData - the request, either as a pre-split list of
                         operands or as a single string.

        Output:
           Request Handle updated with the parsed information so that
           it is accessible via key/value pairs for later processing.
           Return code - 0: successful, non-zero: error
        """
        self.printSysLog("Enter ReqHandle.parseCmdline")

        # Save the request data based on the type of operand.
        if isinstance(requestData, list):
            self.requestString = ' '.join(requestData)  # Request as a string
            self.request = requestData                  # Request as a list
        elif isinstance(requestData, string_types):
            self.requestString = requestData            # Request as a string
            self.request = shlex.split(requestData)     # Request as a list
        else:
            # Request data type is not supported.
            msg = msgs.msg['0012'][1] % (modId, type(requestData))
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0012'][0])
            return self.results
        self.totalParms = len(self.request)    # Number of parms in the cmd

        # Handle the request, parse it or return an error.
        if self.totalParms == 0:
            # Too few arguments.
            msg = msgs.msg['0009'][1] % modId
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0009'][0])
        elif self.totalParms == 1:
            # A single operand is only valid for HELP or VERSION.
            self.function = self.request[0].upper()
            if self.function == 'HELP' or self.function == 'VERSION':
                pass
            else:
                # Function is not HELP or VERSION.
                msg = msgs.msg['0008'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0008'][0])
        else:
            # Process based on the function operand.
            self.function = self.request[0].upper()
            if self.request[0] == 'HELP' or self.request[0] == 'VERSION':
                pass
            else:
                # Handle the function related parms by calling the function
                # parser.
                if self.function in ReqHandle.funcHandler:
                    self.funcHandler[self.function][2](self)
                else:
                    # Unrecognized function
                    msg = msgs.msg['0007'][1] % (modId, self.function)
                    self.printLn("ES", msg)
                    self.updateResults(msgs.msg['0007'][0])

        self.printSysLog("Exit ReqHandle.parseCmdline, rc: " +
            str(self.results['overallRC']))
        return self.results

    def printLn(self, respType, respString):
        """
        Add one or lines of output to the response list.

        Input:
           Response type: One or more characters indicate type of response.
              E - Error message
              N - Normal message
              S - Output should be logged
              W - Warning message
           respString - the message text; it is split on newlines so each
                        line becomes a separate response entry.
        """
        if 'E' in respType:
            respString = '(Error) ' + respString
        if 'W' in respType:
            respString = '(Warning) ' + respString
        if 'S' in respType:
            self.printSysLog(respString)
        self.results['response'] = (self.results['response'] +
            respString.splitlines())
        return

    def printSysLog(self, logString):
        """
        Log one or more lines.  Optionally, add them to logEntries list.

        Input:
           Strings to be logged.
        """
        if self.daemon:
            # Daemon mode: the daemon owns the logger.
            self.daemon.logger.debug(self.requestId + ": " + logString)
        elif zvmsdklog.LOGGER.getloglevel() <= logging.DEBUG:
            # print log only when debug is enabled
            if self.daemon == '':
                self.logger.debug(self.requestId + ": " + logString)

        # Independently of where it was logged, capture the entry when
        # log capture was requested on the constructor.
        if self.captureLogs is True:
            self.results['logEntries'].append(self.requestId + ": " +
                logString)
        return

    def updateResults(self, newResults, **kwArgs):
        """
        Update the results related to this request excluding the 'response'
        and 'logEntries' values.
        We specifically update (if present):
           overallRC, rc, rs, errno.

        Input:
           Dictionary containing the results to be updated or an empty
              dictionary the reset keyword was specified.
           Reset keyword:
              0 - Not a reset.  This is the default is reset keyword was not
                  specified.
              1 - Reset failure related items in the result dictionary.
                  This exclude responses and log entries.
              2 - Reset all result items in the result dictionary.

        Output:
           Request handle is updated with the results.
        """
        if 'reset' in kwArgs.keys():
            reset = kwArgs['reset']
        else:
            reset = 0

        if reset == 0:
            # Not a reset.  Set the keys from the provided dictionary.
            for key in newResults.keys():
                if key == 'response' or key == 'logEntries':
                    continue
                self.results[key] = newResults[key]
        elif reset == 1:
            # Reset all failure related items.
            self.results['overallRC'] = 0
            self.results['rc'] = 0
            self.results['rs'] = 0
            self.results['errno'] = 0
            self.results['strError'] = ''
        elif reset == 2:
            # Reset all results information including any responses and
            # log entries.
            self.results['overallRC'] = 0
            self.results['rc'] = 0
            self.results['rs'] = 0
            self.results['errno'] = 0
            self.results['strError'] = ''
            # NOTE(review): 'logEntries' and 'response' are initialized as
            # lists in __init__ but reset to '' here -- confirm whether
            # any caller depends on the empty-string value.
            self.results['logEntries'] = ''
            self.results['response'] = ''

        return
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import execCmdThruIUCV
modId = 'CMD'
version = "1.0.0"
"""
List of subfunction handlers.
Each subfunction contains a list that has:
Readable name of the routine that handles the subfunction,
Code for the function call.
"""
subfuncHandler = {
'CMD': ['invokeCmd', lambda rh: invokeCmd(rh)],
'HELP': ['help', lambda rh: help(rh)],
'VERSION': ['getVersion', lambda rh: getVersion(rh)],
}
"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
- Human readable name of the operand,
- Property in the parms dictionary to hold the value,
- Is it required (True) or optional (False),
- Type of data (1: int, 2: string).
"""
posOpsList = {
'CMD': [
['Command to send', 'cmd', True, 2],
['Timeout value', 'timeout', False, 1],
],
}
"""
List of additional operands/options supported by the various subfunctions.
The dictionary followng the subfunction name uses the keyword from the command
as a key. Each keyword has a dictionary that lists:
- the related parms item that stores the value,
- how many values follow the keyword, and
- the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
'CMD': {
'--showparms': ['showParms', 0, 0],
},
}
def doIt(rh):
    """
    Route the CmdVM request to the matching subfunction handler.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter cmdVM.doIt")

    # Echo the invocation parameters when --showparms was specified.
    if rh.parms.get('showParms') is True:
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", " Routine: cmdVM." +
            str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)")
        rh.printLn("N", " function: " + rh.function)
        rh.printLn("N", " userid: " + rh.userid)
        rh.printLn("N", " subfunction: " + rh.subfunction)
        rh.printLn("N", " parms{}: ")
        for parmKey in rh.parms:
            if parmKey == 'showParms':
                continue
            rh.printLn("N", " " + parmKey + ": " + str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the subfunction's handler.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit cmdVM.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function to the response buffer.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    versionLine = "Version: " + version
    rh.printLn("N", versionLine)
    return 0
def help(rh):
    """
    Produce help output specifically for CmdVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Emit the usage synopsis followed by the operand descriptions.
    for helpWriter in (showInvLines, showOperandLines):
        helpWriter(rh)
    return 0
def invokeCmd(rh):
    """
    Invoke the command in the virtual machine's operating system.

    Input:
       Request Handle with the following properties:
          function       - 'CMDVM'
          subfunction    - 'CMD'
          userid         - userid of the virtual machine
          parms['cmd']   - Command to send

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter cmdVM.invokeCmd, userid: " + rh.userid)

    # Send the command over IUCV; timeout is optional.
    results = execCmdThruIUCV(rh, rh.userid, rh.parms['cmd'],
        timeout=rh.parms.get('timeout', None))
    if results['overallRC'] == 0:
        rh.printLn("N", results['response'])
    else:
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    rh.printSysLog("Exit cmdVM.invokeCmd, rc: " + str(results['overallRC']))
    return results['overallRC']
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter cmdVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int and must be wrapped in str() before
        # concatenation; the original raised TypeError on this error path.
        rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only a function and subfunction were given; there is no userid.
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    if rh.subfunction == '':
        return
    # Build the common invocation prefix once and reuse it per line.
    prefix = " python " + rh.cmdName + " CmdVM "
    rh.printLn("N", "Usage:")
    for tail in ("<userid> cmd <cmdToSend>", "help", "version"):
        rh.printLn("N", prefix + tail)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    # Header differs depending on whether we are in overall HELP mode.
    if rh.function == 'HELP':
        rh.printLn("N", " For the CmdVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")

    subfunctionHelp = [
        " cmd - Send a command to a virtual machine's operating system.",
        " help - Displays this help information.",
        " version - show the version of the power function",
    ]
    for helpLine in subfunctionHelp:
        rh.printLn("N", helpLine)

    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
        rh.printLn("N", " <userid> - Userid of the target virtual machine")
        rh.printLn("N",
            " <cmdToSend> - Command to send to the virtual machine's OS.")
    return
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import invokeSMCLI
modId = 'SMP'
version = "1.0.0"
"""
List of subfunction handlers.
Each subfunction contains a list that has:
Readable name of the routine that handles the subfunction,
Code for the function call.
"""
subfuncHandler = {
'API': ['invokeSmapiApi', lambda rh: invokeSmapiApi(rh)],
'HELP': ['help', lambda rh: help(rh)],
'VERSION': ['getVersion', lambda rh: getVersion(rh)]}
"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
- Human readable name of the operand,
- Property in the parms dictionary to hold the value,
- Is it required (True) or optional (False),
- Type of data (1: int, 2: string).
"""
posOpsList = {
'API': [
['API Name', 'apiName', True, 2]]
}
"""
List of additional operands/options supported by the various subfunctions.
The dictionary following the subfunction name uses the keyword from the
command as a key.
Each keyword has a dictionary that lists:
- the related parms item that stores the value,
- how many values follow the keyword, and
- the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
'API': {
'--operands': ['operands', -1, 2],
'--showparms': ['showParms', 0, 0]}
}
def doIt(rh):
    """
    Dispatch the requested SMAPI subfunction to its registered handler.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter smapi.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms'):
        handlerName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: smapi." + handlerName + "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey == 'showParms':
                continue
            rh.printLn("N", "    " + parmKey + ": " + str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Invoke the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit smapi.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function module.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    versionMsg = "Version: " + version
    rh.printLn("N", versionMsg)
    return 0
def help(rh):
    """
    Produce help output specifically for SMAPI functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    # Help is the command synopsis followed by the operand descriptions.
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def invokeSmapiApi(rh):
    """
    Invoke a SMAPI API.

    Input:
       Request Handle with the following properties:
          function          - 'SMAPI'
          subfunction       - 'API'
          userid            - 'HYPERVISOR'
          parms['apiName']  - Name of API as defined by SMCLI
          parms['operands'] - List (array) of operands to send or
                              an empty list.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter smapi.invokeSmapiApi")

    # SMCLI always needs a target userid; substitute a placeholder when
    # the request targets the hypervisor itself.
    if rh.userid != 'HYPERVISOR':
        userid = rh.userid
    else:
        userid = 'dummy'

    parms = ["-T", userid]
    if 'operands' in rh.parms:
        parms.extend(rh.parms['operands'])

    # SSI_Query does not need any param
    if rh.parms['apiName'] == 'SSI_Query':
        parms = []

    results = invokeSMCLI(rh, rh.parms['apiName'], parms)
    if results['overallRC'] == 0:
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    # Bug fix: the exit trace previously named the wrong routine
    # ("smapi.invokeCmd").
    rh.printSysLog("Exit smapi.invokeSmapiApi, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter smapi.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int; it must be converted with str()
        # before concatenation or this trace line raises a TypeError.
        rh.printSysLog("Exit smapi.parseCmdLine, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only a subfunction was supplied (e.g. "help" or "version").
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3               # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit smapi.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    # Emit the "Usage:" header only when a specific subfunction was given.
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    invPrefix = "  python " + rh.cmdName + " SMAPI"
    rh.printLn("N", invPrefix + " <userid> " +
        "api <apiName> [--operands <apiOperands>]")
    rh.printLn("N", invPrefix + " help")
    rh.printLn("N", invPrefix + " version")
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the " + rh.function + " function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")
    rh.printLn("N", "      api         - Invoke a SMAPI API.")
    rh.printLn("N", "      help        - Displays this help information.")
    # Bug fix: this help line used to read "power function", a copy/paste
    # leftover from the power module's help text.
    rh.printLn("N", "      version     - " +
        "show the version of the SMAPI function")
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
        rh.printLn("N", "      <userid>  - " +
            "Userid of the target virtual machine")
        rh.printLn("N", "      <apiName> - Name of the API to invoke")
        rh.printLn("N", "      --operands <apiOperands> - Additional API operands")
    return
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import invokeSMCLI, isLoggedOn, purgeReader
# Identifier used in this module's error messages.
modId = "DVM"
# Version of this function module.
version = "1.0.0"

# List of subfunction handlers.
# Each subfunction contains a list that has:
#   Readable name of the routine that handles the subfunction,
#   Code for the function call.
subfuncHandler = {
    'DIRECTORY': ['deleteMachine', lambda rh: deleteMachine(rh)],
    'HELP': ['help', lambda rh: help(rh)],
    'VERSION': ['getVersion', lambda rh: getVersion(rh)],
    }

# List of positional operands based on subfunction.
# Each subfunction contains a list which has a dictionary with the
# following information for the positional operands:
#   - Human readable name of the operand,
#   - Property in the parms dictionary to hold the value,
#   - Is it required (True) or optional (False),
#   - Type of data (1: int, 2: string).
# No DeleteVM subfunction takes positional operands.
posOpsList = {}

# List of additional operands/options supported by the various
# subfunctions.  The dictionary following the subfunction name uses the
# keyword from the command as a key.  Each keyword has a dictionary that
# lists:
#   - the related parms item that stores the value,
#   - how many values follow the keyword, and
#   - the type of data for those values (1: int, 2: string)
keyOpsList = {
    'DIRECTORY': {'--showparms': ['showParms', 0, 0]},
    'HELP': {},
    'VERSION': {},
    }
def deleteMachine(rh):
    """
    Delete a virtual machine from the user directory.

    Input:
       Request Handle with the following properties:
          function    - 'DELETEVM'
          subfunction - 'DIRECTORY'
          userid      - userid of the virtual machine to be deleted.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error

    Processing:
       1. Determine the log on/off state of the image.
       2. If logged on, deactivate it immediately (tolerating "already
          off / going off" responses).
       3. Purge the image's reader files (failures are tolerated).
       4. Remove the image from the directory via Image_Delete_DM.
    """

    rh.printSysLog("Enter deleteVM.deleteMachine")

    results = {'overallRC': 0, 'rc': 0, 'rs': 0}

    # Is image logged on ?
    state = 'on'         # Assume 'on' until proven otherwise.
    results = isLoggedOn(rh, rh.userid)
    if results['overallRC'] != 0:
        # Cannot determine the log on/off state.
        # Message already included.  Act as if it is 'on'.
        pass
    elif results['rs'] == 0:
        # State is powered on.
        pass
    else:
        state = 'off'
        # Reset values for rest of subfunction
        results['overallRC'] = 0
        results['rc'] = 0
        results['rs'] = 0

    if state == 'on':
        # Force the image off before deleting it.
        # NOTE(review): "-f IMMED" is passed as a single argv token (flag
        # and value together) -- confirm invokeSMCLI expects this form.
        parms = ["-T", rh.userid, "-f IMMED"]
        results = invokeSMCLI(rh, "Image_Deactivate", parms)
        if results['overallRC'] == 0:
            pass
        elif (results['overallRC'] == 8 and results['rc'] == 200 and
            (results['rs'] == 12 or results['rs'] == 16)):
            # Tolerable error. Machine is already in or going into the state
            # that we want it to enter (rs 12/16 from Image_Deactivate).
            rh.updateResults({}, reset=1)
        else:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results returned by invokeSMCLI

    # Clean up the reader before delete
    if results['overallRC'] == 0:
        result = purgeReader(rh)
        if result['overallRC'] != 0:
            # Tolerate the purge failure error; deletion proceeds anyway.
            rh.updateResults({}, reset=1)

    if results['overallRC'] == 0:
        # NOTE(review): the meaning of the "-e 0" (erase) option comes from
        # smcli Image_Delete_DM; confirm against the SMAPI reference.
        parms = ["-T", rh.userid, "-e", "0"]
        results = invokeSMCLI(rh, "Image_Delete_DM", parms)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results returned by invokeSMCLI

    rh.printSysLog("Exit deleteVM.deleteMachine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def doIt(rh):
    """
    Run the handler registered for the requested subfunction.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter deleteVM.doIt")

    # Optionally echo the invocation parameters before dispatching.
    if rh.parms.get('showParms') is True:
        routineName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: deleteVM." + routineName + "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey == 'showParms':
                continue
            rh.printLn("N", "    " + parmKey + ": " +
                str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the subfunction handler.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit deleteVM.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function module.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    versionText = "Version: " + version
    rh.printLn("N", versionText)
    return 0
def help(rh):
    """
    Produce help output specifically for DeleteVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    # Help is the command synopsis followed by the operand descriptions.
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter deleteVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int; it must be converted with str()
        # before concatenation or this trace line raises a TypeError.
        rh.printSysLog("Exit deleteVM.parseCmdLine, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only a subfunction was supplied (e.g. "help" or "version").
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3               # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit deleteVM.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    # Emit the "Usage:" header only when a specific subfunction was given.
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    base = "  python " + rh.cmdName + " DeleteVM"
    for tail in (" <userid> directory", " help", " version"):
        rh.printLn("N", base + tail)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the DeleteVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")
    rh.printLn("N", "      directory - " +
        "Delete a virtual machine from the user directory.")
    rh.printLn("N", "      help      - " +
        "Displays this help information.")
    # Bug fix: this help line used to read "power function", a copy/paste
    # leftover from the power module's help text.
    rh.printLn("N", "      version   - " +
        "Show the version of the DeleteVM function")
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
        rh.printLn("N", "      <userid> - " +
            "Userid of the target virtual machine")
    return
import re
import subprocess
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import execCmdThruIUCV, getPerfInfo, invokeSMCLI
from smtLayer.vmUtils import isLoggedOn
# Identifier used in this module's error messages.
modId = 'GVM'
# Version of this function module.
version = "1.0.0"

"""
List of subfunction handlers.
Each subfunction contains a list that has:
    Readable name of the routine that handles the subfunction,
    Code for the function call.
"""
subfuncHandler = {
    'CONSOLEOUTPUT': ['getConsole', lambda rh: getConsole(rh)],
    'DIRECTORY': ['getDirectory', lambda rh: getDirectory(rh)],
    'ALLDIRECTORY': ['getAllDirectory', lambda rh: getAllDirectory(rh)],
    'HELP': ['help', lambda rh: help(rh)],
    'ISREACHABLE': ['checkIsReachable', lambda rh: checkIsReachable(rh)],
    'STATUS': ['getStatus', lambda rh: getStatus(rh)],
    'VERSION': ['getVersion', lambda rh: getVersion(rh)],
    'FCPINFO': ['fcpinfo', lambda rh: fcpinfo(rh)],
    }

"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
    - Human readable name of the operand,
    - Property in the parms dictionary to hold the value,
    - Is it required (True) or optional (False),
    - Type of data (1: int, 2: string).
"""
posOpsList = {
    'FCPINFO': [
        ['Status filter', 'status', True, 2],
    ]}

"""
List of additional operands/options supported by the various subfunctions.
The dictionary following the subfunction name uses the keyword from the command
as a key.  Each keyword has a dictionary that lists:
  - the related parms item that stores the value,
  - how many values follow the keyword, and
  - the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
    'CONSOLEOUTPUT': {'--showparms': ['showParms', 0, 0]},
    'DIRECTORY': {'--showparms': ['showParms', 0, 0]},
    # Consistency fix: ALLDIRECTORY and FCPINFO are valid subfunctions
    # (see subfuncHandler above) but were missing from this table; every
    # subfunction needs an entry so keyword parsing can look it up.
    'ALLDIRECTORY': {'--showparms': ['showParms', 0, 0]},
    'HELP': {},
    'ISREACHABLE': {'--showparms': ['showParms', 0, 0]},
    'STATUS': {
        '--all': ['allBasic', 0, 0],
        '--cpu': ['cpu', 0, 0],
        '--memory': ['memory', 0, 0],
        '--power': ['power', 0, 0],
        '--showparms': ['showParms', 0, 0]},
    'VERSION': {},
    'FCPINFO': {'--showparms': ['showParms', 0, 0]},
    }
def checkIsReachable(rh):
    """
    Check if a virtual machine is reachable.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       overallRC - 0: determined the status, non-zero: some weird failure
                   while trying to execute a command
                   on the guest via IUCV
       rc - RC returned from execCmdThruIUCV
       rs - 0: not reachable, 1: reachable
    """

    rh.printSysLog("Enter getVM.checkIsReachable, userid: " + rh.userid)

    # A trivial echo over IUCV doubles as a reachability probe.
    pingCmd = "echo 'ping'"
    results = execCmdThruIUCV(rh, rh.userid, pingCmd)

    if results['overallRC'] != 0:
        # Any failure from execCmdThruIUCV is an acceptable way of
        # determining that the system is unreachable; the error message
        # itself is intentionally not passed along.
        rh.printLn("N", rh.userid + ": unreachable")
        reachable = 0
    else:
        rh.printLn("N", rh.userid + ": reachable")
        reachable = 1

    rh.updateResults({"rs": reachable})
    rh.printSysLog("Exit getVM.checkIsReachable, rc: 0")
    return 0
def doIt(rh):
    """
    Run the handler registered for the requested subfunction.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getVM.doIt")

    # Optionally echo the invocation parameters before dispatching.
    if rh.parms.get('showParms') is True:
        handlerName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: getVM." + handlerName + "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey == 'showParms':
                continue
            rh.printLn("N", "    " + parmKey + ": " + str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the subfunction handler.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit getVM.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getConsole(rh):
    """
    Get the virtual machine's console output.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results (a list of reader spool
       ids holding class T CON files for the target userid).
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getVM.getConsole")

    # Transfer the console to this virtual machine.
    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_Console_Get", parms)

    if results['overallRC'] != 0:
        if (results['overallRC'] == 8 and results['rc'] == 8 and
                results['rs'] == 8):
            # Give a more specific message.  Userid is either
            # not logged on or not spooling their console.
            msg = msgs.msg['0409'][1] % (modId, rh.userid)
        else:
            msg = results['response']
        rh.updateResults(results)    # Use results from invokeSMCLI
        rh.printLn("ES", msg)
        # Bug fix: exit traces in this routine used to be mislabeled
        # "getVM.parseCmdLine".
        rh.printSysLog("Exit getVM.getConsole, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Check whether the reader is online.  (The previous explicit close()
    # after the 'with' block was redundant and has been removed.)
    with open('/sys/bus/ccw/drivers/vmur/0.0.000c/online', 'r') as myfile:
        out = myfile.read().replace('\n', '')

    # Nope, offline, error out and exit
    if int(out) != 1:
        msg = msgs.msg['0411'][1]
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0411'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    # We should set class to *, otherwise we will get errors like:
    # vmur: Reader device class does not match spool file class
    cmd = ["sudo", "/sbin/vmcp", "spool reader class *"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # If we couldn't change the class, that's not fatal
        # But we want to warn about possibly incomplete
        # results
        msg = msgs.msg['0407'][1] % (modId, strCmd, e.output)
        rh.printLn("WS", msg)
    except Exception as e:
        # All other exceptions.
        # If we couldn't change the class, that's not fatal
        # But we want to warn about possibly incomplete
        # results
        rh.printLn("ES", msgs.msg['0422'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        # Bug fix: this second message used to interpolate only modId and
        # pass the remaining values as extra printLn arguments, which
        # raised a TypeError whenever this path was hit; group all four
        # values into the format tuple instead.
        rh.printLn("ES", msgs.msg['0423'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    # List the spool files in the reader
    cmd = ["sudo", "/usr/sbin/vmur", "list"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        files = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        files = bytes.decode(files)
    except subprocess.CalledProcessError as e:
        # Uh oh, vmur list command failed for some reason
        msg = msgs.msg['0408'][1] % (modId, rh.userid,
            strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0408'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']
    except Exception as e:
        # All other exceptions.
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        rh.updateResults(msgs.msg['0421'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Now for each line that contains our user and is a
    # class T console file, add the spool id to our list
    spoolFiles = files.split('\n')
    outstr = ""
    for myfile in spoolFiles:
        if (myfile != "" and
                myfile.split()[0] == rh.userid and
                myfile.split()[2] == "T" and
                myfile.split()[3] == "CON"):
            fileId = myfile.split()[1]
            outstr += fileId + " "

    # No files in our list
    if outstr == "":
        msg = msgs.msg['0410'][1] % (modId, rh.userid)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0410'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Output the list
    rh.printLn("N", "List of spool files containing "
               "console logs from %s: %s" % (rh.userid, outstr))

    rh.results['overallRC'] = 0
    rh.printSysLog("Exit getVM.getConsole, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def getDirectory(rh):
    """
    Get the virtual machine's directory statements.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.getDirectory")

    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_Query_DM", parms)
    if results['overallRC'] == 0:
        # Strip everything from the *DVHOPT trailer onward.
        # Bug fix: use a raw string for the pattern; '\*' in a plain
        # string literal is an invalid escape sequence (SyntaxWarning on
        # Python >= 3.12, a future error).
        results['response'] = re.sub(r'\*DVHOPT.*', '', results['response'])
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit getVM.getDirectory, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def getAllDirectory(rh):
    """
    Get a list of defined virtual images.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.getAllDirectory")

    parms = []
    results = invokeSMCLI(rh, "Image_Name_Query_DM", parms)
    if results['overallRC'] == 0:
        # Strip everything from the *DVHOPT trailer onward.
        # Bug fix: use a raw string for the pattern; '\*' in a plain
        # string literal is an invalid escape sequence (SyntaxWarning on
        # Python >= 3.12, a future error).
        results['response'] = re.sub(r'\*DVHOPT.*', '', results['response'])
        rh.printLn("N", results['response'])
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit getVM.getAllDirectory, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def getStatus(rh):
    """
    Get the basic status of a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine
          parms       - may contain 'power', 'memory' or 'cpu' to select
                        a subset of the status; default is everything.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getVM.getStatus, userid: " + rh.userid)

    results = isLoggedOn(rh, rh.userid)
    # NOTE(review): this checks 'rc' while other callers of isLoggedOn in
    # this file check 'overallRC' -- confirm which field isLoggedOn sets
    # on failure.
    if results['rc'] != 0:
        # Uhoh, can't determine if guest is logged on or not
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if results['rs'] == 1:
        # Guest is logged off, everything is 0
        powerStr = "Power state: off"
        memStr = "Total Memory: 0M"
        usedMemStr = "Used Memory: 0M"
        procStr = "Processors: 0"
        timeStr = "CPU Used Time: 0 sec"
    else:
        powerStr = "Power state: on"

    if 'power' in rh.parms:
        # Test here to see if we only need power state
        # Then we can return early
        rh.printLn("N", powerStr)
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if results['rs'] != 1:
        # Guest is logged on, go get more info
        results = getPerfInfo(rh, rh.userid)

        if results['overallRC'] != 0:
            # Something went wrong in subroutine, exit
            rh.updateResults(results)
            rh.printSysLog("Exit getVM.getStatus, rc: " +
                str(rh.results['overallRC']))
            return rh.results['overallRC']
        else:
            # Everything went well, response should be good.
            # getPerfInfo returns four newline-separated lines:
            # total memory, used memory, processor count, CPU time.
            memStr = results['response'].split("\n")[0]
            usedMemStr = results['response'].split("\n")[1]
            procStr = results['response'].split("\n")[2]
            timeStr = results['response'].split("\n")[3]

    # Build our output string according
    # to what information was asked for
    if 'memory' in rh.parms:
        outStr = memStr + "\n" + usedMemStr
    elif 'cpu' in rh.parms:
        outStr = procStr + "\n" + timeStr
    else:
        # Default to all
        outStr = powerStr + "\n" + memStr + "\n" + usedMemStr
        outStr += "\n" + procStr + "\n" + timeStr
    rh.printLn("N", outStr)
    rh.printSysLog("Exit getVM.getStatus, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Report the version of this function module.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    versionLine = "Version: " + version
    rh.printLn("N", versionLine)
    return 0
def help(rh):
    """
    Produce help output specifically for GetVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
    """
    # Help is the command synopsis followed by the operand descriptions.
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int; it must be converted with str()
        # before concatenation or this trace line raises a TypeError.
        rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only a subfunction was supplied (e.g. "help" or "version").
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3               # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    # Emit the "Usage:" header only when a specific subfunction was given.
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    base = "  python " + rh.cmdName + " GetVM"
    rh.printLn("N", base +
        " <userid> [ consoleoutput | directory | isreachable ]")
    rh.printLn("N", base + " <userid>")
    rh.printLn("N", "                    " +
        "status [ --all | --cpu | --memory | --power ]")
    rh.printLn("N", base + " help")
    rh.printLn("N", base + " version")
    return
def showOperandLines(rh):
"""
Produce help output related to operands.
Input:
Request Handle
"""
if rh.function == 'HELP':
rh.printLn("N", " For the GetVM function:")
else:
rh.printLn("N", "Sub-Functions(s):")
rh.printLn("N", " consoleoutput - " +
"Obtains the console log from the virtual machine.")
rh.printLn("N", " directory - " +
"Displays the user directory lines for the virtual machine.")
rh.printLn("N", " help - " +
"Displays this help information.")
rh.printLn("N", " isreachable - " +
"Determine whether the virtual OS in a virtual machine")
rh.printLn("N", " is reachable")
rh.printLn("N", " status - " +
"show the log on/off status of the virtual machine")
rh.printLn("N", " version - " +
"show the version of the power function")
if rh.subfunction != '':
rh.printLn("N", "Operand(s):")
rh.printLn("N", " <userid> - " +
"Userid of the target virtual machine")
rh.printLn("N", " [ --all | --cpu | " +
"--memory | --power ]")
rh.printLn("N", " - " +
"Returns information machine related to the number")
rh.printLn("N", " " +
"of virtual CPUs, memory size, power status or all of the")
rh.printLn("N", " information.")
return
def extract_fcp_data(rh, raw_data, status):
    """
    Extract FCP device data from smcli System_WWPN_Query output.
    OWNER=YES is always specified, so each device contributes six lines.

    Input:
       rh       - Request Handle (used only to warn about malformed data).
       raw_data - raw response text returned from smcli.
       status   - status filter; 'all' keeps every device, otherwise only
                  devices whose Status line matches (case-insensitive).

    Output:
       A newline-joined string of the matching devices' lines, e.g.:
          FCP device number: 1B0E
          Status: Active
          NPIV world wide port number: C05076DE330005EA
          Channel path ID: 27
          Physical world wide port number: C05076DE33002E41
          Owner: TEST0008
    """
    raw_data = raw_data.split('\n')

    # clear blank lines
    data = []
    for i in raw_data:
        i = i.strip(' \n')
        if i == '':
            continue
        else:
            data.append(i)

    # put matched data into one list of strings
    results = []
    lines_per_item = 6
    for i in range(0, len(data), lines_per_item):
        if (i + lines_per_item) > len(data):
            # sometimes the SMCLI output is truncated, e.g.:
            #
            # FCP device number: 1B0F
            # Status: Active
            # NPIV world wide port number: C05076DE330005EA
            # Channel path ID: 27
            # Physical world wide port number:
            #
            # which is fewer than 6 lines for the last device.
            # We still do not know the reason, but we need handle this.
            # Bug fix: the message used to be built as a (format, args)
            # tuple, which printed the tuple itself; interpolate instead.
            msg = ("extract_fcp_data interrupt because abnormal formatted "
                   "output %s." % data)
            rh.printLn("WS", msg)
            break
        # The second line of each 6-line group is "Status: <value>".
        temp = data[i + 1].split(':')[-1].strip()
        # only return results that match the requested status
        if status.lower() == "all" or temp.lower() == status.lower():
            results.extend(data[i:i + lines_per_item])

    return '\n'.join(results)
def fcpinfo(rh):
    """
    Get fcp info and filter by the status.

    Input:
       Request Handle with the following properties:
          function        - 'GETVM'
          subfunction     - 'FCPINFO'
          userid          - userid of the virtual machine
          parms['status'] - The status for filter results.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Bug fix: the enter trace used to be mislabeled "changeVM.dedicate".
    rh.printSysLog("Enter getVM.fcpinfo")

    parms = ["-T", rh.userid, "-k OWNER=YES"]
    hideList = []
    results = invokeSMCLI(rh,
                          "System_WWPN_Query",
                          parms,
                          hideInLog=hideList)

    if results['overallRC'] == 0:
        # extract data from smcli return
        ret = extract_fcp_data(rh, results['response'], rh.parms['status'])
        # write the ret into results['response']
        rh.printLn("N", ret)
    else:
        # SMAPI API failed.  (Bug fix: the failure path previously printed
        # the error and called updateResults twice via two duplicated
        # branches; a single else branch now handles it once.)
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit getVM.fcpinfo, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
import os
from tempfile import mkstemp
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import invokeSMCLI
from zvmsdk import config
from zvmsdk import utils as zvmutils
# Identifier used in this module's error messages.
modId = 'MVM'
# Version of this function module.
version = "1.0.0"

# max vidks blocks can't exceed 4194296
MAX_VDISK_BLOCKS = 4194296

"""
List of subfunction handlers.
Each subfunction contains a list that has:
    Readable name of the routine that handles the subfunction,
    Code for the function call.
"""
subfuncHandler = {
    'DIRECTORY': ['createVM', lambda rh: createVM(rh)],
    'HELP': ['help', lambda rh: help(rh)],
    'VERSION': ['getVersion', lambda rh: getVersion(rh)]}

"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
    - Human readable name of the operand,
    - Property in the parms dictionary to hold the value,
    - Is it required (True) or optional (False),
    - Type of data (1: int, 2: string).
"""
posOpsList = {
    'DIRECTORY': [
        ['password', 'pw', True, 2],
        ['Primary Memory Size (e.g. 2G)', 'priMemSize', True, 2],
        ['Privilege Class(es)', 'privClasses', True, 2]],
    }

"""
List of additional operands/options supported by the various subfunctions.
The dictionary following the subfunction name uses the keyword from the
command as a key.  Each keyword has a dictionary that lists:
  - the related parms item that stores the value,
  - how many values follow the keyword, and
  - the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
    'DIRECTORY': {
        '--cpus': ['cpuCnt', 1, 1],
        '--ipl': ['ipl', 1, 2],
        '--logonby': ['byUsers', 1, 2],
        '--maxMemSize': ['maxMemSize', 1, 2],
        '--profile': ['profName', 1, 2],
        '--maxCPU': ['maxCPU', 1, 1],
        '--setReservedMem': ['setReservedMem', 0, 0],
        '--showparms': ['showParms', 0, 0],
        '--iplParam': ['iplParam', 1, 2],
        '--iplLoadparam': ['iplLoadparam', 1, 2],
        '--dedicate': ['dedicate', 1, 2],
        '--loadportname': ['loadportname', 1, 2],
        '--loadlun': ['loadlun', 1, 2],
        '--vdisk': ['vdisk', 1, 2],
        '--account': ['account', 1, 2],
        '--comment': ['comment', 1, 2],
        '--commandSchedule': ['commandSchedule', 1, 2],
        '--commandSetShare': ['commandSetShare', 1, 2],
        '--commandRelocationDomain': ['commandRDomain', 1, 2],
        # Bug fix: '--commandPcif' previously stored its value under
        # 'commandSchedule', silently clobbering that option and leaving
        # rh.parms['commandPcif'] (which createVM reads) forever unset.
        '--commandPcif': ['commandPcif', 1, 2]},
    'HELP': {},
    'VERSION': {},
    }
def createVM(rh):
    """
    Create a virtual machine in z/VM.

    Builds the lines of a z/VM user directory entry from the parsed
    request parameters, writes them to a temporary file and submits the
    file to the directory manager via the Image_Create_DM SMAPI call.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter makeVM.createVM")

    dirLines = []
    # USER statement: userid, password, initial and maximum storage,
    # and privilege classes.
    dirLines.append("USER " + rh.userid + " " + rh.parms['pw'] +
        " " + rh.parms['priMemSize'] + " " +
        rh.parms['maxMemSize'] + " " + rh.parms['privClasses'])

    if 'profName' in rh.parms:
        dirLines.append("INCLUDE " + rh.parms['profName'].upper())

    if 'maxCPU' in rh.parms:
        dirLines.append("MACHINE ESA %i" % rh.parms['maxCPU'])

    if 'account' in rh.parms:
        dirLines.append("ACCOUNT %s" % rh.parms['account'].upper())

    dirLines.append("COMMAND SET VCONFIG MODE LINUX")
    dirLines.append("COMMAND DEFINE CPU 00 TYPE IFL")

    # CPU 00 is always defined above, so additional CPUs start at 01.
    if 'cpuCnt' in rh.parms:
        for i in range(1, rh.parms['cpuCnt']):
            dirLines.append("COMMAND DEFINE CPU %0.2X TYPE IFL" % i)

    if 'commandSchedule' in rh.parms:
        v = rh.parms['commandSchedule']
        dirLines.append("COMMAND SCHEDULE * WITHIN POOL %s" % v)

    if 'commandSetShare' in rh.parms:
        v = rh.parms['commandSetShare']
        dirLines.append("SHARE %s" % v)

    if 'commandRDomain' in rh.parms:
        v = rh.parms['commandRDomain']
        dirLines.append("COMMAND SET VMRELOCATE * DOMAIN %s" % v)

    if 'commandPcif' in rh.parms:
        # Value format appears to be "<real id>:<virtual id>" -- the
        # split below assumes exactly one ':' separator. TODO confirm.
        v = rh.parms['commandPcif']
        s = v.split(':')
        dirLines.append("COMMAND ATTACH PCIF %s * AS %s" % (s[0], s[1]))

    if 'ipl' in rh.parms:
        ipl_string = "IPL %s " % rh.parms['ipl']

        if 'iplParam' in rh.parms:
            ipl_string += ("PARM %s " % rh.parms['iplParam'])

        if 'iplLoadparam' in rh.parms:
            ipl_string += ("LOADPARM %s " % rh.parms['iplLoadparam'])

        dirLines.append(ipl_string)

    if 'byUsers' in rh.parms:
        # byUsers was normalized to a list by parseCmdline.
        users = ' '.join(rh.parms['byUsers'])
        dirLines.append("LOGONBY " + users)

    priMem = rh.parms['priMemSize'].upper()
    maxMem = rh.parms['maxMemSize'].upper()
    if 'setReservedMem' in rh.parms:
        reservedSize = getReservedMemSize(rh, priMem, maxMem)
        if rh.results['overallRC'] != 0:
            rh.printSysLog("Exit makeVM.createVM, rc: " +
                str(rh.results['overallRC']))
            return rh.results['overallRC']
        # Even reservedSize is 0M, still write the line "COMMAND DEF
        # STOR RESERVED 0M" in direct entry, in case cold resize of
        # memory decreases the defined memory, then reserved memory
        # size would be > 0, this line in direct entry need be updated.
        # If no such line defined in user direct, resizing would report
        # error due to it can't get the original reserved memory value.
        dirLines.append("COMMAND DEF STOR RESERVED %s" % reservedSize)

    if 'loadportname' in rh.parms:
        # Strip any leading "0x" so the WWPN is bare hex.
        wwpn = rh.parms['loadportname'].replace("0x", "")
        dirLines.append("LOADDEV PORTname %s" % wwpn)

    if 'loadlun' in rh.parms:
        lun = rh.parms['loadlun'].replace("0x", "")
        dirLines.append("LOADDEV LUN %s" % lun)

    if 'dedicate' in rh.parms:
        vdevs = rh.parms['dedicate'].split()
        # add a DEDICATE statement for each vdev
        for vdev in vdevs:
            dirLines.append("DEDICATE %s %s" % (vdev, vdev))

    if 'vdisk' in rh.parms:
        # Value format is "<vdev>:<size>", size ending in 'M' or 'G'.
        v = rh.parms['vdisk'].split(':')
        sizeUpper = v[1].strip().upper()
        sizeUnit = sizeUpper[-1]
        # blocks = size / 512, as we are using M,
        # it means 1024*1024 / 512 = 2048
        if sizeUnit == 'M':
            blocks = int(sizeUpper[0:len(sizeUpper) - 1]) * 2048
        else:
            blocks = int(sizeUpper[0:len(sizeUpper) - 1]) * 2097152

        if blocks > 4194304:
            # not support exceed 2G disk size
            msg = msgs.msg['0207'][1] % (modId)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0207'][0])
            rh.printSysLog("Exit makeVM.createVM, rc: " +
                str(rh.results['overallRC']))
            return rh.results['overallRC']

        # https://www.ibm.com/support/knowledgecenter/SSB27U_6.4.0/
        # com.ibm.zvm.v640.hcpb7/defvdsk.htm#defvdsk
        # the maximum number of VDISK blocks is 4194296
        if blocks > MAX_VDISK_BLOCKS:
            blocks = MAX_VDISK_BLOCKS

        dirLines.append("MDISK %s FB-512 V-DISK %s MWV" % (v[0], blocks))

    if 'comment' in rh.parms:
        # "$@$@$" is the delimiter between individual comment lines.
        for comment in rh.parms['comment'].split("$@$@$"):
            if comment:
                dirLines.append("* %s" % comment.upper())

    # Construct the temporary file for the USER entry.
    fd, tempFile = mkstemp()
    to_write = '\n'.join(dirLines) + '\n'
    os.write(fd, to_write.encode())
    os.close(fd)

    parms = ["-T", rh.userid, "-f", tempFile]
    results = invokeSMCLI(rh, "Image_Create_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    os.remove(tempFile)

    rh.printSysLog("Exit makeVM.createVM, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def doIt(rh):
    """
    Perform the requested function by invoking the subfunction handler.

    Input:
       Request Handle
    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter makeVM.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms') is True:
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: makeVM." +
            str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey != 'showParms':
                rh.printLn("N", "    " + parmKey + ": " +
                    str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit makeVM.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Get the version of this function.

    Input:
       Request Handle
    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    versionLine = "Version: " + version
    rh.printLn("N", versionLine)
    return 0
def help(rh):
    """
    Produce help output specifically for MakeVM functions.

    Input:
       Request Handle
    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Emit the command synopsis first, then the per-operand detail.
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle
    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter makeVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: the return code must be passed through str() before
        # concatenation; the original raised a TypeError on this path.
        rh.printSysLog("Exit makeVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only one operand was given: it is the subfunction, not a userid.
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    if 'byUsers' in rh.parms:
        # Normalize the colon-delimited LOGONBY userid string to a list.
        rh.parms['byUsers'] = rh.parms['byUsers'].split(':')

    if rh.subfunction == 'DIRECTORY' and 'maxMemSize' not in rh.parms:
        # Default the maximum memory size to the initial memory size.
        rh.parms['maxMemSize'] = rh.parms['priMemSize']

    rh.printSysLog("Exit makeVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    # The "Usage:" header is suppressed when no subfunction was given.
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    for usageLine in (
            "  python " + rh.cmdName +
            " MakeVM <userid> directory <password> <priMemSize>",
            "                    <privClasses> --cpus <cpuCnt> " +
            "--ipl <ipl> --logonby <byUsers>",
            "                    --maxMemSize <maxMemSize> " +
            "--profile <profName>",
            "                    --maxCPU <maxCPUCnt> " +
            "--setReservedMem",
            "                    --dedicate <vdevs> ",
            "                    --loadportname <wwpn> " +
            "--loadlun <lun>",
            "  python " + rh.cmdName + " MakeVM help",
            "  python " + rh.cmdName + " MakeVM version"):
        rh.printLn("N", usageLine)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the MakeVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")
    # One entry per subfunction: keyword plus a short description.
    for subfuncLine in (
            "      directory     - " +
            "Create a virtual machine in the z/VM user directory.",
            "      help          - Displays this help information.",
            "      version       - " +
            "show the version of the makeVM function"):
        rh.printLn("N", subfuncLine)
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
        # Operand descriptions; continuation lines carry only padding.
        for operandLine in (
                "      --cpus <cpuCnt>       - " +
                "Specifies the desired number of virtual CPUs the",
                "                              " +
                "guest will have.",
                "      --maxcpu <maxCpuCnt>  - " +
                "Specifies the maximum number of virtual CPUs the",
                "                              " +
                "guest is allowed to define.",
                "      --ipl <ipl>           - " +
                "Specifies an IPL disk or NSS for the virtual",
                "                              " +
                "machine's directory entry.",
                "      --dedicate <vdevs> - " +
                "Specifies a device vdev list to dedicate to the ",
                "                              " +
                "virtual machine.",
                "      --loadportname <wwpn> - " +
                "Specifies a one- to eight-byte fibre channel port ",
                "                              " +
                "name of the FCP-I/O device to define with a LOADDEV ",
                "                              " +
                "statement in the virtual machine's definition",
                "      --loadlun <lun> - " +
                "Specifies a one- to eight-byte logical unit number ",
                "                              " +
                "name of the FCP-I/O device to define with a LOADDEV ",
                "                              " +
                "statement in the virtual machine's definition",
                "      --logonby <byUsers>   - " +
                "Specifies a list of up to 8 z/VM userids who can log",
                "                              " +
                "on to the virtual machine using their id and password.",
                "      --maxMemSize <maxMem> - " +
                "Specifies the maximum memory the virtual machine",
                "                              " +
                "is allowed to define.",
                "      --setReservedMem - " +
                "Set the additional memory space (maxMemSize - priMemSize)",
                "                              " +
                "as reserved memory of the virtual machine.",
                "      <password>            - " +
                "Specifies the password for the new virtual",
                "                              " +
                "machine.",
                "      <priMemSize>          - " +
                "Specifies the initial memory size for the new virtual",
                "                              " +
                "machine.",
                "      <privClasses>         - " +
                "Specifies the privilege classes for the new virtual",
                "                              " +
                "machine.",
                "      --profile <profName>  - " +
                "Specifies the z/VM PROFILE to include in the",
                "                              " +
                "virtual machine's directory entry.",
                "      <userid>              - " +
                "Userid of the virtual machine to create."):
            rh.printLn("N", operandLine)
    return
def getReservedMemSize(rh, mem, maxMem):
    """
    Calculate the reserved memory ('DEF STOR RESERVED') size for a guest.

    Input:
       Request Handle
       mem    - Initial memory size with an 'M' or 'G' suffix, e.g. '2G'.
       maxMem - Maximum memory size with an 'M' or 'G' suffix.

    Output:
       Reserved memory size string with an 'M' or 'G' suffix;
       '0M' when a validation error was recorded in the Request Handle.
    """
    rh.printSysLog("Enter makeVM.getReservedMemSize")

    gap = '0M'
    # Check size suffix
    memSuffix = mem[-1].upper()
    maxMemSuffix = maxMem[-1].upper()
    if (memSuffix not in ['M', 'G']) or (maxMemSuffix not in ['M', 'G']):
        # Suffix is not 'M' or 'G'
        msg = msgs.msg['0205'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0205'][0])
        # Bug fix: log the correct routine name (was "parseCmdLine").
        rh.printSysLog("Exit makeVM.getReservedMemSize, rc: " +
            str(rh.results['overallRC']))
        return gap

    # Convert both sizes to megabytes.
    memMb = int(mem[:-1])
    maxMemMb = int(maxMem[:-1])
    if memSuffix == 'G':
        memMb = memMb * 1024
    if maxMemSuffix == 'G':
        maxMemMb = maxMemMb * 1024

    # Check maxsize is greater than initial mem size
    if maxMemMb < memMb:
        msg = msgs.msg['0206'][1] % (modId, maxMem, mem)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0206'][0])
        # Bug fix: log the correct routine name (was "parseCmdLine").
        rh.printSysLog("Exit makeVM.getReservedMemSize, rc: " +
            str(rh.results['overallRC']))
        return gap

    # The define storage command can support 1-7 digits decimal number
    # So we will use 'M' as suffix unless the gap size exceeds 9999999
    # then convert to Gb.
    gapSize = maxMemMb - memMb

    # Cap the gap at the configured maximum reserved memory value.
    MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
        config.CONF.zvm.user_default_max_reserved_memory))
    if gapSize > MAX_STOR_RESERVED:
        gapSize = MAX_STOR_RESERVED

    if gapSize > 9999999:
        # Integer division keeps gapSize an int for the %i format;
        # for positive values this truncates exactly like the old
        # float division followed by %i.
        gapSize = gapSize // 1024
        gap = "%iG" % gapSize
    else:
        gap = "%iM" % gapSize

    rh.printSysLog("Exit makeVM.getReservedMemSize, rc: " +
        str(rh.results['overallRC']))
    return gap
import os
import subprocess
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import invokeSMCLI
from zvmsdk import config
modId = 'GHO'
version = "1.0.0"

# Threshold for guessing the device type of an unknown volume; the
# largest 3390 cylinder count known is 60019, so make it double.
maximumCyl = 130000

"""
List of subfunction handlers.
Each subfunction contains a list that has:
  Readable name of the routine that handles the subfunction,
  Code for the function call.
"""
# Bug fix: the readable names previously all said 'help', which made
# doIt's "--showparms" trace report the wrong routine for every
# subfunction.  They now match the actual handler routines.
subfuncHandler = {
    'DISKPOOLNAMES': ['getDiskPoolNames', lambda rh: getDiskPoolNames(rh)],
    'DISKPOOLVOLUMES': ['getDiskPoolVolumes',
                        lambda rh: getDiskPoolVolumes(rh)],
    'VOLUMEINFO': ['getVolumeInfo', lambda rh: getVolumeInfo(rh)],
    'DISKPOOLSPACE': ['getDiskPoolSpace', lambda rh: getDiskPoolSpace(rh)],
    'FCPDEVICES': ['getFcpDevices', lambda rh: getFcpDevices(rh)],
    'GENERAL': ['getGeneralInfo', lambda rh: getGeneralInfo(rh)],
    'HELP': ['help', lambda rh: help(rh)],
    'VERSION': ['getVersion', lambda rh: getVersion(rh)],
    }

"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
  - Human readable name of the operand,
  - Property in the parms dictionary to hold the value,
  - Is it required (True) or optional (False),
  - Type of data (1: int, 2: string).
"""
posOpsList = {
    'DISKPOOLSPACE': [
        ['Disk Pool Name', 'poolName', False, 2]
    ],
    'DISKPOOLVOLUMES': [
        ['Disk Pool Name', 'poolName', False, 2]
    ]
    }

"""
List of additional operands/options supported by the various subfunctions.
The dictionary following the subfunction name uses the keyword from the
command as a key.  Each keyword has a dictionary that lists:
  - the related parms item that stores the value,
  - how many values follow the keyword, and
  - the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
    'DISKPOOLNAMES': {'--showparms': ['showParms', 0, 0]},
    'DISKPOOLSPACE': {'--showparms': ['showParms', 0, 0]},
    'VOLUMEINFO': {'--showparms': ['showParms', 0, 0]},
    'FCPDEVICES': {'--showparms': ['showParms', 0, 0]},
    'GENERAL': {'--showparms': ['showParms', 0, 0]},
    'HELP': {'--showparms': ['showParms', 0, 0]},
    'VERSION': {'--showparms': ['showParms', 0, 0]},
    }
def doIt(rh):
    """
    Perform the requested function by invoking the subfunction handler.

    Input:
       Request Handle
    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms') is True:
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: getHost." +
            str(subfuncHandler[rh.subfunction][0]) + "(rh)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for parmKey in rh.parms:
            if parmKey != 'showParms':
                rh.printLn("N", "    " + parmKey + ": " +
                    str(rh.parms[parmKey]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit getHost.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
def getDiskPoolNames(rh):
    """
    Obtain the list of disk pools known to the directory manager.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'DISKPOOLNAMES'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getDiskPoolNames")

    parms = ["-q", "1", "-e", "3", "-T", "dummy"]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    else:
        # The pool name is the first blank-delimited token of each line.
        for respLine in results['response'].splitlines():
            rh.printLn("N", respLine.partition(' ')[0])

    rh.printSysLog("Exit getHost.getDiskPoolNames, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getDiskPoolVolumes(rh):
    """
    Obtain the list of volumes for the disk_pools on the hypervisor.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'DISKPOOLVOLUMES'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getDiskPoolVolumes")

    # Default to all pools ('*'); accept either a single name or a list.
    poolSpec = rh.parms.get('poolName', "*")
    poolNames = poolSpec if isinstance(poolSpec, list) else [poolSpec]

    parms = ["-q", "1", "-e", "3", "-T", "dummy",
             "-n", " ".join(poolNames)]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    else:
        for respLine in results['response'].splitlines():
            tokens = respLine.strip().split()
            tokens.pop(0)    # first token is the pool name
            rh.printLn("N", 'Diskpool Volumes:' + ' '.join(tokens))

    rh.printSysLog("Exit getHost.getDiskPoolVolumes, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVolumeInfo(rh):
    """
    Obtain the description info of the volume on the hypervisor.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'VOLUMEINFO'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getVolumeInfo")

    # Default to all volumes ('*'); accept a single name or a list.
    volSpec = rh.parms.get('volumeName', "*")
    volumeName = volSpec if isinstance(volSpec, list) else [volSpec]

    parms = ["-q", "1", "-e", "1", "-T", "dummy",
             "-n", " ".join(volumeName)]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    else:
        # Each response line is: <name> <type> <size> ...
        for respLine in results['response'].splitlines():
            fields = respLine.strip().split()
            rh.printLn("N", 'volume_name:' + fields[0])
            rh.printLn("N", 'volume_type:' + fields[1])
            rh.printLn("N", 'volume_size:' + fields[2])

    rh.printSysLog("Exit getHost.getVolumeInfo, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def _getDiskSize(parts):
size = 0
if parts[1][:4] == "3390":
size = int(parts[3]) * 737280
elif parts[1][:4] == "9336":
size = int(parts[3]) * 512
else:
# now we don't know the type, it might be caused
# by SMAPI layer and we got a ??? type
# then let's guess the type if > maximumCyl
# then think it's a 9336, otherwise, take as 3390
if int(parts[3]) > maximumCyl:
size = int(parts[3]) * 512
else:
size = int(parts[3]) * 737280
return size
def getDiskPoolSpace(rh):
    """
    Obtain disk pool space information for all or a specific disk pool.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'DISKPOOLSPACE'
          parms['poolName'] - Name of the disk pool. Optional,
                              if not present then information for all
                              disk pools is obtained.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getDiskPoolSpace")

    results = {'overallRC': 0}

    # Determine which pools to query: all ('*'), or the caller's value,
    # which may be a single name or a list of names.
    if 'poolName' not in rh.parms:
        poolNames = ["*"]
    else:
        if isinstance(rh.parms['poolName'], list):
            poolNames = rh.parms['poolName']
        else:
            poolNames = [rh.parms['poolName']]

    if results['overallRC'] == 0:
        # Loop thru each pool getting total. Do it for query 2 & 3.
        # The per-pool sums are keyed by query type; the summary below
        # reports query 3 as "Used" and query 2 as "Free".
        totals = {}
        for qType in ["2", "3"]:
            parms = [
                "-q", qType,
                "-e", "3",
                "-T", "DUMMY",
                "-n", " ".join(poolNames)]

            results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
            if results['overallRC'] == 0:
                for line in results['response'].splitlines():
                    parts = line.split()
                    # Response lines come in two layouts: the pool name
                    # is the 8th token in the 9-token form, otherwise
                    # the 5th token.
                    if len(parts) == 9:
                        poolName = parts[7]
                    else:
                        poolName = parts[4]
                    if poolName not in totals:
                        totals[poolName] = {"2": 0., "3": 0.}
                    totals[poolName][qType] += _getDiskSize(parts)
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)    # Use results from invokeSMCLI
                break

        if results['overallRC'] == 0:
            if len(totals) == 0:
                # No pool information found.
                msg = msgs.msg['0402'][1] % (modId, " ".join(poolNames))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0402'][0])
            else:
                # Produce a summary for each pool
                for poolName in sorted(totals):
                    total = totals[poolName]["2"] + totals[poolName]["3"]
                    rh.printLn("N", poolName + " Total: " +
                        generalUtils.cvtToMag(rh, total))
                    rh.printLn("N", poolName + " Used: " +
                        generalUtils.cvtToMag(rh, totals[poolName]["3"]))
                    rh.printLn("N", poolName + " Free: " +
                        generalUtils.cvtToMag(rh, totals[poolName]["2"]))

    rh.printSysLog("Exit getHost.getDiskPoolSpace, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getFcpDevices(rh):
    """
    Lists the FCP device channels that are active, free, or offline.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'FCPDEVICES'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getFcpDevices")

    results = invokeSMCLI(rh, "System_WWPN_Query", ["-T", "dummy"])
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
    else:
        # Pass the SMAPI response through unchanged.
        rh.printLn("N", results['response'])

    rh.printSysLog("Exit getHost.getFcpDevices, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getCPUCount(rh):
    """
    Obtain the LPAR processor counts via System_Processor_Query.

    LPAR CPUs total/used from /proc/sysinfo does not support mixed
    CP + IFL configurations, so the counts are taken from the
    System_Processor_Query SMAPI call instead.

    Input:
       Request Handle

    Output:
       Tuple (cpu_total, cpu_use):
          cpu_total - number of CP or IFL processors found.
          cpu_use   - number of processors whose status is MASTER,
                      ALTERNATE or PARKED.
       Both values are 0 when the query failed or returned no usable
       data; the caller treats 0 as "no information".
    """
    # Bug fix: log the actual routine name (was "lparCPUCount").
    rh.printSysLog("Enter getHost.getCPUCount")
    rh.results['overallRC'] = 0

    parms = []
    results = invokeSMCLI(rh, "System_Processor_Query", parms)

    cpu_total = 0
    cpu_use = 0
    if results['overallRC'] == 0:
        # flag becomes 1 once the header line containing 'TYPE' is
        # seen; counting starts from that line onward.
        flag = 0
        for line in results['response'].splitlines():
            line_value = line.partition(' ')[2]
            if not line_value.strip():
                continue
            else:
                type_row = line_value.split(' ')
                if len(type_row) > 1:
                    type_row = line_value.split(' ')[1]
                    if type_row == 'TYPE':
                        flag = 1
                    if flag == 1:
                        status_row = line_value.split(' ')[0]
                        if (status_row.find('MASTER') != -1 or
                                status_row == 'ALTERNATE' or
                                status_row == 'PARKED'):
                            cpu_use = cpu_use + 1
                        if (type_row == 'CP' or type_row == 'IFL'):
                            cpu_total = cpu_total + 1
    return cpu_total, cpu_use
def getGeneralInfo(rh):
    """
    Obtain general information about the host.

    Collects the z/VM host name and userid (vmcp), CPU/CEC/hypervisor
    data (/proc/sysinfo and System_Processor_Query), LPAR memory data
    (SMAPI) and the IPL time, then prints a consolidated report.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'GENERAL'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
       Return code - 4: problem getting some info
    """
    rh.printSysLog("Enter getHost.getGeneralInfo")

    # Get host using VMCP
    rh.results['overallRC'] = 0
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    # Bug fix: pre-set userid so a failing vmcp call no longer raises a
    # NameError when the output string is built below (only host was
    # previously given a fallback value).
    userid = "no info"
    host = "no info"
    try:
        host = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        host = bytes.decode(host)
        userid = host.split()[0]
        host = host.split()[2]
    except subprocess.CalledProcessError as e:
        msg = msgs.msg['0405'][1] % (modId, "Hypervisor Name",
                                     strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
        host = "no info"
    except Exception as e:
        # All other exceptions.
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        rh.updateResults(msgs.msg['0421'][0])
        host = "no info"

    # Get a bunch of info from /proc/sysinfo
    lparCpuTotal = "no info"
    lparCpuUsed = "no info"
    cecModel = "no info"
    cecVendor = "no info"
    hvInfo = "no info"
    with open('/proc/sysinfo', 'r') as myFile:
        for line in myFile:
            # Get total physical CPU in this LPAR
            if "LPAR CPUs Total" in line:
                lparCpuTotal = line.split()[3]
            # Get used physical CPU in this LPAR
            if "LPAR CPUs Configured" in line:
                lparCpuUsed = line.split()[3]
            # Get CEC model
            if "Type:" in line:
                cecModel = line.split()[1]
            # Get vendor of CEC
            if "Manufacturer:" in line:
                cecVendor = line.split()[1]
            # Get hypervisor type and version
            if "VM00 Control Program" in line:
                hvInfo = line.split()[3] + " " + line.split()[4]

    # Override the /proc/sysinfo CPU numbers with the counts from
    # System_Processor_Query (mixed CP + IFL is not reflected there);
    # a count of 0 means "no information" and is ignored.
    cpu_total, cpu_used = getCPUCount(rh)
    if (cpu_total != 0):
        lparCpuTotal = str(cpu_total)
    if (cpu_used != 0):
        lparCpuUsed = str(cpu_used)

    if lparCpuTotal == "no info":
        msg = msgs.msg['0405'][1] % (modId, "LPAR CPUs Total",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if lparCpuUsed == "no info":
        msg = msgs.msg['0405'][1] % (modId, "LPAR CPUs Configured",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if cecModel == "no info":
        msg = msgs.msg['0405'][1] % (modId, "Type:",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if cecVendor == "no info":
        msg = msgs.msg['0405'][1] % (modId, "Manufacturer:",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if hvInfo == "no info":
        msg = msgs.msg['0405'][1] % (modId, "VM00 Control Program",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])

    # Get processor architecture
    arch = str(os.uname()[4])

    # Get LPAR memory total & offline
    parm = ["-T", "dummy", "-k", "STORAGE="]
    lparMemTotal = "no info"
    lparMemStandby = "no info"
    results = invokeSMCLI(rh, "System_Information_Query", parm)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            if "STORAGE=" in line:
                lparMemOnline = line.split()[0]
                lparMemStandby = line.split()[4]
                lparMemTotal = lparMemOnline.split("=")[2]
                lparMemStandby = lparMemStandby.split("=")[1]
    else:
        # SMAPI API failed, so we put out messages
        # 300 and 405 for consistency
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
        msg = msgs.msg['0405'][1] % (modId, "LPAR memory",
                                     "(see message 300)",
                                     results['response'])
        rh.printLn("ES", msg)

    # Get LPAR memory in use
    parm = ["-T", "dummy", "-k", "detailed_cpu=show=no"]
    lparMemUsed = "no info"
    results = invokeSMCLI(rh, "System_Performance_Information_Query",
                          parm)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            if "MEMORY_IN_USE=" in line:
                lparMemUsed = line.split("=")[1]
                lparMemUsed = generalUtils.getSizeFromPage(rh, lparMemUsed)
    else:
        if config.CONF.zvm.bypass_smapiout:
            # we bypass the check of SMAPIOUT and use 0G directly
            # This currently used for test when SMAPIOUT is not ready
            lparMemUsed = '0G'
        else:
            # SMAPI API failed, so we put out messages
            # 300 and 405 for consistency
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI
            msg = msgs.msg['0405'][1] % (modId, "LPAR memory in use",
                                         "(see message 300)",
                                         results['response'])
            rh.printLn("ES", msg)

    # Get IPL Time
    ipl = ""
    cmd = ["sudo", "/sbin/vmcp", "query cplevel"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        ipl = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        # The IPL timestamp is the third line of the cplevel output.
        ipl = bytes.decode(ipl).split("\n")[2]
    except subprocess.CalledProcessError as e:
        msg = msgs.msg['0405'][1] % (modId, "IPL Time",
                                     strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    except Exception as e:
        # All other exceptions.
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        rh.updateResults(msgs.msg['0421'][0])

    # Create output string
    outstr = "ZCC USERID: " + userid
    outstr += "\nz/VM Host: " + host
    outstr += "\nArchitecture: " + arch
    outstr += "\nCEC Vendor: " + cecVendor
    outstr += "\nCEC Model: " + cecModel
    outstr += "\nHypervisor OS: " + hvInfo
    outstr += "\nHypervisor Name: " + host
    outstr += "\nLPAR CPU Total: " + lparCpuTotal
    outstr += "\nLPAR CPU Used: " + lparCpuUsed
    outstr += "\nLPAR Memory Total: " + lparMemTotal
    outstr += "\nLPAR Memory Offline: " + lparMemStandby
    outstr += "\nLPAR Memory Used: " + lparMemUsed
    outstr += "\nIPL Time: " + ipl
    rh.printLn("N", outstr)

    rh.printSysLog("Exit getHost.getGeneralInfo, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Get the version of this function.

    Input:
       Request Handle
    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    versionLine = "Version: " + version
    rh.printLn("N", versionLine)
    return 0
def help(rh):
    """
    Produce help output specifically for GetHost functions.

    Input:
       Request Handle
    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Emit the command synopsis first, then the per-operand detail.
    for helpSection in (showInvLines, showOperandLines):
        helpSection(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle
    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.parseCmdline")

    # GetHost subfunctions do not operate on a specific userid.
    rh.userid = ''

    if rh.totalParms >= 2:
        rh.subfunction = rh.request[1].upper()

    # Reject an unknown or missing subfunction with message 0011,
    # which lists the valid choices.
    if rh.subfunction not in subfuncHandler:
        validSubs = ', '.join(sorted(subfuncHandler.keys()))
        rh.printLn("ES", msgs.msg['0011'][1] % (modId, validSubs))
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 2    # Begin Parsing at 3rd operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit getHost.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    # The "Usage:" header is suppressed when no subfunction was given.
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    prefix = "  python " + rh.cmdName + " GetHost "
    for synopsis in ("diskpoolnames",
                     "diskpoolspace <poolName>",
                     "fcpdevices",
                     "general",
                     "help",
                     "version"):
        rh.printLn("N", prefix + synopsis)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the GetHost function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")
    # Description lines for each subfunction; continuation lines are
    # padded to align under the description column.
    subfuncHelp = (
        "      diskpoolnames - " +
        "Returns the names of the directory manager disk pools.",
        "      diskpoolspace - Returns disk pool size information.",
        "      fcpdevices    - " +
        "Lists the FCP device channels that are active, free, or",
        "                      offline.",
        "      general       - " +
        "Returns the general information related to the z/VM",
        "                      hypervisor environment.",
        "      help          - Returns this help information.",
        "      version       - Show the version of this function")
    for helpLine in subfuncHelp:
        rh.printLn("N", helpLine)
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
    rh.printLn("N", "      <poolName>    - Name of the disk pool.")
    return
msg = {
# 0001-0099: Parsing Messages
'0001': [{'overallRC': 4, 'rc': 4, 'rs': 1},
"ULT%s0001E %s %s subfunction's operand at position %i (%s) " +
"is not an integer: %s",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND_POSITION',
'OPERAND', 'OPERAND_VALUE')],
# Explain: An error was detected while parsing the command.
# The indicated operand is not an integer.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax so that the indicated operand is
# an integer, e.g., 10 and reissue the command.
'0002': [{'overallRC': 4, 'rc': 4, 'rs': 2},
"ULT%s0002E %s's %s subfunction is missing positional " +
"operand (%s) at position %i.",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND',
'OPERAND_POSITION')],
# Explain: An error was detected while parsing the command.
# A positional operand is missing.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax by specifying the missing operand
# and reissue the command.
'0003': [{'overallRC': 4, 'rc': 4, 'rs': 3},
"ULT%s0003E %s's %s subfunction %s keyword operand is " +
"missing a value.",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD')],
# Explain: An error was detected while parsing the command.
# A keyword operand that requires a value was specified without
# the value.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to provide a value for the specified
# keyword operand and reissue the command.
'0004': [{'overallRC': 4, 'rc': 4, 'rs': 4},
"ULT%s0004E %s's %s subfunction %s keyword operand is not " +
"an integer: %s",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD',
'KEYWORD_VALUE')],
# Explain: An error was detected while parsing the command.
# The specified operand for a keyword is not an integer.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax so that the keyword operand is
# an integer, e.g., 10 and reissue the command.
'0005': [{'overallRC': 4, 'rc': 4, 'rs': 5},
"ULT%s0005E %s's %s subfunction does not recognize keyword: %s",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD')],
# Explain: An error was detected while parsing the command.
# An unrecognized keyword was encountered.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to specify a recognized keyword
# and reissue the command.
'0006': [{'overallRC': 4, 'rc': 4, 'rs': 6},
"ULT%s0006E %s's %s subfunction encountered an unknown " +
"operand: %s",
('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND')],
# Explain: An error was detected while parsing the command.
# An unknown operand was encountered.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax and reissue the command.
'0007': [{'overallRC': 4, 'rc': 4, 'rs': 7},
"ULT%s0007E Unrecognized function: %s",
('RQH', 'FUNCTION_NAME')],
# Explain: An error was detected while parsing the command.
# The specified function is not known.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax and reissue the command.
'0008': [{'overallRC': 4, 'rc': 4, 'rs': 8},
"ULT%s0008E Specified function is not 'HELP' or 'VERSION': %s",
('RQH', 'SPECIFIED_FUNCTION')],
# Explain: An error was detected while parsing the command.
# The specified function was not 'HELP' or 'VERSION' which are the
# only valid functions for a command of the specified length.
# SysAct: Processing of the function terminates.
# UserResp: Correct the syntax and reissue the command.
'0009': [{'overallRC': 4, 'rc': 4, 'rs': 9},
"ULT%s0009E Too few arguments specified.",
('RQH')],
# Explain: An error was detected while parsing the command.
# The minimum number of arguments were not provided for the command.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax and reissue the command.
'0010': [{'overallRC': 4, 'rc': 4, 'rs': 10},
"ULT%s0010E Userid is missing",
('RQH')],
# Explain: An error was detected while parsing the command.
# A userid operand was not specified.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax and specify the userid along
# with any other required operands and reissue the command.
'0011': [{'overallRC': 4, 'rc': 4, 'rs': 11},
"ULT%s0011E Subfunction is missing. It should be one of " +
"the following: %s",
('RQH', 'VALID_SUBFUNCTIONS')],
# Explain: An error was detected while parsing the command.
# The name of the subfunction was not specified.
# SysAct: Processing of the function terminates.
        # UserResp: Correct the syntax and specify a valid subfunction
        # along with any other required operands and reissue the command.
'0012': [{'overallRC': 4, 'rc': 4, 'rs': 12},
"ULT%s0012E The request data is not one of the supported types " +
"of list or string: %s",
('RQH', 'REQUEST_DATA_TYPE')],
# Explain: The ReqHandle parseCmdline method was called with
# the request passed in a variable that was not a
# list or base string. Only these types of variables are
# supported for passing of the request to be parsed.
# SysAct: Processing of the function terminates.
# UserResp: Correct the calling function to use either a
# list or base string to hold the request to be processed
# and reinvoke the call.
'0013': [{'overallRC': 4, 'rc': 4, 'rs': 13},
"ULT%s0010E The desired state was: %s. Valid states are: %s",
('RQH', 'DESIRED_STATS', 'VALID_STATS')],
# Explain: An error was detected while parsing the command.
# The state operand value is not one of the accepted values.
# The valid values are shown in the message.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to use one of the valid states
# and reissue the command.
'0014': [{'overallRC': 4, 'rc': 4, 'rs': 14},
"ULT%s0014E The option %s was specified but the option %s " +
"was not specified. These options must both be specified.",
('RQH', 'OPTION1', 'OPTION2')],
# Explain: An error was detected while parsing the command.
# An option was specified which required a related
# option that was not specified.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to specify both options and
# reissue the command.
'0015': [{'overallRC': 4, 'rc': 4, 'rs': 15},
"ULT%s0015E The file system was not 'ext2', 'ext3', " +
"'ext4', 'xfs' or 'swap': %s",
('RQH', 'FILE_SYSTEM')],
# Explain: An error was detected while parsing the command.
# The type of file system does not match one of the valid
# values.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to use a valid file system type
# and reissue the command.
'0016': [{'overallRC': 4, 'rc': 4, 'rs': 16},
"ULT%s0016E The scp Data Type was not 'hex', 'ebcdic', " +
"or 'delete': %s",
('RQH', 'DATA_TYPE')],
# Explain: An error was detected while parsing the command.
# The value specified for the scp data type is not one of the
# recognized values.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the syntax to use a valid scp data type and
# reissue the command.
'0017': [{'overallRC': 4, 'rc': 4, 'rs': 17}, # dict is not used
"ULT%s0017W The maxwait time %i sec is not evenly divisible " +
"by the poll interval %i sec. Maximum wait time will be %i " +
"sec or %i poll intervals.",
('RQH', 'MAX_WAIT', 'POLL_INTERVAL', 'RECOMMEND_MAX_WAIT',
'RECOMMEND_POLL_INTERVAL')],
# Explain: When trying to determine how many polling intervals
# to wait for a desired guest power state, it was found that the
# specified maximum wait time was not evenly divisible by the
# number of polling interval seconds. The program instead
        # rounded the maximum wait time up to be evenly divisible
# by the polling interval.
# SysAct: Processing of the function continues with the
# new wait time.
# UserResp: If the wait time is unacceptably long, invoke
# the function with a maximum wait time and polling
# interval time which are evenly divisible and of an
# acceptable duration.
# 0200-0299: Utility Messages
'0200': [{'overallRC': 4, 'rc': 4, 'rs': 200},
"ULT%s0200E The size of the disk is not valid: %s",
('GUT', 'DISK_SIZE')],
# Explain: An error was encountered while attempting
# to convert the size of a disk from bytes to cylinders
# (for 3390 type disks) or bytes to blocks (for FBA type disks).
# This error can be caused by specifying the size as only a
# magnitude, (e.g., 'G' or 'M') instead of an integer
# appended to a magnitude (e.g., '20G').
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the disk size to specify a disk magnitude
# that includes the integer portion of the size in addition
# to the magnitude and reissue the command.
'0201': [{'overallRC': 4, 'rc': 4, 'rs': 201},
"ULT%s0201E Failed to convert %s to a number of blocks.",
('GUT', 'DISK_SIZE')],
# Explain: An error was encountered while attempting
# to convert the size of a disk from bytes to blocks.
# The size ended with a magnitude character and should have
# had an integer value prepended to the magnitude character
# (e.g. '10M' or '10G').
# The probable cause of the error is that the integer
# portion of the size contains a non-numeric character.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the disk size to specify a valid value
# and reissue the command.
'0202': [{'overallRC': 4, 'rc': 4, 'rs': 202},
"ULT%s0202E %s is not an integer size of blocks.",
('GUT', 'NUM_BLOCKS')],
# Explain: An error was encountered while attempting
# to convert the size of a disk from bytes to blocks.
# The size did not end with a valid magnitude character
# (i.e., 'M' or 'G') so it was treated as an integer
# value (e.g. '100000'). The probable cause of this
# error is that the size contains non-numeric
# characters.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the disk size to specify a valid value
# and reissue the command.
'0203': [{'overallRC': 4, 'rc': 4, 'rs': 203},
"ULT%s0203E Failed to convert %s to a number of cylinders.",
('GUT', 'DISK_SIZE')],
# Explain: An error was encountered while attempting
# to convert the size of a disk from bytes to cylinders.
# The size ended with a magnitude character and should have
# had an integer value prepended to the magnitude character
# (e.g. '10M' or '10G').
# The probable cause of the error is that the integer
# portion of the size contains non-numeric characters.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the disk size to specify a valid value
# and reissue the command.
'0204': [{'overallRC': 4, 'rc': 4, 'rs': 204},
"ULT%s0204E %s is not an integer size of cylinders.",
('GUT', 'DISK_SIZE')],
# Explain: An error was encountered while attempting
# to convert the size of a disk from bytes to cylinders.
# The size did not end with a valid magnitude character
# (i.e., 'M' or 'G') so it was treated as an integer
# value (e.g. '100000'). The probable cause of this
# error is that the size contains non-numeric
# characters.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the disk size to specify a valid value
# and reissue the command.
'0205': [{'overallRC': 4, 'rc': 4, 'rs': 205},
"ULT%s0205E memory size did not end with suffix 'G' or 'M'.",
('MVM')],
# Explain: An error was encountered while handling memory size.
# The size did not end with a valid magnitude character
# (i.e., 'M' or 'G').
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the memory size to specify a valid value
# and reissue the command.
'0206': [{'overallRC': 4, 'rc': 4, 'rs': 206},
"ULT%s0206E Max memory size %s specified is less than " +
"initial memory size %s.",
('MVM', 'MAX_MEM_SIZE', 'INIT_MEM_SIZE')],
        # Explain: An error was encountered while handling memory size.
        # The specified maximum memory size is smaller than the
        # initial memory size.
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the memory size to specify a valid value
# and reissue the command.
'0207': [{'overallRC': 4, 'rc': 4, 'rs': 207},
"ULT%s0207E VDISK Size (swap disk) is greater than 2G.",
('MVM')],
# Explain: An error was encountered while handling swap disk
# The swap disk size can't be greater than 2G
# SysAct: Processing of the subfunction terminates.
# UserResp: Correct the swap size to specify a valid value
# and reissue the command.
# 0208-0299: Available
# SMCLI and SMAPI related messages.
'0300': [{'overallRC': 8}, # dict is not used.
"ULT%s0300E SMAPI API failed: %s, overall rc: %s, rc: %s, " +
"rs: %s, errno: %s, cmd: %s, out: %s",
('SMP', 'API_NAME', 'OVERALLRC', 'RC', 'RS', 'ERRNO', 'CMD',
'OUTPUT')],
# Explain: The smcli program was invoked to call z/VM SMAPI for
# the indicated API. An error was encountered. The overall rc
# indicates the location of the problem:
# 8 - SMAPI returned the error. The rc and rs values are
# the values provided by SMAPI. The z/VM Systems
# Management Application Programming book provides
# additional documentation related to the return code and
# reason code in the API description and in the "Return
# and Reason Code Summary" chapter.
# 24 - The smcli program identified a parameter validation
# error. A message will indicate what was detected.
# It could be a missing parameter, invalid parameter, etc.
# Invoke the smcli program using the -h parameter and the
# API name shown in the error message to obtain additional
# invocation help, e.g. "./smcli Image_Query_DM -h".
# In addition, the z/VM Systems Management Application
# Programming book provides additional documentation
# related to the return code and reason code in the API
# description.
# 25 - The smcli program encountered an internal error.
# The rc and errno contains information related to the
# problem. The error message from the smcli invocation
# and log entries in the system log provide the most
# useful information to debug this error.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the problem using the
# information described in the explanation section. Reinvoke
# the function after you correct the problem.
'0301': [{'overallRC': 25, 'rc': 301, 'rs': 0},
"ULT%s0301E SMAPI API failed: %s, response header does not " +
"have the expected 3 values before the (details) string. " +
"cmd: %s, response header: %s, out: %s",
('SMP', 'API_NAME', 'CMD', 'HEADER', 'OUTPUT')],
# Explain: The smcli program was invoked to call z/VM SMAPI for
# the indicated API. The expected response from the smcli
# program has a header that contains 3 integers followed by
# the string '(details)'. The response returned by the program
# does not have the expected header. This indicates a problem
# in the smcli program or a problem invoking the smcli program.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure. If it is not a
# Linux permission problem then investigate a possible coding
# error in the smcli program. Correct the problem and reinvoke
# the function.
'0302': [{'overallRC': 25, 'rc': 302, 'rs': 0},
"ULT%s0302E SMAPI API failed: %s, word 1 in " +
"the response header is not an integer or in the range of " +
"expected values. word 1: %s, cmd: %s, response " +
"header: %s, out: %s",
('SMP', 'API_NAME', 'WORD1', 'CMD', 'HEADER', 'OUTPUT')],
# Explain: The smcli program was invoked to call z/VM SMAPI for
# the indicated API. The expected response from the smcli
# program has a header that contains 3 integers followed by
# the string '(details)'. The first word should provide the
# overall return code of the smcli invocation that indicates
# where the failure occurred. However, it does not represent
# an integer value or is not the expected error values of
# 8, 24 or 25. This indicates a problem in the smcli program.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure.
# Investigate a possible coding error in the smcli program.
# Correct the problem and reinvoke the function.
'0303': [{'overallRC': 25, 'rc': 303, 'rs': 0},
"ULT%s0303E SMAPI API failed: %s, word 2 in the response " +
"header is not an integer. word 2: %s, cmd: %s, response " +
"header: %s, out: %s",
('SMP', 'API_NAME', 'WORD2', 'CMD', 'HEADER', 'OUTPUT')],
# Explain: The smcli program was invoked to call z/VM SMAPI for
# the indicated API. The expected response from the smcli
# program has a header that contains 3 integers followed by
# the string '(details)'. The second word should provide the
# specific return code of the smcli invocation. However, it
# does not represent an integer value. This indicates a
# problem in the smcli program.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure.
# You may need an update to the smcli program.
# Correct the problem and reinvoke the function.
'0304': [{'overallRC': 25, 'rc': 304, 'rs': 0},
"ULT%s0304E SMAPI API failed: %s, word 3 in the response " +
"header is not an integer. word 3: %s, cmd: %s, response " +
"header: %s, out: %s",
('SMP', 'API_NAME', 'WORD3', 'CMD', 'HEADER', 'OUTPUT')],
# Explain: The smcli program was invoked to call z/VM SMAPI for
# the indicated API. The expected response from the smcli
# program has a header that contains 3 integers followed by
# the string '(details)'. The third word should provide
# the reason code or errno, depending on the error. However,
# it does not represent an integer value. This indicates
# a problem in the smcli program.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure.
# You may need an update to the smcli program.
# Correct the problem and reinvoke the function.
'0305': [{'overallRC': 99, 'rc': 305, 'rs': 0},
"ULT%s0305E Exception received on an attempt to " +
"communicate with SMAPI, cmd: %s, exception: %s, " +
"details: %s",
('SMP', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')],
# Explain: The function attempted to invoke the smcli
# program to communicate with z/VM SMAPI. This failed
# due to the exception shown in the message.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure using
# the exception and exception details provided in the message.
# Reinvoke the function after correcting the problem.
# 0306-0310: Available
# IUCV related messages
'0311': [{'overallRC': 2, 'rc': 2, 'rs': 99}, # dict is not used.
"ULT%s0311E On %s, command sent through IUCV failed, " +
"rc in response string is not an integer. " +
"cmd: %s, rc: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'OUTPUT')],
# Explain: The IUCV client returned a non-integer return
# code value.
# SysAct: Processing of the function terminates.
# UserResp: Contact the support team with the information
# included in the message. Investigate the problem in the
# IUCVCLNT program, fix the code and reinvoke the function.
'0312': [{'overallRC': 2, 'rc': 2, 'rs': 99}, # dict is not used.
"ULT%s0312E On %s, command sent through IUCV failed, " +
"reason code in response string is not an integer. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCV client returned a non-integer reason
# code value.
# SysAct: Processing of the function terminates.
# UserResp: Contact the support team with the information
# included in the message. The IUCVCLNT program is the probable
# cause of the failure. This will require a code change.
'0313': [{'overallRC': 2, 'rc': 1}, # dict is not used.
"ULT%s0313E On %s, command sent through IUCV was not " +
"authorized or a generic Linux error occurred. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The command that was sent to the target system failed.
# The cause of the failure is either a Linux permission problem
# for the command being executed or a generic Linux error.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to determine
# the cause of the failure on the target system and correct the
# problem. After correcting the problem, you should be able to
# reinvoke the failing function.
'0314': [{'overallRC': 2, 'rc': 2}, # dict is not used.
"ULT%s0314E IUCV client parameter error sending command to %s. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCVCLNT program communicates with managed
# systems using IUCV. The program detected invocation
# errors. This can be caused by a problem in the level of the
# IUCVCLNT program or the function that invoked it.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to determine
# the cause of the failure. This could require the support
# team to provide a code change to either the IUCVCLNT program
# or the code that invoked it.
'0315': [{'overallRC': 2, 'rc': 4}, # dict is not used.
"ULT%s0315E IUCV socket error sending command to %s. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCVCLNT program communicates with managed
# systems using IUCV. The program encountered an IUCV
# communication failure when it attempted to send a
# command to the managed system.
# This is probably caused by a failure in the managed system
# that prevents the system from receiving the command.
# One cause could be that the system logged off z/VM.
# Another cause is that the managed system is not running the
# related IUCV daemon or has not authorized access by
# the system contacting it in the /etc/iucv_authorized_userid
# file.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to
# determine the cause of the failure. Reinvoke the function
# after you correct the problem.
'0316': [{'overallRC': 2, 'rc': 8}, # dict is not used.
"ULT%s0316E On %s, command sent through IUCV failed. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The command that was sent to the target system failed.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to
# determine the cause of the failure. Reinvoke the function
# after you correct the problem.
'0317': [{'overallRC': 2, 'rc': 16}, # dict is not used.
"ULT%s0317E File transport failure while processing " +
"command for %s. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCVCLNT program failed to send a file to
# the target system.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to
# determine the cause of the failure. Reinvoke the function
# after you correct the problem.
'0318': [{'overallRC': 2, 'rc': 32}, # dict is not used.
"ULT%s0318E On %s, IUCV server file was not found. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCVCLNT program failed to find the IUCVSERV
# file on the local system. This file is expected to exist
# in the same directory as the IUCVCLNT program.
# SysAct: Processing of the function terminates.
# UserResp: Determine the reason that the IUCVSERV file could
# not be located and correct the problem. Reinvoke the
# function after you correct the problem.
'0319': [{'overallRC': 2}, # dict is not used.
"ULT%s0319E Unrecognized IUCV client error encountered " +
"while sending a command through IUCV to %s. " +
"cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The IUCVCLNT program returned a non-zero return code
# that does not correspond to a recognized error value.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the error using the
# information in the message. Log files on the local system
# and the target system may contain useful information to
# identify the failure. Reinvoke the function after you
# correct the problem.
'0320': [{'overallRC': 3, 'rc': 64},
"ULT%s0320E On %s, command sent through IUCV failed because " +
"timeout. cmd: %s, rc: %s, rs: %s, out: %s",
('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')],
# Explain: The command that was sent to the target system failed.
# SysAct: Processing of the function terminates.
# UserResp: Use the information included in the message to
# determine the cause of the failure. Reinvoke the function
# after you correct the problem.
# General subfunction processing messages
'0400': [{'overallRC': 4, 'rc': 4, 'rs': 400},
"ULT%s0400E The worker script %s does not exist.",
('GUT', 'SCRIPT_NAME')],
# Explain: The activation engine modification script specified
# for "aeScript" cannot be found.
# SysAct: Processing of the function ends with no action
# taken.
# UserResp: Correct the function call to point to an existing script
# and reinvoke the function.
'0401': [{'overallRC': 4, 'rc': 7, 'rs': 401},
"ULT%s0401E Failed to punch %s file to guest: %s, out: %s",
('GUT', 'FILE_LOCATION', 'USERID', 'OUTPUT')],
# Explain: The vmur punch command failed for the specified
# reason.
# SysAct: Processing of the function ends with no action
# taken.
# UserResp: Look up the reason the vmur command failed, correct
# the problem and reinvoke the function.
'0402': [{'overallRC': 4, 'rc': 5, 'rs': 402},
"ULT%s0402E No information was found for the specified " +
"pool(s): %s",
('GUT', 'DISK_POOL')],
# Explain: Image_Volume_Space_Query_DM returned successfully
# but the list of pools of the specified names was empty.
# SysAct: Processing terminates with an error.
# UserResp: Correct the function call to query existing pools and
# reinvoke the function.
'0403': [{'overallRC': 4, 'rc': 99, 'rs': 403}, # dict is not used.
"ULT%s0403E Failed to purge reader file %s, out: %s",
('GUT', 'SPOOL_ID', 'OUTPUT')],
# Explain: The vmcp purge reader file command failed.
# The system was already in the process of cleaning up from a
# failed attempt to punch a file, so the error processing
# continues.
# SysAct: Error processing continues.
# UserResp: Manually clean up the specified reader file using
# CP commands to avoid problems with old files or spool space
# filling up.
'0404': [{'overallRC': 4, 'rc': 8, 'rs': 404},
"ULT%s0404E Failed to spool the punch to the specified class %s" +
", out:%s ",
('GUT', 'SPOOL_CLASS', 'OUTPUT')],
# Explain: The vmcp change reader command failed with the
# specified output.
# SysAct: Processing of the function ends with no action
# taken.
# UserResp: Look up the reason the change reader command failed
# in the CP messages book or vmcp help. Correct the problem
# and reinvoke the function.
'0405': [{'overallRC': 4, 'rc': 6, 'rs': 405},
"ULT%s0405E Unable to obtain information related to: " +
"%s. Command used was: %s. Output was: %s",
('GUT', 'KEYWORD', 'CMD', 'OUTPUT')],
# Explain: While gathering hypervisor information, one of the
# commands used failed and that piece of information could
# not be queried.
# SysAct: The getHost GENERAL function returns "no info" for
# the specified hypervisor information.
# UserResp: If the information is needed, investigate the
# failure, correct it and reinvoke the function.
'0406': [{'overallRC': 4, 'rc': 9, 'rs': 406},
"ULT%s0406E Failed to punch %s because of VMUR timeout ",
('GUT', 'FILE_LOCATION')],
# Explain: When punching a file to the reader, the vmur punch
# command is issued up to 5 times with increasing timeouts.
# This error comes after the 5th try if the vmur command
# was still unsuccessful.
# SysAct: Processing of the function ends with no action taken.
# UserResp: This error could be because of another process
# also issuing vmur commands at the same time. Wait a few
# seconds and reinvoke the function.
'0407': [{'overallRC': 4, 'rc': 4, 'rs': 407}, # dict is not used.
"ULT%s0407W Unable to spool reader to all classes, " +
"it is possible that there may be additional console " +
"files available that are not listed in the response. " +
"Response from %s is %s",
('GUT', 'CMD', 'OUTPUT')],
# Explain: The vmcp spool reader class * command was not
# successful. This means the reader could not be changed
# to get files of all classes, and thus there could be
# files that are ignored.
# SysAct: Processing of the function continues.
# UserResp: If missing files are suspected, investigate the
# cause of the failure in the CP messages book or vmcp
# help and reinvoke the function.
'0408': [{'overallRC': 4, 'rc': 4, 'rs': 408},
"ULT%s0408E Error getting list of files in the reader " +
"to search for logs from user %s. Response from %s is %s",
('GUT', 'USERID', 'CMD', 'OUTPUT')],
# Explain: The vmur list command failed. The list of files
# in the user's reader could not be determined.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Investigate the failure in vmur and correct the
# problem, then reinvoke the function.
'0409': [{'overallRC': 4, 'rc': 4, 'rs': 409},
"ULT%s0409E Unable to get console log for user %s. " +
"The userid is either: not logged on, not spooling " +
"its console, or has not created any console output. " +
"Error rc=rs=8 returned from " +
"Image_Console_Get.",
('GUT', 'USERID')],
# Explain: The Image_Console_Get SMAPI call returned that
# there were no spool files available for that user.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Check that the user is logged on, has issued a
# SPOOL CONSOLE command and has done some actions that
# would result in console output, then reinvoke the function.
'0410': [{'overallRC': 4, 'rc': 4, 'rs': 410},
"ULT%s0410E Unable to get console log for user %s " +
"no spool files were found in our reader from this " +
"user, it is possible another process has already " +
"received them.",
('GUT', 'USERID')],
# Explain: The Image_Console_Get SMAPI call should have
# put files of class T and "CON" with the userid as the
# filename in our reader. However no files were found
        # in the vmur list output with these characteristics.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Likely another process in this virtual machine
# has already processed the spool files. They are gone.
'0411': [{'overallRC': 4, 'rc': 4, 'rs': 411},
"ULT%s0411E Unable to receive console output file. " +
"Reader not online. /sys/bus/ccw/drivers/vmur/0.0.000c" +
"/online = 0",
('GUT')],
# Explain: The reader is typically at virtual device address
# x'000C'. Linux does not believe this device is online.
# SysAct: Processing of the function ends with no action taken.
# UserResp: If the reader is at a different virtual device
# address, update the SMT code to recognize the alternative
# device address, otherwise bring the reader at x'000C' online
# to Linux. Then, reinvoke the function.
'0412': [{'overallRC': 4, 'rc': 4, 'rs': 412}, # dict is not used.
"ULT%s0412E Malformed reply from SMAPI, unable to fill " +
"in performance information, exception: %s, " +
"details: %s, Response: %s",
('GUT', 'EXCEPTION', 'EXCEPTION_DETAILS', 'OUTPUT')],
# Explain: An error was encountered while processing the
# response information from the SMAPI Image_Performance_Query
# API. The response is not in the expected format.
# The exception that occurred during processing of the
# response, its details and the response are included
# in the message.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure. A code change
# may be needed in the function or in the z/VM SMAPI code.
# After correcting the code, reinvoke the function.
'0413': [{'overallRC': 99, 'rc': 99, 'rs': 413},
"ULT%s0413E Userid '%s' did not enter the expected " +
"operating system state of '%s' in %i seconds.",
('GUT', 'USERID', 'DESIRED_STATE', 'MAX_WAIT')],
# Explain: The managed system did not enter the operating
# system state that was shown in the message in the
# maximum number of seconds allowed for this to happen.
# The maximum number of seconds is a combination of the,
# specified or defaulted, polling interval and the
# maximum number of polling attempts.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure and correct
# the cause.
'0414': [{'overallRC': 99, 'rc': 99, 'rs': 414},
"ULT%s0414E Userid '%s' did not enter the expected " +
"virtual machine state of '%s' in %i seconds.",
('GUT', 'USERID', 'DESIRED_STATE', 'MAX_WAIT')],
# Explain: The managed system did not enter the virtual
# machine log on/off state that was shown in the message
# in the maximum number of seconds allowed for this to happen.
# The maximum number of seconds is a combination of the,
# specified or defaulted, polling interval and the
# maximum number of polling attempts.
# SysAct: Processing of the function terminates.
# UserResp: Determine the cause of the failure and correct
# the cause.
'0415': [{'overallRC': 3, 'rc': 415}, # rs comes from failing rc
"ULT%s0415E Command failed: '%s', rc: %i out: %s",
('GUT', 'CMD', 'RC', 'OUTPUT')],
# Explain: The indicated command failed. The return code
# and output from the command are shown.
# SysAct: Function processing terminates.
# UserResp: Use the information provided with the message
# to determine the cause of the failure and correct the
# problem. Reinvoke the function after you correct the
# problem.
'0416': [{'overallRC': 99, 'rc': 99, 'rs': 416},
"ULT%s0416E Command returned a response " +
"containing '%s' but did not have at least %i words " +
"following it. cmd: '%s', out: '%s'",
('GUT', 'KEYWORD', 'NUM', 'CMD', 'OUTPUT')],
# Explain: A command was invoked that returned a successful
# return code indication. The response contained the
# expected string but did not contain the expected number
# of words that follow the string.
# SysAct: Processing of the function terminates.
# UserResp: Use the information provided in the message
# to determine the cause of the problem and correct it.
# Reinvoke the function after you correct the problem.
'0417': [{'overallRC': 99, 'rc': 99, 'rs': 417},
"ULT%s0417E Command did not return the expected response " +
"containing '%s', cmd: '%s', out: '%s'",
('GUT', 'KEYWORD', 'CMD', 'OUTPUT')],
# Explain: A command was invoked that returned a successful
# return code indication. The response did not contain the
# expected string.
# SysAct: Processing of the function terminates.
# UserResp: Use the information provided in the message
# to determine the reason the identified string was not
# present in the response to identify the cause.
# Reinvoke the function after you correct the problem.
'0418': [{'overallRC': 99, 'rc': 99, 'rs': 418},
"ULT%s0418E Userid %s is not logged on to this system.",
('GUT', 'USERID')],
# Explain: A CP message HCP0045E was returned, indicating the
# userid specified is not logged on to this z/VM system,
# thus it cannot be relocated.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Correct the function call to specify a correct userid and
# reinvoke the function.
'0419': [{'overallRC': 99, 'rc': 99, 'rs': 419},
"ULT%s0419E A relocation is not in progress for userid %s.",
('GUT', 'USERID')],
# Explain: An attempt was made to query or cancel a relocation
# for a user, but the SMAPI command indicated that no
# relocation was in progress.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Reinvoke the function for a relocation that is in
# progress.
'0420': [{'overallRC': 99, 'rc': 99, 'rs': 420}, # dict is not used.
"ULT%s0420E An error occurred issuing a %s for userid %s. " +
"Please look up message(s): %s in the CP Messages book for " +
"more information.",
('GUT', 'CMD', 'USERID', 'ERROR_CODE')],
# Explain: The VMRELOCATE command returns a list of messages
# containing all the problems encountered when trying to issue
# the command.
# SysAct: Processing of the function ends with no action taken.
# UserResp: Look up the codes provided in the CP messages book,
# correct the problems and reinvoke the function.
'0421': [{'overallRC': 99, 'rc': 421, 'rs': 0},
"ULT%s0421E Exception received on an attempt to " +
"execute a cmd: %s, exception: %s, " +
"details: %s",
('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')],
# Explain: The command indicated by the message failed.
# The error message contains exception name and details
# contained in the exception.
# SysAct: Processing of the function ends with no further
# action taken.
# UserResp: Use the information in the message to determine
# the cause of the error and correct the problem.
# Reinvoke the function after you have corrected the problem.
'0422': [{'overallRC': 99, 'rc': 422, 'rs': 0},
"ULT%s0422W Exception received on an attempt to " +
"execute a cmd: %s, exception: %s, " +
"details: %s. Will attempt to continue processing.",
('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')],
# Explain: While trying to execute a vmcp command, an error
# occurred. However the vmcp command was not central
# to processing the subfunction, so processing
# continues.
# SysAct: Function processing continues.
# UserResp: If there is reason to suspect the function did
# not execute completely, investigate the error. Otherwise
# ignore this message.
'0423': [{'overallRC': 4, 'rc': 4, 'rs': 423}, # dict is not used.
"ULT%s0423W Unable to spool reader to all classes, " +
"it is possible that there may be additional console " +
"files available that are not listed in the response. " +
"Command: %s, exception %s, details %s. Will attempt " +
"to continue processing.",
('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')],
# Explain: The vmcp spool reader class * command was not
# successful. This means the reader could not be changed
# to get files of all classes, and thus there could be
# files that are ignored. The exception was of a different
# type than in message 407.
# SysAct: Processing of the function continues.
# UserResp: If missing files are suspected, investigate the
# cause of the failure in the CP messages book or vmcp
# help and reinvoke the function.
'0424': [{'overallRC': 4, 'rc': 4, 'rs': 424},
"ULT%s0424E Failed to transfer %s file to guest: %s, out: %s",
('GUT', 'FILE_LOCATION', 'USERID', 'OUTPUT')],
# Explain: The vmcp transfer command failed for the specified
# reason.
# SysAct: Processing of the function ends with no action
# taken.
# UserResp: Look up the reason the vmcp transfer command failed,
# correct the problem and reinvoke the function.
'0501': [{'overallRC': 5, 'rc': 1, 'rs': 501},
"ULT%s0501E Timeout Exception recevied on an attempt to " +
"execute a cmd: %s, exception: %s, " +
"details: %s",
('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')],
# Explain: The command indicated by the message failed due to a timeout.
# The error message contains exception name and details
# contained in the exception.
# SysAct: Processing of the function ends with no further
# action taken.
# UserResp: Use the information in the message to determine
# the cause of the error and correct the problem.
# Reinvoke the function after you have corrected the problem.
# 5000-6100: Reserved for SMCLI
} | zVMCloudConnector | /zVMCloudConnector-1.6.3.tar.gz/zVMCloudConnector-1.6.3/smtLayer/msgs.py | msgs.py |
from time import time
from smtLayer.ReqHandle import ReqHandle
from zvmsdk import config
from zvmsdk import log
version = '1.0.0' # Version of this function.
class SMT(object):
    """
    Systems Management Ultra Thin daemon.
    """

    def __init__(self, **kwArgs):
        """
        Constructor.

        Input:
           cmdName=<Name of command> -
                   Specifies the name of the command that drives SMT.
           captureLogs=<True|False>
                   Enables or disables log capture for all requests.
        """
        # Prefix used when generating request ids for this instance.
        self.reqIdPrefix = int(time() * 100)
        self.reqCnt = 0  # Number of requests handled so far.

        logger = log.Logger('SMT')
        # NOTE(review): log_level is the literal string 'logging.DEBUG';
        # confirm the zvmsdk Logger expects a string of this form rather
        # than the logging.DEBUG constant.
        logger.setup(log_dir=config.CONF.logging.log_dir,
                     log_level='logging.DEBUG',
                     log_file_name='smt.log')
        self.logger = logger.getlog()

        # Command name associated with this SMT instance.
        self.cmdName = kwArgs.get('cmdName', "")

        # Whether Syslog entries are captured & returned by default.
        self.captureLogs = kwArgs.get('captureLogs', False)

    def disableLogCapture(self):
        """
        Disable capturing of log entries for all requests. """
        self.captureLogs = False  # Don't capture Syslog entries

    def enableLogCapture(self):
        """
        Enable capturing of log entries for all requests. """
        self.captureLogs = True  # Begin capturing & returning Syslog entries

    def request(self, requestData, **kwArgs):
        """
        Process a request.

        Input:
           Request as either a string or a list.
           captureLogs=<True|False>
                   Enables or disables log capture per request.
                   This overrides the value from SMT.
           requestId=<id> to pass a value for the request Id instead of
                   using one generated by SMT.

        Output:
           Dictionary containing the results. See ReqHandle.buildReturnDict()
           for information on the contents of the dictionary.
        """
        self.reqCnt += 1

        # Per-request log capture override, falling back to the
        # instance-wide setting.
        logFlag = kwArgs.get('captureLogs', self.captureLogs)

        # Caller-supplied request id, or one generated from the instance
        # prefix and the request counter.
        reqId = kwArgs.get('requestId',
                           str(self.reqIdPrefix) + str(self.reqCnt))

        handle = ReqHandle(
            requestId=reqId,
            captureLogs=logFlag,
            smt=self)

        handle.parseCmdline(requestData)
        if handle.results['overallRC'] == 0:
            handle.printSysLog("Processing: " + handle.requestString)
            handle.driveFunction()

        return handle.results
import os.path
import re
import shutil
import tarfile
import tempfile
from smtLayer import generalUtils
from smtLayer import msgs
from smtLayer.vmUtils import disableEnableDisk, execCmdThruIUCV, installFS
from smtLayer.vmUtils import invokeSMCLI, isLoggedOn
from smtLayer.vmUtils import punch2reader, purgeReader
modId = "CVM"
version = "1.0.0"
"""
List of subfunction handlers.
Each subfunction contains a list that has:
Readable name of the routine that handles the subfunction,
Code for the function call.
"""
subfuncHandler = {
'ADD3390': ['add3390', lambda rh: add3390(rh)],
'ADD9336': ['add9336', lambda rh: add9336(rh)],
'DEDICATE': ['dedicate', lambda rh: dedicate(rh)],
'UNDEDICATE': ['undedicate', lambda rh: undedicate(rh)],
'AEMOD': ['addAEMOD', lambda rh: addAEMOD(rh)],
'IPL': ['addIPL', lambda rh: addIPL(rh)],
'LOADDEV': ['addLOADDEV', lambda rh: addLOADDEV(rh)],
'HELP': ['help', lambda rh: help(rh)],
'PUNCHFILE': ['punchFile', lambda rh: punchFile(rh)],
'PURGERDR': ['purgeRDR', lambda rh: purgeRDR(rh)],
'REMOVEDISK': ['removeDisk', lambda rh: removeDisk(rh)],
'REMOVEIPL': ['removeIPL', lambda rh: removeIPL(rh)],
'VERSION': ['getVersion', lambda rh: getVersion(rh)],
}
"""
List of positional operands based on subfunction.
Each subfunction contains a list which has a dictionary with the following
information for the positional operands:
- Human readable name of the operand,
- Property in the parms dictionary to hold the value,
- Is it required (True) or optional (False),
- Type of data (1: int, 2: string).
"""
posOpsList = {
'ADD3390': [
['Disk pool name', 'diskPool', True, 2],
['Virtual address', 'vaddr', True, 2],
['Disk size', 'diskSize', True, 2]],
'ADD9336': [
['Disk pool name', 'diskPool', True, 2],
['Virtual address', 'vaddr', True, 2],
['Disk size', 'diskSize', True, 2]],
'DEDICATE': [
['Virtual device address', 'vaddr', True, 2],
['Real device address', 'raddr', True, 2],
['Read only mode', 'mode', True, 2]],
'UNDEDICATE': [
['Virtual device address', 'vaddr', True, 2]],
'AEMOD': [
['Activation Engine Modification Script',
'aeScript', True, 2]],
'IPL': [
['Virtual Address or NSS name', 'addrOrNSS', True, 2]],
'PUNCHFILE': [
['File to punch', 'file', True, 2]],
'REMOVEDISK': [
['Virtual address', 'vaddr', True, 2]],
'REMOVEIPL': [],
}
"""
List of additional operands/options supported by the various subfunctions.
The dictionary following the subfunction name uses the keyword from the
command as a key. Each keyword has a dictionary that lists:
- the related parms item that stores the value,
- how many values follow the keyword, and
- the type of data for those values (1: int, 2: string)
"""
keyOpsList = {
'ADD3390': {
'--filesystem': ['fileSystem', 1, 2],
'--mode': ['mode', 1, 2],
'--multipw': ['multiPW', 1, 2],
'--readpw': ['readPW', 1, 2],
'--showparms': ['showParms', 0, 0],
'--writepw': ['writePW', 1, 2]},
'ADD9336': {
'--filesystem': ['fileSystem', 1, 2],
'--mode': ['mode', 1, 2],
'--multipw': ['multiPW', 1, 2],
'--readpw': ['readPW', 1, 2],
'--showparms': ['showParms', 0, 0],
'--writepw': ['writePW', 1, 2]},
'AEMOD': {
'--invparms': ['invParms', 1, 2],
'--showparms': ['showParms', 0, 0]},
'HELP': {},
'IPL': {
'--loadparms': ['loadParms', 1, 2],
'--parms': ['parms', 1, 2],
'--showparms': ['showParms', 0, 0]},
'LOADDEV': {
'--boot': ['boot', 1, 2],
'--addr': ['addr', 1, 2],
'--lun': ['lun', 1, 2],
'--wwpn': ['wwpn', 1, 2],
'--scpDataType': ['scpDataType', 1, 2],
'--scpData': ['scpData', 1, 2],
'--showparms': ['showParms', 0, 0]},
'PUNCHFILE': {
'--class': ['class', 1, 2],
'--showparms': ['showParms', 0, 0], },
'PURGERDR': {'--showparms': ['showParms', 0, 0]},
'REMOVEDISK': {'--showparms': ['showParms', 0, 0]},
'REMOVEIPL': {'--showparms': ['showParms', 0, 0]},
'VERSION': {},
}
def add3390(rh):
    """
    Adds a 3390 (ECKD) disk to a virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'ADD3390'
          userid      - userid of the virtual machine
          parms['diskPool']   - Disk pool
          parms['diskSize']   - size of the disk in cylinders or bytes.
          parms['fileSystem'] - Linux filesystem to install on the disk.
          parms['mode']       - Disk access mode (optional, defaults to 'W').
          parms['multiPW']    - Multi-write password
          parms['readPW']     - Read password
          parms['vaddr']      - Virtual address
          parms['writePW']    - Write password

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.add3390")

    # 'mode' is optional.  Resolve it once so the same default ('W') is
    # used for the directory update, the filesystem install and the live
    # attach.  (Previously only the directory call applied the default,
    # so omitting 'mode' together with 'fileSystem' raised a KeyError.)
    mode = rh.parms.get('mode', 'W')

    results, cyl = generalUtils.cvtToCyl(rh, rh.parms['diskSize'])
    if results['overallRC'] != 0:
        # Message already sent.  Only need to update the final results.
        rh.updateResults(results)

    if results['overallRC'] == 0:
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-t", "3390",
            "-a", "AUTOG",
            "-r", rh.parms['diskPool'],
            "-u", "1",
            "-z", cyl,
            "-f", "1"]
        hideList = []  # Indexes of password values to hide in the log.
        parms.extend(["-m", mode])
        if 'readPW' in rh.parms:
            parms.extend(["-R", rh.parms['readPW']])
            hideList.append(len(parms) - 1)
        if 'writePW' in rh.parms:
            parms.extend(["-W", rh.parms['writePW']])
            hideList.append(len(parms) - 1)
        if 'multiPW' in rh.parms:
            parms.extend(["-M", rh.parms['multiPW']])
            hideList.append(len(parms) - 1)

        results = invokeSMCLI(rh,
                              "Image_Disk_Create_DM",
                              parms,
                              hideInLog=hideList)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)  # Use results returned by invokeSMCLI

    if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
        # Install the requested Linux file system on the new disk.
        results = installFS(
            rh,
            rh.parms['vaddr'],
            mode,
            rh.parms['fileSystem'],
            "3390")

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if results['overallRC'] != 0:
            # Cannot determine if VM is logged on or off.
            # We have partially failed.  Pass back the results.
            rh.updateResults(results)
        elif results['rs'] == 0:
            # VM is logged on: add the disk to the active configuration.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-m", mode]

            results = invokeSMCLI(rh, "Image_Disk_Create", parms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
                           " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)  # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.add3390, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def add9336(rh):
    """
    Adds a 9336 (FBA) disk to virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'ADD9336'
          userid      - userid of the virtual machine
          parms['diskPool']   - Disk pool
          parms['diskSize']   - size of the disk in blocks or bytes.
          parms['fileSystem'] - Linux filesystem to install on the disk.
          parms['mode']       - Disk access mode (optional, defaults to 'W').
          parms['multiPW']    - Multi-write password
          parms['readPW']     - Read password
          parms['vaddr']      - Virtual address
          parms['writePW']    - Write password

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.add9336")

    # 'mode' is optional.  Resolve it once so the same default ('W') is
    # used for the directory update, the filesystem install and the live
    # attach.  (Previously only the directory call applied the default,
    # so omitting 'mode' together with 'fileSystem' raised a KeyError.)
    mode = rh.parms.get('mode', 'W')

    results, blocks = generalUtils.cvtToBlocks(rh, rh.parms['diskSize'])
    if results['overallRC'] != 0:
        # Message already sent.  Only need to update the final results.
        rh.updateResults(results)

    if results['overallRC'] == 0:
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-t", "9336",
            "-a", "AUTOG",
            "-r", rh.parms['diskPool'],
            "-u", "1",
            "-z", blocks,
            "-f", "1"]
        hideList = []  # Indexes of password values to hide in the log.
        parms.extend(["-m", mode])
        if 'readPW' in rh.parms:
            parms.extend(["-R", rh.parms['readPW']])
            hideList.append(len(parms) - 1)
        if 'writePW' in rh.parms:
            parms.extend(["-W", rh.parms['writePW']])
            hideList.append(len(parms) - 1)
        if 'multiPW' in rh.parms:
            parms.extend(["-M", rh.parms['multiPW']])
            hideList.append(len(parms) - 1)

        results = invokeSMCLI(rh,
                              "Image_Disk_Create_DM",
                              parms,
                              hideInLog=hideList)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)  # Use results from invokeSMCLI

    if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
        # Install the file system
        results = installFS(
            rh,
            rh.parms['vaddr'],
            mode,
            rh.parms['fileSystem'],
            "9336")

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if (results['overallRC'] == 0 and results['rs'] == 0):
            # VM is logged on: add the disk to the active configuration.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-m", mode]

            results = invokeSMCLI(rh, "Image_Disk_Create", parms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
                           " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)  # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.add9336, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def dedicate(rh):
    """
    Dedicate a real device to a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'DEDICATEDM'
          userid      - userid of the virtual machine
          parms['vaddr'] - Virtual address
          parms['raddr'] - Real address
          parms['mode']  - Read only mode or not.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.dedicate")

    # The same parameter set drives both the directory (DM) call and
    # the live (active configuration) call.
    devParms = [
        "-T", rh.userid,
        "-v", rh.parms['vaddr'],
        "-r", rh.parms['raddr'],
        "-R", rh.parms['mode']]

    # Update the directory entry first.
    results = invokeSMCLI(rh,
                          "Image_Device_Dedicate_DM",
                          devParms,
                          hideInLog=[])
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)
    else:
        # Directory updated; if the machine is logged on, dedicate the
        # device in the active configuration as well.
        results = isLoggedOn(rh, rh.userid)
        if results['overallRC'] == 0 and results['rs'] == 0:
            results = invokeSMCLI(rh, "Image_Device_Dedicate", devParms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Dedicated device " + rh.parms['vaddr'] +
                           " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)

    rh.printSysLog("Exit changeVM.dedicate, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def undedicate(rh):
    """
    Undedicate a device, removing it from the virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'UNDEDICATE'
          userid      - userid of the virtual machine
          parms['vaddr'] - Virtual address

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.undedicate")

    parms = [
        "-T", rh.userid,
        "-v", rh.parms['vaddr']]
    hideList = []
    results = invokeSMCLI(rh,
                          "Image_Device_Undedicate_DM",
                          parms,
                          hideInLog=hideList)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)  # Use results from invokeSMCLI

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if (results['overallRC'] == 0 and results['rs'] == 0):
            # VM is logged on: undedicate the device from the active
            # configuration as well.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr']]
            results = invokeSMCLI(rh, "Image_Device_Undedicate", parms)
            if results['overallRC'] == 0:
                # Corrected message: this path removes the device (the
                # original text wrongly said "Dedicated device ... to").
                rh.printLn("N", "Undedicated device " + rh.parms['vaddr'] +
                           " from the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)  # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.undedicate, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def addAEMOD(rh):
    """
    Send an Activation Modification Script to the virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'AEMOD'
          userid      - userid of the virtual machine
          parms['aeScript'] - File specification of the AE script
          parms['invParms'] - invparms operand (optional)

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
       Return code - 4: input error, rs - 11 AE script not found
    """
    rh.printSysLog("Enter changeVM.addAEMOD")
    invokeScript = "invokeScript.sh"
    trunkFile = "aemod.doscript"
    fileClass = "X"

    tempDir = tempfile.mkdtemp()
    try:
        if os.path.isfile(rh.parms['aeScript']):
            # Short (base) name of the activation engine modifier script.
            baseName = os.path.basename(rh.parms['aeScript'])

            # Copy the mod script to our temp directory.
            shutil.copyfile(rh.parms['aeScript'],
                            os.path.join(tempDir, baseName))

            # Create the invocation script.  '--invparms' is optional;
            # default to no extra parameters (the original code raised
            # KeyError when it was omitted).
            invParms = rh.parms.get('invParms', '')
            with open(os.path.join(tempDir, invokeScript), "w") as fh:
                fh.write("#!/bin/bash \n")
                fh.write("/bin/bash %s %s \n" % (baseName, invParms))

            # Generate the tar package for punch.
            with tarfile.open(os.path.join(tempDir, trunkFile), "w") as tar:
                for fname in os.listdir(tempDir):
                    tar.add(os.path.join(tempDir, fname), arcname=fname)

            # Punch file to reader.
            punch2reader(rh, rh.userid,
                         os.path.join(tempDir, trunkFile), fileClass)
        else:
            # The AE script does not exist.
            msg = msgs.msg['0400'][1] % (modId, rh.parms['aeScript'])
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0400'][0])
    finally:
        # Always clean up the temp directory, even on unexpected errors.
        shutil.rmtree(tempDir)

    rh.printSysLog("Exit changeVM.addAEMOD, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def addIPL(rh):
    """
    Sets the IPL statement in the virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'IPL'
          userid      - userid of the virtual machine
          parms['addrOrNSS']  - Address or NSS name
          parms['loadparms']  - Loadparms operand (optional)
          parms['parms']      - Parms operand (optional)

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.addIPL")

    parms = ["-T", rh.userid, "-s", rh.parms['addrOrNSS']]
    # Append the optional operands when they were supplied.
    for key, flag in (('loadparms', "-l"), ('parms', "-p")):
        if key in rh.parms:
            parms.extend([flag, rh.parms[key]])

    results = invokeSMCLI(rh, "Image_IPL_Set_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)  # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.addIPL, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def addLOADDEV(rh):
    """
    Sets the LOADDEV statement in the virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'ADDLOADDEV'
          userid      - userid of the virtual machine
          parms['boot']        - Boot program number
          parms['addr']        - Logical block address of the boot record
          parms['lun']         - One to eight-byte logical unit number
                                 of the FCP-I/O device.
          parms['wwpn']        - World-Wide Port Number
          parms['scpDataType'] - SCP data type ('hex', 'ebcdic' or 'delete')
          parms['scpData']     - Designates information to be passed to the
                                 program is loaded during guest IPL.

          Note that any of the parms may be left blank, in which case
          we will not update them.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.addLOADDEV")

    # scpData and scpDataType must appear or disappear concurrently,
    # except that 'delete' needs no accompanying data.
    if ('scpData' in rh.parms and 'scpDataType' not in rh.parms):
        msg = msgs.msg['0014'][1] % (modId, "scpData", "scpDataType")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0014'][0])
        return
    if ('scpDataType' in rh.parms and 'scpData' not in rh.parms):
        if rh.parms['scpDataType'].lower() != "delete":
            msg = msgs.msg['0014'][1] % (modId, "scpDataType", "scpData")
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0014'][0])
            return

    # Map the scpDataType keyword to the SMAPI numeric code.
    scpData = ""
    if 'scpDataType' in rh.parms:
        scpType = rh.parms['scpDataType'].lower()
        if scpType == "hex":
            scpData = rh.parms['scpData']
            scpDataType = 3
        elif scpType == "ebcdic":
            scpData = rh.parms['scpData']
            scpDataType = 2
        elif scpType == "delete":
            # 1 requests deletion of the existing SCP data.  (The
            # original code left scpDataType unassigned on this path
            # when scpData was also supplied, raising a NameError.)
            scpDataType = 1
        else:
            # scpDataType not hex, ebcdic or delete
            msg = msgs.msg['0016'][1] % (modId, rh.parms['scpDataType'])
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0016'][0])
            return
    else:
        # Not specified, 0 for do nothing
        scpDataType = 0

    boot = rh.parms.get('boot', "")
    block = rh.parms.get('addr', "")
    # Strip any '0x' prefix.  str.replace returns a new string, so the
    # result must be reassigned (the original code discarded it and the
    # prefix was never actually removed).
    lun = rh.parms.get('lun', "").replace("0x", "")
    wwpn = rh.parms.get('wwpn', "").replace("0x", "")

    parms = [
        "-T", rh.userid,
        "-b", boot,
        "-k", block,
        "-l", lun,
        "-p", wwpn,
        "-s", str(scpDataType)]

    if scpData != "":
        parms.extend(["-d", scpData])

    results = invokeSMCLI(rh, "Image_SCSI_Characteristics_Define_DM", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    rh.printSysLog("Exit changeVM.addLOADDEV, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def doIt(rh):
    """
    Perform the requested function by invoking the subfunction handler.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.doIt")

    # Echo the invocation parameters when --showparms was requested.
    if rh.parms.get('showParms') is True:
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", " Routine: changeVM." +
                   str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)")
        rh.printLn("N", " function: " + rh.function)
        rh.printLn("N", " userid: " + rh.userid)
        rh.printLn("N", " subfunction: " + rh.subfunction)
        rh.printLn("N", " parms{}: ")
        for key in rh.parms:
            if key != 'showParms':
                rh.printLn("N", " " + key + ": " +
                           str(rh.parms[key]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit changeVM.doIt, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """
    Get the version of this function.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Report the module-level version string to the caller.
    rh.printLn("N", "Version: %s" % version)
    return 0
def help(rh):
    """
    Produce help output specifically for ChangeVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    # Emit the invocation-syntax lines, then the operand descriptions.
    # Both helpers are defined elsewhere in this module and write their
    # output through the request handle.
    showInvLines(rh)
    showOperandLines(rh)
    return 0
def parseCmdline(rh):
    """
    Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.parseCmdline")

    if rh.totalParms < 2:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        rh.printSysLog("Exit changeVM.parseCmdLine, rc: " +
                       str(rh.results['overallRC']))
        return rh.results['overallRC']

    rh.userid = rh.request[1].upper()

    if rh.totalParms == 2:
        # Only one operand was supplied: treat it as the subfunction
        # name rather than a userid.
        rh.subfunction = rh.userid
        rh.userid = ''
    else:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3  # Begin parsing at the 4th operand.
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

        if rh.results['overallRC'] == 0:
            if rh.subfunction in ('ADD3390', 'ADD9336'):
                fs = rh.parms.get('fileSystem')
                if (fs is not None and
                        fs not in ['ext2', 'ext3', 'ext4', 'xfs', 'swap']):
                    # Invalid file system specified.
                    msg = msgs.msg['0015'][1] % (modId, fs)
                    rh.printLn("ES", msg)
                    rh.updateResults(msgs.msg['0015'][0])

    rh.printSysLog("Exit changeVM.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def punchFile(rh):
    """
    Punch a file to a virtual reader of the specified virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'PUNCHFILE'
          userid      - userid of the virtual machine
          parms['class'] - Spool class (optional)
          parms['file']  - Filespec of the file to punch.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.punchFile")

    # Spool class defaults to "A" unless the caller specified one.
    spoolClass = str(rh.parms['class']) if 'class' in rh.parms else "A"

    punch2reader(rh, rh.userid, rh.parms['file'], spoolClass)

    rh.printSysLog("Exit changeVM.punchFile, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def purgeRDR(rh):
    """
    Purge the reader belonging to the virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'PURGERDR'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.purgeRDR")

    # Delegate to the shared reader-purge helper and fold its results
    # into this request.
    rh.updateResults(purgeReader(rh))

    rh.printSysLog("Exit changeVM.purgeRDR, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def removeDisk(rh):
    """
    Remove a disk from a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'REMOVEDISK'
          userid      - userid of the virtual machine
          parms['vaddr'] - Virtual address

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.removeDisk")

    # Is image logged on
    loggedOn = False
    results = isLoggedOn(rh, rh.userid)
    if results['overallRC'] == 0:
        if results['rs'] == 0:
            # VM is logged on: take the disk offline in Linux first.
            loggedOn = True
            results = disableEnableDisk(
                rh,
                rh.userid,
                rh.parms['vaddr'],
                '-d')
            if results['overallRC'] != 0:
                rh.printLn("ES", results['response'])
                rh.updateResults(results)

        if results['overallRC'] == 0 and loggedOn:
            # Detach the device from the running machine.
            strCmd = "/sbin/vmcp detach " + rh.parms['vaddr']
            results = execCmdThruIUCV(rh, rh.userid, strCmd)
            if results['overallRC'] != 0:
                # HCPxxx040E means the device does not exist; that is
                # acceptable for a removal request.  Raw string avoids
                # the invalid '\w' escape in a plain string literal.
                if re.search(r'(^HCP\w\w\w040E)', results['response']):
                    results = {'overallRC': 0, 'rc': 0, 'rs': 0,
                               'response': ''}
                else:
                    rh.printLn("ES", results['response'])
                    rh.updateResults(results)

        if results['overallRC'] == 0:
            # Remove the disk from the user entry.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-e", "0"]

            results = invokeSMCLI(rh, "Image_Disk_Delete_DM", parms)
            if results['overallRC'] != 0:
                if (results['overallRC'] == 8 and results['rc'] == 208 and
                        results['rs'] == 36):
                    # Disk does not exist, ignore the error
                    results = {'overallRC': 0, 'rc': 0, 'rs': 0,
                               'response': ''}
                else:
                    # SMAPI API failed.
                    rh.printLn("ES", results['response'])
                    rh.updateResults(results)  # Use results from invokeSMCLI
    else:
        # Unexpected error.  Message already sent.
        rh.updateResults(results)

    rh.printSysLog("Exit changeVM.removeDisk, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def removeIPL(rh):
    """
    Remove the IPL statement from the virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'REMOVEIPL'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.removeIPL")

    smcliResults = invokeSMCLI(rh, "Image_IPL_Delete_DM", ["-T", rh.userid])
    if smcliResults['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)  # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.removeIPL, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def showInvLines(rh):
    """
    Produce help output related to command synopsis

    Input:
       Request Handle
    """
    if rh.subfunction != '':
        rh.printLn("N", "Usage:")
    cmd = rh.cmdName
    # Emit the synopsis lines from a single table instead of one
    # printLn call per fragment.
    for synopsisLine in (
            " python " + cmd + " ChangeVM <userid> add3390 <diskPool> <vAddr>",
            " <diskSize3390> --mode <mode> --readpw <read_pw>",
            " --writepw <write_pw> --multipw <multi_pw> --filesystem <fsType>",
            " python " + cmd + " ChangeVM <userid> add9336 <diskPool> <vAddr>",
            " <diskSize9336> --mode <mode> --readpw <read_pw>",
            " --writepw <write_pw> --multipw <multi_pw> --filesystem <fsType>",
            " python " + cmd + " ChangeVM <userid> aemod <aeScript> --invparms <invParms>",
            " python " + cmd + " ChangeVM <userid> IPL <addrOrNSS> --loadparms <loadParms>",
            " --parms <parmString>",
            " python " + cmd + " ChangeVM <userid> loaddev --boot <boot> --addr <addr>",
            " --wwpn <wwpn> --lun <lun> --scpdatatype <scpDatatype> --scpdata <scp_data>",
            " python " + cmd + " ChangeVM <userid> punchFile <file> --class <class>",
            " python " + cmd + " ChangeVM <userid> purgeRDR",
            " python " + cmd + " ChangeVM <userid> removedisk <vAddr>",
            " python " + cmd + " ChangeVM <userid> removeIPL <vAddr>",
            " python " + cmd + " ChangeVM help",
            " python " + cmd + " ChangeVM version"):
        rh.printLn("N", synopsisLine)
    return
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    # Section 1: sub-function descriptions (always shown).
    if rh.function == 'HELP':
        rh.printLn("N", " For the ChangeVM function:")
    else:
        rh.printLn("N", "Sub-Functions(s):")
    rh.printLn("N", " add3390 - Add a 3390 (ECKD) disk " +
               "to a virtual machine's directory")
    rh.printLn("N", " entry.")
    rh.printLn("N", " add9336 - Add a 9336 (FBA) disk " +
               "to virtual machine's directory")
    rh.printLn("N", " entry.")
    rh.printLn("N", " aemod - Sends an activation " +
               "engine script to the managed virtual")
    rh.printLn("N", " machine.")
    rh.printLn("N", " help - Displays this help " +
               "information.")
    rh.printLn("N", " ipl - Sets the IPL statement in " +
               "the virtual machine's")
    rh.printLn("N", " directory entry.")
    rh.printLn("N", " loaddev - Sets the LOADDEV statement " +
               "in the virtual machine's")
    rh.printLn("N", " directory entry.")
    rh.printLn("N", " punchfile - Punch a file to a virtual " +
               "reader of the specified")
    rh.printLn("N", " virtual machine.")
    rh.printLn("N", " purgerdr - Purges the reader " +
               "belonging to the virtual machine.")
    rh.printLn("N", " removedisk - " +
               "Remove an mdisk from a virtual machine.")
    rh.printLn("N", " removeIPL - " +
               "Remove an IPL from a virtual machine's directory entry.")
    rh.printLn("N", " version - " +
               "show the version of the power function")
    # Section 2: operand descriptions. Only the "Operand(s):" header is
    # conditional on a sub-function being present.
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
    # NOTE(review): '-addr' below vs '--addr' in the synopsis output of
    # showInvLines — verify which spelling is correct before changing.
    rh.printLn("N", " -addr <addr> - " +
               "Specifies the logical block address of the")
    rh.printLn("N", " " +
               "boot record.")
    rh.printLn("N", " <addrOrNSS> - " +
               "Specifies the virtual address or NSS name")
    rh.printLn("N", " to IPL.")
    rh.printLn("N", " <aeScript> - " +
               "aeScript is the fully qualified file")
    rh.printLn("N", " " +
               "specification of the script to be sent")
    rh.printLn("N", " --boot <boot> - " +
               "Boot program number")
    rh.printLn("N", " --class <class> - " +
               "The class is optional and specifies the spool")
    rh.printLn("N", " " +
               "class for the reader file.")
    rh.printLn("N", " <diskPool> - " +
               "Specifies the directory manager disk pool to")
    rh.printLn("N", " " +
               "use to obtain the disk.")
    rh.printLn("N", " <diskSize3390> - " +
               "Specifies the size of the ECKD minidisk. ")
    rh.printLn("N", " <diskSize9336> - " +
               "Specifies the size of the FBA type minidisk.")
    rh.printLn("N", " <file> - " +
               "File to punch to the target system.")
    rh.printLn("N", " --filesystem <fsType> - " +
               "Specifies type of filesystem to be created on")
    rh.printLn("N", " the minidisk.")
    rh.printLn("N", " --invparms <invParms> - " +
               "Specifies the parameters to be specified in the")
    rh.printLn("N", " " +
               "invocation script to call the aeScript.")
    rh.printLn("N", " --loadparms <loadParms> - " +
               "Specifies a 1 to 8-character load parameter that")
    rh.printLn("N", " " +
               "is used by the IPL'd system.")
    rh.printLn("N", " --lun <lun> - " +
               "One to eight-byte logical unit number of the")
    rh.printLn("N", " FCP-I/O device.")
    rh.printLn("N", " --mode <mode> - " +
               "Specifies the access mode for the minidisk.")
    rh.printLn("N", " --multipw <multi_pw> - " +
               "Specifies the password that allows sharing the")
    rh.printLn("N", " " +
               "minidisk in multiple-write mode.")
    rh.printLn("N", " --parms <parmString> - " +
               "Specifies a parameter string to pass to the")
    rh.printLn("N", " " +
               "virtual machine in general-purpose registers at")
    rh.printLn("N", " " +
               "user's the completion of the IPL.")
    rh.printLn("N", " --readpw <read_pw> - " +
               "Specifies the password that allows sharing the")
    rh.printLn("N", " " +
               "minidisk in read mode.")
    rh.printLn("N", " --scpdata <scpdata> - " +
               "Provides the SCP data information.")
    rh.printLn("N", " --scpdatatype <scpdatatype> - " +
               "Specifies whether the scp data is in hex,")
    rh.printLn("N", " " +
               "EBCDIC, or should be deleted.")
    rh.printLn("N", " <userid> - " +
               "Userid of the target virtual machine.")
    rh.printLn("N", " <vAddr> - " +
               "Virtual address of the device.")
    # NOTE(review): "Specifies is the password" reads like a typo in the
    # user-facing text; confirm before editing the emitted string.
    rh.printLn("N", " --writepw <write_pw> - " +
               "Specifies is the password that allows sharing")
    rh.printLn("N", " " +
               "the minidisk in write mode.")
    rh.printLn("N", " --wwpn <wwpn> - " +
               "The world-wide port number.")
    return
import copy
import zWell_model.utils as zu
from zWell_model.allModel import AllModel
class DenseNet(AllModel):
    """
    Dense neural network object. It combines dense blocks with transition
    layers to increase network depth while keeping network complexity low.
    """

    def __init__(self, stride, input_shape, classes,
                 red=True,
                 reg=0.0001, bn_eps=2e-5,
                 bn_mom=0.9, model_layers_num=4,
                 ckp=2, init_k_len=64, dense_len=512
                 ):
        """
        Construct a dense network object.

        :param stride: convolution stride of each block; a list with one
            entry per network layer.
        :param input_shape: input dimension tuple of the network.
        :param classes: classification scheme (number of output classes).
        :param red: whether to process in single-channel mode.
        :param reg: L2 regularisation coefficient used inside the blocks.
        :param bn_eps: epsilon that avoids division by zero in batch norm.
        :param bn_mom: momentum of the moving mean in batch normalisation.
        :param model_layers_num: number of network layers.
        :param ckp: geometric ratio of kernel counts between conv layers.
        :param init_k_len: number of kernels in the first block.
        :param dense_len: neuron count of the first fully connected layer.
        """
        super().__init__()
        # Validate that the stride list covers every layer, then store it.
        zu.check_list_len(stride, model_layers_num, "Convolutional step size for each residual block:[stride]")
        self.stride = stride
        self.input_shape = input_shape
        self.classes = classes
        self.red = red
        self.reg = reg
        self.bn_eps = bn_eps
        self.bn_mom = bn_mom
        self.model_layers_num = model_layers_num
        self.ckp = ckp
        self.init_k_len = init_k_len
        self.dense_len = dense_len

    def __rshift__(self, other):
        """
        Copy every configuration attribute of this network into ``other``
        (``self >> other``), so a differently configured network object can
        be overwritten without constructing a new one.

        :param other: the model object receiving the copied attributes.
        :return: ``other`` with the attributes copied onto it.
        """
        other.stride = copy.copy(self.stride)
        other.input_shape = self.input_shape
        other.classes = self.classes
        # Bug fix: these two assignments were previously fused into one
        # chained statement ("other.red = self.red, other.reg = self.reg"),
        # which tried to unpack the float self.reg into a tuple target and
        # raised a TypeError at runtime.
        other.red = self.red
        other.reg = self.reg
        other.bn_eps = self.bn_eps
        other.bn_mom = self.bn_mom
        # Bug fix: previously self-assigned other.model_layers_num, which
        # left the target's layer count uncopied.
        other.model_layers_num = self.model_layers_num
        other.ckp = self.ckp
        other.init_k_len = self.init_k_len
        other.dense_len = self.dense_len
        # Return the target so the copy can be chained / used directly.
        return other

    def __str__(self) -> str:
        return "zWell_model.denseNet.DenseNet.DenseNet(\n" \
               f"\tstride={self.stride}\n" \
               f"\tinput_shape={self.input_shape}\n" \
               f"\tclasses={self.classes}\n" \
               f"\tred={self.red}\n" \
               f"\treg={self.reg}\n" \
               f"\tbn_eps={self.bn_eps}\n" \
               f"\tbn_mom={self.bn_mom}\n" \
               f"\tmodel_layers_num={self.model_layers_num}\n" \
               f"\tckp={self.ckp}\n" \
               f"\tinit_k_len={self.init_k_len}\n" \
               f"\tdense_len={self.dense_len}\n" \
               ")"
def res_module(
        data, k, stride, chan_dim,
        red=False, reg=0.0001, bn_eps=2e-5, bn_mom=0.9
):
    """
    Build one residual (bottleneck) block.

    :param data: input tensor (output of the previous layer).
    :param k: number of output channels of the residual block.
    :param stride: convolution stride.
    :param chan_dim: axis used by the batch-normalisation layers.
    :param red: whether to run a 1x1 convolution over the identity mapping
        to adjust its channel count.
    :param reg: L2 regularisation hyper-parameter.
    :param bn_eps: epsilon that avoids division by zero in batch norm.
    :param bn_mom: momentum of the moving mean in batch normalisation.
    :return: output tensor of the residual block.
    """
    from keras.layers import BatchNormalization, add, AveragePooling2D
    from keras.layers.convolutional import Conv2D
    from keras.layers.core import Activation
    from keras.regularizers import l2
    # Identity branch: start from the raw block input.
    shortcut = data
    # First stage (1x1) in pre-activation order: BN -> ReLU -> Conv.
    bn1 = BatchNormalization(axis=chan_dim, epsilon=bn_eps, momentum=bn_mom)(data)
    act1 = Activation('relu')(bn1)
    conv1 = Conv2D(
        filters=int(k * 0.25), kernel_size=(1, 1),
        # no bias term; L2 regularisation on the kernel
        use_bias=False, kernel_regularizer=l2(reg)
    )(act1)
    # Second stage (3x3): BN -> ReLU -> Conv.
    bn2 = BatchNormalization(axis=chan_dim, epsilon=bn_eps, momentum=bn_mom)(conv1)
    act2 = Activation('relu')(bn2)
    conv2 = Conv2D(
        filters=int(k * 0.25), kernel_size=(3, 3),
        strides=stride, padding='same', use_bias=False, kernel_regularizer=l2(reg)
    )(act2)
    # Third stage (1x1): BN -> ReLU -> Conv, restoring k channels.
    # NOTE(review): this conv also uses strides=stride, so for stride > 1
    # the residual branch is downsampled twice (conv2 and conv3); the
    # shapes only line up with the shortcut branch when red=True — confirm
    # against the callers' stride/red combinations.
    bn3 = BatchNormalization(axis=chan_dim, epsilon=bn_eps, momentum=bn_mom)(conv2)
    act3 = Activation('relu')(bn3)
    conv3 = Conv2D(
        filters=k, kernel_size=(1, 1),
        strides=stride, padding='same', use_bias=False, kernel_regularizer=l2(reg)
    )(act3)
    # If requested, 1x1-convolve the identity branch so its channel count
    # (and stride) match the residual branch; the result replaces the
    # plain identity mapping.
    if red:
        shortcut = Conv2D(k, kernel_size=(1, 1), strides=stride, use_bias=False, kernel_regularizer=l2(reg))(act1)
    # If the spatial size changed, pool the shortcut so its dimensions
    # match the convolution output.
    if stride != 1:
        shortcut = AveragePooling2D(pool_size=stride)(shortcut)
    # Residual output = identity mapping + convolution output.
    return Activation('relu')(add([shortcut, conv3]))
import copy
import zWell_model.utils as zu
from zWell_model.allModel import AllModel
class ResNet(AllModel):
    """
    The most basic architecture object of a residual neural network.
    """

    def __init__(self, k, stride, input_shape, classes,
                 chan_dim=-1, red=True,
                 reg=0.0001, bn_eps=2e-5,
                 bn_mom=0.9, model_layers_num=4,
                 ckp=2, init_k_len=64, dense_len=512
                 ):
        """
        Construct a residual network object.

        :param k: number of output channels of each residual block; a list
            with one entry per block.
        :param stride: convolution stride of each residual block; a list
            with one entry per block.
        :param input_shape: input dimension tuple of the network.
        :param classes: classification scheme (number of output classes).
        :param chan_dim: axis used for batch normalisation in each block.
        :param red: whether to process in single-channel mode.
        :param reg: L2 regularisation coefficient used in the blocks.
        :param bn_eps: epsilon that avoids division by zero in batch norm.
        :param bn_mom: momentum of the moving mean in batch normalisation.
        :param model_layers_num: number of residual network layers.
        :param ckp: geometric ratio of kernel counts between conv layers.
        :param init_k_len: number of kernels in the first residual block.
        :param dense_len: neuron count of the first fully connected layer
            after the residual stages.
        """
        super().__init__()
        # Validate and store the per-block output channel counts.
        zu.check_list_len(k, model_layers_num, "Number of output channels in each residual block:[k]")
        self.k = k
        # Validate and store the per-block strides.
        zu.check_list_len(stride, model_layers_num, "Convolutional step size for each residual block:[stride]")
        self.stride = stride
        self.input_shape = input_shape
        self.classes = classes
        self.chan_dim = chan_dim
        self.red = red
        self.reg = reg
        self.bn_eps = bn_eps
        self.bn_mom = bn_mom
        self.model_layers_num = model_layers_num
        self.ckp = ckp
        self.init_k_len = init_k_len
        self.dense_len = dense_len

    def __rshift__(self, other):
        """
        Copy every attribute of this residual network into ``other``
        (``self >> other``), commonly used to overwrite the attributes of a
        differently configured network without constructing a new one.

        :param other: the model object receiving the copied attributes.
        :return: ``other`` with the attributes copied onto it.
        """
        other.k = copy.copy(self.k)
        other.stride = copy.copy(self.stride)
        other.input_shape = self.input_shape
        other.classes = self.classes
        other.chan_dim = self.chan_dim
        other.red = self.red
        other.reg = self.reg
        other.bn_eps = self.bn_eps
        other.bn_mom = self.bn_mom
        # Bug fix: previously self-assigned other.model_layers_num, which
        # left the target's layer count uncopied.
        other.model_layers_num = self.model_layers_num
        other.ckp = self.ckp
        other.init_k_len = self.init_k_len
        other.dense_len = self.dense_len
        # Return the target, honouring the documented contract and
        # enabling chained copies.
        return other

    def __str__(self) -> str:
        return "zWell_model.resNet.resNetWork.ResNet(\n" \
               f"\tk={self.k}\n" \
               f"\tstride={self.stride}\n" \
               f"\tinput_shape={self.input_shape}\n" \
               f"\tclasses={self.classes}\n" \
               f"\tchan_dim={self.chan_dim}\n" \
               f"\tred={self.red}\n" \
               f"\treg={self.reg}\n" \
               f"\tbn_eps={self.bn_eps}\n" \
               f"\tbn_mom={self.bn_mom}\n" \
               f"\tmodel_layers_num={self.model_layers_num}\n" \
               f"\tckp={self.ckp}\n" \
               f"\tinit_k_len={self.init_k_len}\n" \
               f"\tdense_len={self.dense_len}\n" \
               ")"
from zWell_model.convNet.convNetWork import ConvNet
class ConvNetV1(ConvNet):
    """
    First basic convolutional neural network model.
    """

    def to_keras_model(self, add_fully_connected=True, **args):
        """
        Build this network as a Keras ``Sequential`` model.

        :param add_fully_connected: when True, append the flatten /
            fully-connected classification head to the model.
        :param args: optional keyword arguments; ``name`` overrides the
            model name (defaults to 'ConvNetV1').
        :return: the assembled Keras Sequential model.
        """
        from keras import Sequential
        from keras.layers import Convolution2D, Activation, MaxPooling2D, Flatten, Dense
        init_filters = self.init_k_len
        model = Sequential(name=args.get('name', 'ConvNetV1'))
        # First stage: convolution with the configured initial kernel count.
        model.add(
            Convolution2D(
                # initial number of filters (convolution kernels)
                filters=init_filters,
                # kernel size
                kernel_size=2,
                # 'same' adds padding automatically so output size matches.
                padding='same',
                # input data dimensions for the first layer
                batch_input_shape=self.input_shape,
            )
        )
        # Activation for the first stage.
        model.add(Activation('relu'))
        # Pooling for the first stage.
        model.add(
            MaxPooling2D(
                # 2x2 pooling kernel
                pool_size=2,
                # 2x2 stride
                strides=2,
                # pooling padding rule
                padding='same'
            )
        )
        # Remaining convolution stages. Bug fix / de-duplication: the
        # original code special-cased self.ckp == 2 with a bit shift
        # (init_filters <<= 1) in an otherwise identical loop; multiplying
        # by self.ckp is equivalent for integers, so the duplicated branch
        # is collapsed into one loop.
        for i in range(self.model_layers_num):
            # Grow the filter count geometrically, then add conv/act/pool.
            init_filters *= self.ckp
            model.add(Convolution2D(filters=init_filters, kernel_size=2, padding='same', strides=self.stride[i]))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=2, padding='same'))
        if add_fully_connected:
            # Flatten the feature maps for the fully connected head.
            model.add(Flatten())
            # First dense layer of the classification head.
            model.add(Dense(self.dense_len))
            model.add(Activation("relu"))
            # Output layer: one neuron per class.
            model.add(Dense(self.classes))
            # Softmax to produce class probability scores.
            model.add(Activation("softmax"))
        return model
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.