code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def resource_node(element, resources):
    """Append an Attendee/Mailbox/EmailAddress node for each resource.

    :param element: lxml element the attendee nodes are appended to
    :param resources: iterable of objects with an ``email`` attribute
    :returns: ``element`` with one ``<t:Attendee>`` child per resource
    """
    for person in resources:
        attendee_node = T.Attendee(T.Mailbox(T.EmailAddress(person.email)))
        element.append(attendee_node)
    return element
Helper function to generate a person/conference room node from an email address <t:OptionalAttendees> <t:Attendee> <t:Mailbox> <t:EmailAddress>{{ attendee_email }}</t:EmailAddress> </t:Mailbox> </t:Attendee> </t:OptionalAttendees>
def delete_field(field_uri):
    """Build a ``<t:DeleteItemField>`` node requesting deletion of a field.

    Needed when a value must be overwritten instead of appended.

    :param str field_uri: FieldURI of the field to delete
    :returns: lxml ``DeleteItemField`` element
    """
    return T.DeleteItemField(T.FieldURI(FieldURI=field_uri))
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of appending. <t:DeleteItemField> <t:FieldURI FieldURI="calendar:Resources"/> </t:DeleteItemField>
def get_item(exchange_id, format=u"Default"):
    """Request one or more calendar items from the Exchange store.

    :param exchange_id: a single Exchange item id, or a list of ids
    :param str format: BaseShape controlling how much data is returned;
        one of ``IdOnly``, ``Default`` or ``AllProperties``.
        See http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
    :returns: lxml ``<m:GetItem>`` element
    """
    # isinstance (instead of type(...) == list) also accepts list subclasses.
    if isinstance(exchange_id, list):
        elements = [T.ItemId(Id=item) for item in exchange_id]
    else:
        elements = [T.ItemId(Id=exchange_id)]
    root = M.GetItem(
        M.ItemShape(T.BaseShape(format)),
        M.ItemIds(*elements)
    )
    return root
Requests a calendar item from the store. exchange_id is the id for this event in the Exchange store. format controls how much data you get back from Exchange. Full docs are here, but acceptable values are IdOnly, Default, and AllProperties. http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx <m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages" xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"> <m:ItemShape> <t:BaseShape>{format}</t:BaseShape> </m:ItemShape> <m:ItemIds> <t:ItemId Id="{exchange_id}"/> </m:ItemIds> </m:GetItem>
def get_master(exchange_id, format=u"Default"):
    """Request the recurring master of a calendar occurrence.

    :param exchange_id: id of an occurrence of the recurring event
    :param str format: BaseShape - ``IdOnly``, ``Default`` or ``AllProperties``.
        See http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
    :returns: lxml ``<m:GetItem>`` element
    """
    return M.GetItem(
        M.ItemShape(T.BaseShape(format)),
        M.ItemIds(T.RecurringMasterItemId(OccurrenceId=exchange_id))
    )
Requests a calendar item from the store. exchange_id is the id for this event in the Exchange store. format controls how much data you get back from Exchange. Full docs are here, but acceptable values are IdOnly, Default, and AllProperties. http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx <m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages" xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"> <m:ItemShape> <t:BaseShape>{format}</t:BaseShape> </m:ItemShape> <m:ItemIds> <t:RecurringMasterItemId OccurrenceId="{exchange_id}"/> </m:ItemIds> </m:GetItem>
def get_occurrence(exchange_id, instance_index, format=u"Default"):
    """Request occurrences of a recurring event by master id and index.

    :param exchange_id: id of the master event in the Exchange store
    :param instance_index: iterable of occurrence indexes to fetch
    :param str format: BaseShape - ``IdOnly``, ``Default`` or ``AllProperties``
    :returns: lxml ``<m:GetItem>`` element

    GetItem: http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
    OccurrenceItemId:
    http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx
    """
    root = M.GetItem(
        M.ItemShape(T.BaseShape(format)),
        M.ItemIds()
    )
    # Locate the empty ItemIds container and fill it with one
    # OccurrenceItemId per requested index.
    item_ids = root.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
    for index in instance_index:
        item_ids.append(
            T.OccurrenceItemId(RecurringMasterId=exchange_id,
                               InstanceIndex=str(index)))
    return root
Requests one or more calendar items from the store matching the master & index. exchange_id is the id for the master event in the Exchange store. format controls how much data you get back from Exchange. Full docs are here, but acceptable values are IdOnly, Default, and AllProperties. GetItem Doc: http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx OccurrenceItemId Doc: http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx <m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages" xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"> <m:ItemShape> <t:BaseShape>{format}</t:BaseShape> </m:ItemShape> <m:ItemIds> {% for index in instance_index %} <t:OccurrenceItemId RecurringMasterId="{exchange_id}" InstanceIndex="{{ index }}"/> {% endfor %} </m:ItemIds> </m:GetItem>
def update_property_node(node_to_insert, field_uri):
    """Build a ``SetItemField`` telling Exchange to overwrite a field.

    :param node_to_insert: lxml node carrying the new value
    :param str field_uri: FieldURI of the field to overwrite
    :returns: lxml ``SetItemField`` element
    """
    return T.SetItemField(
        T.FieldURI(FieldURI=field_uri),
        T.CalendarItem(node_to_insert)
    )
Helper function - generates a SetItemField which tells Exchange you want to overwrite the contents of a field.
def _validate_example(rh, method, example_type):
    """Validate a method's example against its schema.

    :returns: JSON-formatted example if it exists and validates,
        otherwise None
    :raises ValidationError: if the example does not validate against
        the schema
    """
    example = getattr(method, example_type + "_example")
    schema = getattr(method, example_type + "_schema")
    if example is None:
        return None

    try:
        validate(example, schema)
    except ValidationError as e:
        # Re-raise with context identifying which handler/method failed.
        raise ValidationError(
            "{}_example for {}.{} could not be validated.\n{}".format(
                example_type, rh.__name__, method.__name__, str(e)
            )
        )
    return json.dumps(example, indent=4, sort_keys=True)
Validates example against schema :returns: Formatted example if example exists and validates, otherwise None :raises ValidationError: If example does not validate against the schema
def _get_rh_methods(rh):
    """Yield ``(name, method)`` for each HTTP method on ``rh`` that was
    decorated with ``schema.validate`` (i.e. carries ``input_schema``)."""
    for attr_name, member in vars(rh).items():
        if (attr_name in HTTP_METHODS
                and is_method(member)
                and hasattr(member, "input_schema")):
            yield (attr_name, member)
Yield all HTTP methods in ``rh`` that are decorated with schema.validate
def _get_tuple_from_route(route):
    """Return a ``(pattern, handler_class, methods)`` tuple from ``route``.

    :type route: tuple|tornado.web.URLSpec
    :rtype: tuple
    :raises TypeError: If ``route`` is not a tuple or URLSpec
    """
    if isinstance(route, tuple):
        assert len(route) >= 2
        pattern, handler_class = route[:2]
    elif isinstance(route, tornado.web.URLSpec):
        pattern, handler_class = route.regex.pattern, route.handler_class
    else:
        raise TypeError("Unknown route type '{}'"
                        .format(type(route).__name__))

    methods = []
    route_re = re.compile(pattern)
    route_params = set(list(route_re.groupindex.keys()) + ['self'])
    for http_method in HTTP_METHODS:
        method = getattr(handler_class, http_method, None)
        if method:
            method = extract_method(method)
            # Bug fix: getattr evaluates its default eagerly, so the original
            # called inspect.getargspec even when __argspec_args was present
            # (and getargspec was removed in Python 3.11). Only fall back
            # when the attribute is actually missing, and prefer
            # getfullargspec when available.
            argspec_args = getattr(method, "__argspec_args", None)
            if argspec_args is None:
                getspec = (getattr(inspect, "getfullargspec", None)
                           or inspect.getargspec)
                argspec_args = getspec(method).args
            method_params = set(argspec_args)
            # Route and method must expose exactly the same parameter set.
            if route_params == method_params:
                methods.append(http_method)
    return pattern, handler_class, methods
Return (pattern, handler_class, methods) tuple from ``route`` :type route: tuple|tornado.web.URLSpec :rtype: tuple :raises TypeError: If ``route`` is not a tuple or URLSpec
def _escape_markdown_literals(string):
    """Escape any markdown literal characters in ``string`` by
    prepending a backslash.

    :type string: str
    :rtype: str
    """
    specials = set("\\`*_{}[]()<>#+-.!:|")
    return "".join("\\" + ch if ch in specials else ch for ch in string)
Escape any markdown literals in ``string`` by prepending with \\ :type string: str :rtype: str
def _cleandoc(doc):
    """Strip the common leading indent from all non-empty lines of ``doc``.

    :returns: Cleaned ``doc``
    """
    lines = doc.split("\n")
    # The indent to remove is the smallest indent of any non-empty line.
    non_empty = [line for line in lines if line != ""]
    indent = min(len(line) - len(line.lstrip(" ")) for line in non_empty)
    return "\n".join(line[indent:] for line in lines)
Remove uniform indents from ``doc`` lines that are not empty :returns: Cleaned ``doc``
def _add_indent(string, indent):
    """Indent every line of ``string`` except the first by ``indent`` spaces.

    Useful for formatting strings into already-indented blocks.
    """
    lines = string.split("\n")
    pad = " " * indent
    return "\n".join([lines[0]] + [pad + line for line in lines[1:]])
Add indent of ``indent`` spaces to ``string.split("\n")[1:]`` Useful for formatting in strings to already indented blocks
def get_api_docs(routes):
    """Generate GitHub-Markdown API documentation from the schemas and
    docstrings of the RequestHandler methods behind ``routes``.

    :type routes: [(url, RequestHandler), ...]
    :param routes: List of routes (ideally all routes of the app)
    :rtype: str
    :returns: generated GFM-formatted documentation
    """
    route_tuples = sorted(map(_get_tuple_from_route, routes),
                          key=lambda t: t[0])
    # Only APIHandler subclasses get documented.
    docs = [_get_route_doc(url, rh, methods)
            for url, rh, methods in route_tuples
            if issubclass(rh, APIHandler)]
    return (
        "**This documentation is automatically generated.**\n\n" +
        "**Output schemas only represent `data` and not the full output; " +
        "see output examples and the JSend specification.**\n" +
        "\n<br>\n<br>\n".join(docs)
    )
Generates GitHub Markdown formatted API documentation using provided schemas in RequestHandler methods and their docstrings. :type routes: [(url, RequestHandler), ...] :param routes: List of routes (this is ideally all possible routes of the app) :rtype: str :returns: generated GFM-formatted documentation
def error(self, message, data=None, code=None):
    """Write a JSend 'error' response and finish the request.

    :type message: A JSON-serializable object
    :param message: A meaningful, end-user-readable (or at least
        log-worthy) message explaining what went wrong
    :type data: A JSON-serializable object
    :param data: Generic container for any other information about the
        error (conditions, stack traces, etc.)
    :type code: int
    :param code: Numeric code corresponding to the error, if applicable
    """
    payload = {'status': 'error', 'message': message}
    # Optional fields are only attached when truthy.
    if data:
        payload['data'] = data
    if code:
        payload['code'] = code
    self.write(payload)
    self.finish()
An error occurred in processing the request, i.e. an exception was thrown. :type data: A JSON-serializable object :param data: A generic container for any other information about the error, i.e. the conditions that caused the error, stack traces, etc. :type message: A JSON-serializable object :param message: A meaningful, end-user-readable (or at the least log-worthy) message, explaining what went wrong :type code: int :param code: A numeric code corresponding to the error, if applicable
def get_object_defaults(object_schema):
    """Extract a (possibly nested) dict of default values from a
    type ``object`` schema.

    :param object_schema: Schema of type object
    :type object_schema: dict
    :returns: Nested dict with default values
    :raises NoObjectDefaults: when the schema carries no defaults at all
    """
    defaults = {}
    for key, subschema in object_schema.get('properties', {}).items():
        if subschema.get('type') == 'object':
            if 'default' in subschema:
                defaults[key] = subschema['default']
            try:
                nested = get_object_defaults(subschema)
            except NoObjectDefaults:
                # Nested schema had no defaults; that is only acceptable
                # when the property carried its own explicit default.
                if 'default' not in subschema:
                    raise NoObjectDefaults
            else:
                if 'default' not in subschema:
                    defaults[key] = {}
                defaults[key].update(nested)
        else:
            if 'default' in subschema:
                defaults[key] = subschema['default']
    if defaults:
        return defaults
    raise NoObjectDefaults
Extracts a default values dict (nested) from a type ``object`` schema. :param object_schema: Schema of type object :type object_schema: dict :returns: Nested dict with default values
def input_schema_clean(input_, input_schema):
    """Merge input data over the schema's default values.

    :param input_: Input data
    :type input_: dict
    :param input_schema: Input schema
    :type input_schema: dict
    :returns: Nested dict with data (default values updated with input data)
    :rtype: dict
    """
    if input_schema.get('type') == 'object':
        try:
            defaults = get_object_defaults(input_schema)
        except NoObjectDefaults:
            # No defaults to merge; fall through to the raw input.
            pass
        else:
            return deep_update(defaults, input_)
    return input_
Updates schema default values with input data. :param input_: Input data :type input_: dict :param input_schema: Input schema :type input_schema: dict :returns: Nested dict with data (default values updated with input data) :rtype: dict
def read(filename):
    """Return the contents of ``filename`` (relative to the project root).

    :param str filename: file name relative to the project root dir
    :rtype: str
    """
    # Context manager guarantees the handle is closed; the original
    # codecs.open(...).read() leaked the file handle.
    with codecs.open(os.path.join(__DIR__, filename), 'r') as handle:
        return handle.read()
Read `filename` in the root dir of the project and return its contents as a string
def deep_update(source, overrides):
    """Update a nested dictionary (or similar mapping) in place.

    :type source: collections.abc.Mapping
    :type overrides: collections.abc.Mapping
    :rtype: collections.abc.Mapping
    :returns: ``source``, modified in place
    """
    # collections.Mapping was removed in Python 3.10; collections.abc is
    # the portable home (Python 3.3+), with a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover - Python 2
        from collections import Mapping

    for key, value in overrides.items():
        if isinstance(value, Mapping) and value:
            # Recurse into non-empty sub-mappings.
            source[key] = deep_update(source.get(key, {}), value)
        else:
            source[key] = value
    return source
Update a nested dictionary or similar mapping. Modify ``source`` in place. :type source: collections.Mapping :type overrides: collections.Mapping :rtype: collections.Mapping
def container(dec):
    """Meta-decorator (for decorating decorators).

    Keeps the original decorated function around as ``orig_func``.
    Credits: http://stackoverflow.com/a/1167248/1798683

    :param dec: Decorator to decorate
    :type dec: function
    :returns: Decorated decorator
    """
    @wraps(dec)
    def meta(f):
        wrapped = dec(f)
        wrapped.orig_func = f
        return wrapped
    return meta
Meta-decorator (for decorating decorators) Keeps around original decorated function as a property ``orig_func`` :param dec: Decorator to decorate :type dec: function :returns: Decorated decorator
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
    """Determine if ``cls`` is a subclass of one of ``classnames``.

    ``cls`` may also be a list of classes, in which case True is returned
    when any element matches.

    :raises TypeError: if ``cls`` is neither a type nor a list
    """
    if isinstance(cls, list):
        # Bug fix: forward classnames into the recursion -- the original
        # dropped it, silently ignoring custom classnames for list inputs.
        return any(is_handler_subclass(c, classnames) for c in cls)
    elif isinstance(cls, type):
        return any(c.__name__ in classnames for c in inspect.getmro(cls))
    else:
        raise TypeError(
            "Unexpected type `{}` for class `{}`".format(
                type(cls),
                cls
            )
        )
Determines if ``cls`` is indeed a subclass of ``classnames``
def write_error(self, status_code, **kwargs):
    """Override of RequestHandler.write_error.

    Calls ``error()`` or ``fail()`` from JSendMixin depending on which
    exception was raised, with the provided reason and status code.

    :type status_code: int
    :param status_code: HTTP status code
    """
    def exc_message(exc):
        # log_message is specific to HTTPError-style exceptions and must
        # never expose internals; other exceptions use their str() form.
        return exc.log_message if hasattr(exc, "log_message") else str(exc)

    self.clear()
    self.set_status(status_code)

    exc = kwargs["exc_info"][1]
    if isinstance(exc, (APIError, ValidationError)):
        # APIError/ValidationError become JSend "fail" responses with the
        # log_message as data.
        if isinstance(exc, ValidationError):
            # A ValidationError is always due to a malformed request.
            self.set_status(400)
        self.fail(exc_message(exc))
    else:
        # All other exceptions become a JSend "error"; the exception
        # message is only attached in debug mode.
        self.error(
            message=self._reason,
            data=exc_message(exc) if self.settings.get("debug") else None,
            code=status_code
        )
Override of RequestHandler.write_error Calls ``error()`` or ``fail()`` from JSendMixin depending on which exception was raised with provided reason and status code. :type status_code: int :param status_code: HTTP status code
def gen_submodule_names(package):
    """Walk ``package`` and yield the dotted names of all its submodules.

    :type package: package
    :param package: The package to get submodule names of
    :returns: Iterator yielding ``str`` names of all submodules
    """
    prefix = package.__name__ + '.'
    for _importer, modname, _ispkg in pkgutil.walk_packages(
            path=package.__path__, prefix=prefix, onerror=lambda x: None):
        yield modname
Walk package and yield names of all submodules :type package: package :param package: The package to get submodule names of :returns: Iterator that yields names of all submodules of ``package`` :rtype: Iterator that yields ``str``
def get(self, name):
    """Shout hello to the world (asynchronously)!"""
    # Run self.hello as a callback-style task and wait for its result.
    res = yield gen.Task(self.hello, name)
    # With the `schema.validate` decorator on a coroutine, the output is
    # returned by raising gen.Return(value), which the decorator yields.
    # On Python 3.3+ a plain `return res` would work as well. See:
    # http://www.tornadoweb.org/en/branch3.2/gen.html#tornado.gen.Return
    raise gen.Return(res)
Shouts hello to the world (asynchronously)!
def coroutine(func, replace_callback=True):
    """Tornado-JSON compatible wrapper for ``tornado.gen.coroutine``.

    Annotates the original argspec args of ``func`` as attribute
    ``__argspec_args``.
    """
    # gen.coroutine in tornado 3.x.x and 5.x.x has a different signature
    # than 4.x.x
    if TORNADO_MAJOR != 4:
        wrapper = gen.coroutine(func)
    else:
        wrapper = gen.coroutine(func, replace_callback)
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # when available, falling back for Python 2.
    getspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    wrapper.__argspec_args = getspec(func).args
    return wrapper
Tornado-JSON compatible wrapper for ``tornado.gen.coroutine`` Annotates original argspec.args of ``func`` as attribute ``__argspec_args``
def main():
    """Entry point for gns3-converter."""
    arg_parse = setup_argparse()
    args = arg_parse.parse_args()

    if not args.quiet:
        print('GNS3 Topology Converter')

    logging_level = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(level=logging_level,
                        format=LOG_MSG_FMT,
                        datefmt=LOG_DATE_FMT)
    logging.getLogger(__name__)

    # Resolve the default topology relative to the current directory.
    if args.topology == 'topology.net':
        args.topology = os.path.join(os.getcwd(), 'topology.net')

    # Main topology first, then any snapshot topologies.
    topology_files = [{'file': topology_abspath(args.topology),
                       'snapshot': False}]
    topology_files.extend(get_snapshots(args.topology))

    topology_name = name(args.topology, args.name)

    for topology in topology_files:
        do_conversion(topology, topology_name, args.output, args.debug)
Entry point for gns3-converter
def setup_argparse():
    """Set up the argparse argument parser.

    :return: instance of argparse
    :rtype: ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description='Convert old ini-style GNS3 topologies (<=0.8.7) to '
                    'the newer version 1+ JSON format')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s ' + __version__)
    parser.add_argument(
        '-n', '--name',
        help='Topology name (default uses the name of the old project '
             'directory)')
    parser.add_argument('-o', '--output', help='Output directory')
    parser.add_argument(
        'topology', nargs='?', default='topology.net',
        help='GNS3 .net topology file (default: topology.net)')
    parser.add_argument(
        '--debug', action='store_true',
        help='Enable debugging output')
    parser.add_argument(
        '-q', '--quiet', action='store_true',
        help='Quiet-mode (no output to console)')
    return parser
Setup the argparse argument parser :return: instance of argparse :rtype: ArgumentParser
def do_conversion(topology_def, topology_name, output_dir=None, debug=False,
                  quiet=False):
    """Convert the topology.

    :param dict topology_def: Dict containing topology file and snapshot
        bool, e.g. ``{'file': filename, 'snapshot': False}``
    :param str topology_name: The name of the topology
    :param str output_dir: Directory to output the topology to
        (Default: None)
    :param bool debug: Enable debugging (Default: False)
    :param bool quiet: Disable console output (Default: False)
    """
    converter = Converter(topology_def['file'], debug)
    old_top = converter.read_topology()
    new_top = JSONTopology()

    # Process the old topology sections.
    topology = converter.process_topology(old_top)

    # Nodes first, since links are derived from them.
    new_top.nodes = converter.generate_nodes(topology)
    new_top.links = converter.generate_links(new_top.nodes)

    # Artwork: notes, shapes and embedded images.
    new_top.notes = converter.generate_notes(topology['artwork']['NOTE'])
    new_top.shapes = converter.generate_shapes(topology['artwork']['SHAPE'])
    new_top.images = converter.generate_images(topology['artwork']['PIXMAP'])

    new_top.name = topology_name

    save(output_dir, converter, new_top, topology_def['snapshot'], quiet)
Convert the topology :param dict topology_def: Dict containing topology file and snapshot bool. For example: ``{'file': filename, 'snapshot': False}`` :param str topology_name: The name of the topology :param str output_dir: The directory in which to output the topology. (Default: None) :param bool debug: Enable debugging (Default: False) :param bool quiet: Disable console output (Default: False)
def get_snapshots(topology):
    """Return the paths of any snapshot topologies.

    :param str topology: topology file
    :return: list of dicts containing snapshot topologies
    :rtype: list
    """
    snapshots = []
    snap_dir = os.path.join(topology_dirname(topology), 'snapshots')
    if os.path.exists(snap_dir):
        # Each snapshot lives in its own subdirectory with a topology.net.
        for directory in os.listdir(snap_dir):
            snap_top = os.path.join(snap_dir, directory, 'topology.net')
            if os.path.exists(snap_top):
                snapshots.append({'file': snap_top, 'snapshot': True})
    return snapshots
Return the paths of any snapshot topologies :param str topology: topology file :return: list of dicts containing snapshot topologies :rtype: list
def name(topology_file, topology_name=None):
    """Calculate the name to save the converted topology as, using either
    a supplied name or the directory name of the current project.

    :param str topology_file: Topology filename
    :param topology_name: Optional topology name (Default: None)
    :type topology_name: str or None
    :return: new topology name
    :rtype: str
    """
    if topology_name is not None:
        logging.debug('topology name supplied')
        return topology_name
    logging.debug('topology name not supplied')
    return os.path.basename(topology_dirname(topology_file))
Calculate the name to save the converted topology as, using either a specified name or the directory name of the current project :param str topology_file: Topology filename :param topology_name: Optional topology name (Default: None) :type topology_name: str or None :return: new topology name :rtype: str
def snapshot_name(topo_name):
    """Get the snapshot name.

    :param str topo_name: topology file location. The name is taken from
        the directory containing the topology file, in the format:
        ``topology_NAME_snapshot_DATE_TIME``
    :return: snapshot name
    :raises ConvertError: when unable to determine the snapshot name
    """
    topo_name = os.path.basename(topology_dirname(topo_name))
    # Raw string: \d in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    snap_re = re.compile(r'^topology_(.+)(_snapshot_)(\d{6}_\d{6})$')
    result = snap_re.search(topo_name)
    if result is None:
        raise ConvertError('Unable to get snapshot name')
    return result.group(1) + '_' + result.group(3)
Get the snapshot name :param str topo_name: topology file location. The name is taken from the directory containing the topology file using the following format: topology_NAME_snapshot_DATE_TIME :return: snapshot name :raises ConvertError: when unable to determine the snapshot name
def copy_configs(configs, source, target):
    """Copy dynamips configs to the converted topology.

    :param configs: Configs to copy
    :param str source: Source topology directory
    :param str target: Target topology files directory
    :return: True when a config cannot be found, otherwise False
    :rtype: bool
    """
    config_err = False
    if len(configs) > 0:
        config_dir = os.path.join(target, 'dynamips', 'configs')
        os.makedirs(config_dir)
        for config in configs:
            src_cfg = os.path.join(source, config['old'])
            dst_cfg = os.path.join(config_dir,
                                   os.path.basename(config['new']))
            if os.path.isfile(src_cfg):
                # Copy and rename the config in one step.
                shutil.copy(src_cfg, dst_cfg)
            else:
                config_err = True
                logging.error('Unable to find %s' % config['old'])
    return config_err
Copy dynamips configs to converted topology :param configs: Configs to copy :param str source: Source topology directory :param str target: Target topology files directory :return: True when a config cannot be found, otherwise false :rtype: bool
def copy_vpcs_configs(source, target):
    """Copy any VPCS configs to the converted topology.

    :param str source: Source topology directory
    :param str target: Target topology files directory
    """
    # Candidate files: all *.vpc configs plus the history file if present.
    to_copy = glob.glob(os.path.join(source, 'configs', '*.vpc'))
    history = os.path.join(source, 'configs', 'vpcs.hist')
    if os.path.isfile(history):
        to_copy.append(history)

    dest_dir = os.path.join(target, 'vpcs', 'multi-host')
    # Only create the directory tree when there is something to copy.
    if len(to_copy) > 0:
        os.makedirs(dest_dir)
        for old_file in to_copy:
            shutil.copy(old_file,
                        os.path.join(dest_dir, os.path.basename(old_file)))
Copy any VPCS configs to the converted topology :param str source: Source topology directory :param str target: Target topology files directory
def copy_topology_image(source, target):
    """Copy any PNG images of the topology to the converted topology.

    :param str source: Source topology directory
    :param str target: Target Directory
    """
    for image in glob.glob(os.path.join(source, '*.png')):
        shutil.copy(image, target)
Copy any images of the topology to the converted topology :param str source: Source topology directory :param str target: Target Directory
def copy_images(images, source, target):
    """Copy images to the converted topology.

    :param images: Images to copy
    :param source: Old topology directory
    :param target: Target topology files directory
    :return: True when an image cannot be found, otherwise False
    :rtype: bool
    """
    image_err = False
    if len(images) > 0:
        images_dir = os.path.join(target, 'images')
        os.makedirs(images_dir)
        for image in images:
            # Absolute paths are used as-is; relative paths are resolved
            # against the source topology directory.
            if os.path.isabs(image):
                old_image_file = image
            else:
                old_image_file = os.path.join(source, image)
            new_image_file = os.path.join(images_dir,
                                          os.path.basename(image))
            if os.path.isfile(os.path.abspath(old_image_file)):
                shutil.copy(old_image_file, new_image_file)
            else:
                image_err = True
                logging.error('Unable to find %s' % old_image_file)
    return image_err
Copy images to converted topology :param images: Images to copy :param source: Old Topology Directory :param target: Target topology files directory :return: True when an image cannot be found, otherwise false :rtype: bool
def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
    """Create VirtualBox working directories if required.

    :param int max_vbox_id: Number of directories to create
    :param str output_dir: Output directory
    :param str topology_name: Topology name
    """
    if max_vbox_id is None:
        return
    # One vm-N directory per VirtualBox id, 1-based.
    for vm_id in range(1, max_vbox_id + 1):
        os.makedirs(os.path.join(output_dir, topology_name + '-files',
                                 'vbox', 'vm-%s' % vm_id))
Create VirtualBox working directories if required :param int max_vbox_id: Number of directories to create :param str output_dir: Output directory :param str topology_name: Topology name
def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
    """Create Qemu VM working directories if required.

    :param int max_qemu_id: Number of directories to create
    :param str output_dir: Output directory
    :param str topology_name: Topology name
    """
    if max_qemu_id is None:
        return
    # One vm-N directory per Qemu id, 1-based.
    for vm_id in range(1, max_qemu_id + 1):
        os.makedirs(os.path.join(output_dir, topology_name + '-files',
                                 'qemu', 'vm-%s' % vm_id))
Create Qemu VM working directories if required :param int max_qemu_id: Number of directories to create :param str output_dir: Output directory :param str topology_name: Topology name
def add_wic(self, old_wic, wic):
    """Convert the old-style WIC slot name to the new style and store the
    WIC in the node properties.

    :param str old_wic: Old WIC slot
    :param str wic: WIC name
    """
    # New-style key is 'wic' plus the last character of the old slot name.
    self.node['properties']['wic' + old_wic[-1]] = wic
Convert the old style WIC slot to a new style WIC slot and add the WIC to the node properties :param str old_wic: Old WIC slot :param str wic: WIC name
def add_wic_ports(self, wic_slot):
    """Add the ports for a specific WIC to ``node['ports']``.

    :param str wic_slot: WIC Slot (wic0)
    """
    slot_number = int(wic_slot[3])
    wic_adapter = self.node['properties'][wic_slot]
    num_ports = ADAPTER_MATRIX[wic_adapter]['ports']
    port_type = ADAPTER_MATRIX[wic_adapter]['type']

    # Dynamips WIC port numbers start on a multiple of 16;
    # WICs are always in adapter slot 0.
    base = 16 * (slot_number + 1)
    slot = 0

    new_ports = []
    for offset in range(num_ports):
        phy_port_number = offset + self.port_numbering[port_type]
        port_name = (PORT_TYPES[port_type] +
                     '%s/%s' % (slot, phy_port_number))
        new_ports.append({'name': port_name,
                          'id': self.port_id,
                          'port_number': base + offset,
                          'slot_number': slot})
        self.port_id += 1
    self.port_numbering[port_type] += num_ports
    self.node['ports'].extend(new_ports)
Add the ports for a specific WIC to the node['ports'] dictionary :param str wic_slot: WIC Slot (wic0)
def add_slot_ports(self, slot):
    """Add the ports for an adapter card to the node.

    :param str slot: Slot name
    """
    slot_nb = int(slot[4])
    slot_adapter = self.node['properties'][slot]
    num_ports = ADAPTER_MATRIX[slot_adapter]['ports']
    port_type = ADAPTER_MATRIX[slot_adapter]['type']

    new_ports = []
    for port in range(num_ports):
        port_name = PORT_TYPES[port_type] + '%s/%s' % (slot_nb, port)
        new_ports.append({'name': port_name,
                          'id': self.port_id,
                          'port_number': port,
                          'slot_number': slot_nb})
        self.port_id += 1
    self.node['ports'].extend(new_ports)
Add the ports for an adapter card :param str slot: Slot name
def add_info_from_hv(self):
    """Copy the fields we need from the old hypervisor section."""
    hv = self.hypervisor
    props = self.node['properties']

    # Router image (basename only).
    if 'image' in hv:
        props['image'] = os.path.basename(hv['image'])
    # IDLE-PC
    if 'idlepc' in hv:
        props['idlepc'] = hv['idlepc']
    # Router RAM
    if 'ram' in hv:
        props['ram'] = hv['ram']
    # 7200 NPE
    if 'npe' in hv:
        self.device_info['npe'] = hv['npe']
    # Device chassis; only the c3600 also records it in its properties.
    if 'chassis' in hv:
        self.device_info['chassis'] = hv['chassis']
        if self.device_info['model'] == 'c3600':
            props['chassis'] = self.device_info['chassis']
Add the information we need from the old hypervisor section
def add_device_items(self, item, device):
    """Dispatch one item from the old device definition into the node.

    :param str item: item key
    :param dict device: dictionary containing items
    """
    value = device[item]
    if item in ('aux', 'console'):
        self.node['properties'][item] = value
    elif item.startswith('slot'):
        self.node['properties'][item] = value
    elif item == 'connections':
        self.connections = value
    elif INTERFACE_RE.search(item) or VBQ_INT_RE.search(item):
        # Physical interface mapping, recorded for later link calculation.
        self.interfaces.append({'from': item, 'to': value})
    elif NUMBER_RE.search(item):
        # Bare numbers are switch port definitions.
        if self.device_info['type'] == 'EthernetSwitch':
            self.calc_ethsw_port(item, value)
        elif self.device_info['type'] == 'FrameRelaySwitch':
            self.calc_frsw_port(item, value)
    elif MAPINT_RE.search(item):
        self.add_mapping((item, value))
    elif item == 'cnfg':
        # Startup config gets renamed to the new convention and queued
        # for copying.
        new_config = os.path.join('configs',
                                  'i%s_startup-config.cfg' % self.node['id'])
        self.node['properties']['startup_config'] = new_config
        self.config.append({'old': fix_path(value), 'new': new_config})
    elif item.startswith('wic'):
        self.add_wic(item, value)
    elif item == 'symbol':
        self.set_symbol(value)
    elif item == 'nics':
        self.node['properties']['adapters'] = value
    elif item == 'image':
        self.node['properties']['vmname'] = value
    elif item in ('vbox_id', 'qemu_id'):
        self.node[item] = value
Add the various items from the device to the node :param str item: item key :param dict device: dictionary containing items
def add_to_virtualbox(self):
    """Fill in VirtualBox parameters that came from the VBoxDevice
    section or were not present on the node."""
    props = self.node['properties']
    # VirtualBox image
    if 'vmname' not in props:
        props['vmname'] = self.hypervisor['VBoxDevice']['image']
    # Number of adapters
    if 'adapters' not in props:
        props['adapters'] = self.hypervisor['VBoxDevice']['nics']
    # Console port, allocated sequentially from the vbox base port.
    if 'console' not in props:
        props['console'] = (self.base_ports['vbox_console'] +
                            self.node['vbox_id'] - 1)
Add additional parameters that were in the VBoxDevice section or not present
def add_vm_ethernet_ports(self):
    """Add Ethernet ports to VirtualBox and Qemu nodes."""
    for adapter in range(self.node['properties']['adapters']):
        self.node['ports'].append({'id': self.port_id,
                                   'name': 'Ethernet%s' % adapter,
                                   'port_number': adapter})
        self.port_id += 1
Add ethernet ports to Virtualbox and Qemu nodes
def set_qemu_symbol(self):
    """Set the appropriate symbol for QEMU devices."""
    symbol_map = {'ASA': 'asa',
                  'PIX': 'PIX_firewall',
                  'JUNOS': 'router',
                  'IDS': 'ids'}
    origin = self.device_info['from']
    # Only apply a symbol when none has been chosen already.
    if (origin in symbol_map
            and 'default_symbol' not in self.node
            and 'hover_symbol' not in self.node):
        self.set_symbol(symbol_map[origin])
Set the appropriate symbol for QEMU Devices
def set_symbol(self, symbol):
    """Set a symbol for a device.

    :param str symbol: Symbol to use
    """
    # Legacy symbol names map to their modern equivalents.
    aliases = {'EtherSwitch router': 'multilayer_switch',
               'Host': 'computer'}
    symbol = aliases.get(symbol, symbol)
    self.node['default_symbol'] = ':/symbols/%s.normal.svg' % symbol
    self.node['hover_symbol'] = ':/symbols/%s.selected.svg' % symbol
Set a symbol for a device :param str symbol: Symbol to use
def calc_ethsw_port(self, port_num, port_def):
    """Split and create the port entry for an Ethernet switch.

    Port string format: ``access 1 SW2 1``
    (0: type, 1: vlan, 2: destination device, 3: destination port)

    :param port_num: port number
    :type port_num: str or int
    :param str port_def: port definition
    """
    fields = port_def.split(' ')
    if len(fields) == 4:
        destination = {'device': fields[2], 'port': fields[3]}
    else:
        # Shorter definitions connect to a NIO rather than a device.
        destination = {'device': 'NIO', 'port': fields[2]}

    port = {'id': self.port_id,
            'name': str(port_num),
            'port_number': int(port_num),
            'type': fields[0],
            'vlan': int(fields[1])}
    self.node['ports'].append(port)
    self.calc_link(self.node['id'], self.port_id, port['name'], destination)
    self.port_id += 1
Split and create the port entry for an Ethernet Switch :param port_num: port number :type port_num: str or int :param str port_def: port definition
def calc_mb_ports(self):
    """Add the default (motherboard) ports for a router."""
    model = self.device_info['model']
    chassis = self.device_info['chassis']
    num_ports = MODEL_MATRIX[model][chassis]['ports']

    new_ports = []
    if num_ports > 0:
        port_type = MODEL_MATRIX[model][chassis]['type']
        # Motherboard ports always live in slot 0.
        for port in range(num_ports):
            new_ports.append({'name': PORT_TYPES[port_type] + '0/' + str(port),
                              'id': self.port_id,
                              'port_number': port,
                              'slot_number': 0})
            self.port_id += 1
    self.node['ports'].extend(new_ports)
Add the default ports to add to a router
def calc_link(self, src_id, src_port, src_port_name, destination):
    """Record a link item for later processing.

    :param int src_id: Source node ID
    :param int src_port: Source port ID
    :param str src_port_name: Source port name
    :param dict destination: Destination
    """
    # NIO port names are normalised to lower case.
    if destination['device'] == 'NIO':
        destination['port'] = destination['port'].lower()

    self.links.append({'source_node_id': src_id,
                       'source_port_id': src_port,
                       'source_port_name': src_port_name,
                       'source_dev': self.node['properties']['name'],
                       'dest_dev': destination['device'],
                       'dest_port': destination['port']})
Add a link item for processing later :param int src_id: Source node ID :param int src_port: Source port ID :param str src_port_name: Source port name :param dict destination: Destination
def set_description(self):
    """Set the node description."""
    info = self.device_info
    if info['type'] == 'Router':
        # Routers include the model in the description.
        self.node['description'] = '%s %s' % (info['type'], info['model'])
    else:
        self.node['description'] = info['desc']
Set the node description
def set_type(self):
    """Set the node type."""
    info = self.device_info
    # Routers use their upper-cased model as the type.
    if info['type'] == 'Router':
        self.node['type'] = info['model'].upper()
    else:
        self.node['type'] = info['type']
Set the node type
def calc_device_links(self):
    """Calculate router or VirtualBox links from the recorded interfaces."""
    for connection in self.interfaces:
        int_type = connection['from'][0]
        int_name = connection['from'].replace(int_type,
                                              PORT_TYPES[int_type.upper()])
        # Find the source port id matching this interface name.
        src_port = None
        for port in self.node['ports']:
            if int_name == port['name']:
                src_port = port['id']
                break

        dest_fields = connection['to'].split(' ')
        if len(dest_fields) == 2:
            conn_to = {'device': dest_fields[0], 'port': dest_fields[1]}
        else:
            # Single-field destinations are NIO connections.
            conn_to = {'device': 'NIO', 'port': dest_fields[0]}
        self.calc_link(self.node['id'], src_port, int_name, conn_to)
Calculate a router or VirtualBox link
def calc_cloud_connection(self):
    """Add the ports and nios for a cloud connection.

    Connection string: ``SW1:1:nio_gen_eth:eth0``
    (0: destination device, 1: destination port,
    2: NIO, 3: NIO destination)

    :return: None on success or RuntimeError on error
    """
    self.node['properties']['nios'] = []
    if self.connections is None:
        return None

    self.connections = self.connections.split(' ')
    for connection in sorted(self.connections):
        fields = connection.split(':')
        num_fields = len(fields)
        if num_fields == 4:
            nio = '%s:%s' % (fields[2], fields[3])
        elif num_fields == 6:
            nio = '%s:%s:%s:%s' % (fields[2].lower(), fields[3],
                                   fields[4], fields[5])
        else:
            # NOTE: the error is returned (not raised); callers must
            # check the return value.
            return RuntimeError('Error: Unknown connection string length '
                                '(Length: %s)' % num_fields)
        self.node['properties']['nios'].append(nio)
        # Stub port entry for this NIO.
        self.node['ports'].append({'id': self.port_id,
                                   'name': nio,
                                   'stub': True})
        self.port_id += 1
    return None
Add the ports and nios for a cloud connection :return: None on success or RuntimeError on error
def process_mappings(self):
    """Process the mappings for a Frame Relay switch: remove reverse
    duplicates and store the remainder in the node properties."""
    # Drop the first reverse (dest -> source) duplicate of each mapping.
    # NOTE: the list is intentionally mutated during iteration, matching
    # the original de-duplication behavior.
    for mapping_a in self.mappings:
        for mapping_b in self.mappings:
            if mapping_a['source'] == mapping_b['dest']:
                self.mappings.remove(mapping_b)
                break

    result = {}
    for mapping in self.mappings:
        result[mapping['source']] = mapping['dest']
    self.node['properties']['mappings'] = result
Process the mappings for a Frame Relay switch. Removes duplicates and adds the mappings to the node properties
def fix_path(path):
    """Fix Windows paths; Linux paths remain unaltered.

    :param str path: The path to be fixed
    :return: The fixed path
    :rtype: str
    """
    # Only paths containing backslashes are rewritten and normalised.
    if '\\' in path:
        path = os.path.normpath(path.replace('\\', '/'))
    return path
Fix Windows paths. Linux paths will remain unaltered :param str path: The path to be fixed :return: The fixed path :rtype: str
def read_topology(self):
    """Read the ini-style topology file using ConfigObj.

    Validates the file against the packaged 'configspec'; parse or
    validation failures are reported and terminate via sys.exit(1).

    :return config: Topology parsed by :py:mod:`ConfigObj`
    :rtype: ConfigObj
    """
    configspec = resource_stream(__name__, 'configspec')
    try:
        # Probe that the topology file is readable before handing it
        # to ConfigObj.
        handle = open(self._topology)
        handle.close()
        try:
            config = ConfigObj(self._topology,
                               configspec=configspec,
                               raise_errors=True,
                               list_values=False,
                               encoding='utf-8')
        except SyntaxError:
            logging.error('Error loading .net file')
            sys.exit(1)
    except IOError:
        logging.error('Cannot open topology file')
        sys.exit(1)
    vtor = Validator()
    res = config.validate(vtor, preserve_errors=True)
    if res:
        logging.debug('Validation passed')
    elif not res:
        # Report every failed entry before bailing out.
        for entry in flatten_errors(config, res):
            # each entry is a tuple
            (section_list, key, error) = entry
            if key is not None:
                section_list.append(key)
            else:
                section_list.append('[missing section]')
            section_string = ', '.join(section_list)
            if error is False:
                error = 'Missing value or section'
            print(section_string, ' = ', error)
        input('Press ENTER to continue')
        sys.exit(1)
    configspec.close()
    return config
Read the ini-style topology file using ConfigObj :return config: Topology parsed by :py:mod:`ConfigObj` :rtype: ConfigObj
def process_topology(self, old_top):
    """Process the sections returned by get_sections.

    Sorts each instance's items into configuration, artwork or
    physical-device buckets on a LegacyTopology helper.

    :param ConfigObj old_top: old topology as processed by
        :py:meth:`read_topology`
    :return: the assembled topology data from the LegacyTopology helper
    """
    sections = self.get_sections(old_top)
    topo = LegacyTopology(sections, old_top)

    for instance in sorted(sections):
        if instance.startswith('vbox') or instance.startswith('qemu'):
            # Qemu hypervisor instances may carry a qemu binary path.
            if instance.startswith('qemu') and \
                    'qemupath' in old_top[instance]:
                topo.add_qemu_path(instance)

            for device in EXTRA_CONF:
                try:
                    if isinstance(old_top[instance][device], dict):
                        topo.add_conf_item(instance, device)
                        old_top[instance].pop(device)
                except KeyError:
                    # Device type not present on this instance.
                    pass

        for item in sorted(old_top[instance]):
            if isinstance(old_top[instance][item], dict):
                if item in MODEL_TRANSFORM:
                    # A configuration item (topo.conf)
                    topo.add_conf_item(instance, item)
                elif instance == 'GNS3-DATA' and \
                        (item.startswith('SHAPE')
                         or item.startswith('NOTE')
                         or item.startswith('PIXMAP')):
                    # Item is an artwork item e.g. shapes and notes from
                    # GNS3-DATA
                    topo.add_artwork_item(instance, item)
                else:
                    # It must be a physical item (topo.devices)
                    topo.add_physical_item(instance, item)
    return topo.topology
Processes the sections returned by get_sections

:param ConfigObj old_top: old topology as processed by
                          :py:meth:`read_topology`
:returns: tuple of dicts containing hypervisors, devices and artwork
:rtype: tuple
def generate_links(self, nodes):
    """Generate a list of links.

    Expands port names, resolves destination names to IDs, removes
    reciprocal duplicates and wires link ids back onto the node ports.

    :param list nodes: A list of nodes from :py:meth:`generate_nodes`
    :return: list of links
    :rtype: list
    """
    new_links = []

    for link in self.links:
        # Expand port name if required
        if INTERFACE_RE.search(link['dest_port'])\
                or VBQ_INT_RE.search(link['dest_port']):
            int_type = link['dest_port'][0]
            dest_port = link['dest_port'].replace(
                int_type, PORT_TYPES[int_type.upper()])
        else:
            dest_port = link['dest_port']

        # Convert dest_dev and port to id's
        dest_details = self.convert_destination_to_id(
            link['dest_dev'], dest_port, nodes)

        desc = 'Link from %s port %s to %s port %s' % \
               (link['source_dev'], link['source_port_name'],
                dest_details['name'], dest_port)

        new_links.append({'description': desc,
                          'destination_node_id': dest_details['id'],
                          'destination_port_id': dest_details['pid'],
                          'source_port_id': link['source_port_id'],
                          'source_node_id': link['source_node_id']})

    # Remove duplicate links and add link_id
    # NOTE(review): new_links is mutated (remove) while being iterated;
    # this relies on each link having exactly one reciprocal entry.
    link_id = 1
    for link in new_links:
        t_link = str(link['source_node_id']) + ':' + \
            str(link['source_port_id'])
        for link2 in new_links:
            d_link = str(link2['destination_node_id']) + ':' + \
                str(link2['destination_port_id'])
            if t_link == d_link:
                new_links.remove(link2)
                break
        link['id'] = link_id
        link_id += 1

        self.add_node_connection(link, nodes)
    return new_links
Generate a list of links :param list nodes: A list of nodes from :py:meth:`generate_nodes` :return: list of links :rtype: list
def device_id_from_name(device_name, nodes):
    """Get the device ID when given a device name.

    :param str device_name: device name
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: device ID, or None when the name is unknown
    :rtype: int
    """
    for node in nodes:
        if node['properties']['name'] == device_name:
            return node['id']
    return None
Get the device ID when given a device name :param str device_name: device name :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: device ID :rtype: int
def port_id_from_name(port_name, device_id, nodes):
    """Get the port ID when given a port name.

    :param str port_name: port name
    :param str device_id: device ID
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: port ID, or None when the port cannot be found
    :rtype: int
    """
    for node in nodes:
        if node['id'] != device_id:
            continue
        # Only the matching device's ports are searched.
        for port in node['ports']:
            if port['name'] == port_name:
                return port['id']
        break
    return None
Get the port ID when given a port name :param str port_name: port name :param str device_id: device ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port ID :rtype: int
def convert_destination_to_id(destination_node, destination_port, nodes):
    """Convert a destination to device and port ID.

    'NIO' destinations are resolved against Cloud nodes with a
    case-insensitive port-name match; everything else is matched by
    exact device name.

    :param str destination_node: Destination node name
    :param str destination_port: Destination port name
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: dict containing device ID, device name and port ID
    :rtype: dict
    """
    dev_id = None
    dev_name = None
    prt_id = None
    if destination_node == 'NIO':
        wanted = destination_port.lower()
        # Scan every Cloud node; a later match overwrites an earlier
        # one, mirroring the original search order.
        for node in nodes:
            if node['type'] != 'Cloud':
                continue
            for port in node['ports']:
                if port['name'].lower() == wanted:
                    dev_id = node['id']
                    dev_name = node['properties']['name']
                    prt_id = port['id']
                    break
    else:
        for node in nodes:
            if node['properties']['name'] == destination_node:
                dev_id = node['id']
                dev_name = destination_node
                for port in node['ports']:
                    if port['name'] == destination_port:
                        prt_id = port['id']
                        break
                break
    return {'id': dev_id, 'name': dev_name, 'pid': prt_id}
Convert a destination to device and port ID :param str destination_node: Destination node name :param str destination_port: Destination port name :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: dict containing device ID, device name and port ID :rtype: dict
def get_node_name_from_id(node_id, nodes):
    """Get the name of a node when given the node_id.

    :param int node_id: The ID of a node
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: node name, or '' when the id is unknown
    :rtype: str
    """
    for node in nodes:
        if node['id'] == node_id:
            return node['properties']['name']
    return ''
Get the name of a node when given the node_id :param int node_id: The ID of a node :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: node name :rtype: str
def get_port_name_from_id(node_id, port_id, nodes):
    """Get the name of a port for a given node and port ID.

    :param int node_id: node ID
    :param int port_id: port ID
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: port name, or '' when the node/port cannot be found
    :rtype: str
    """
    port_name = ''
    for node in nodes:
        if node['id'] == node_id:
            for port in node['ports']:
                if port['id'] == port_id:
                    port_name = port['name']
                    break
            # Node ids are unique: stop scanning once the node was
            # found (the original kept looping over the remaining
            # nodes; sibling get_node_name_from_id already breaks).
            break
    return port_name
Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str
def add_node_connection(self, link, nodes):
    """Add a connection to a node.

    Writes the link id and a human-readable "connected to …"
    description onto the source and destination ports of the link.

    :param dict link: link definition
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    """
    src_node = link['source_node_id']
    src_port = link['source_port_id']
    dst_node = link['destination_node_id']
    dst_port = link['destination_port_id']

    # Each side's description names the peer it connects to.
    src_desc = 'connected to %s on port %s' % \
        (self.get_node_name_from_id(dst_node, nodes),
         self.get_port_name_from_id(dst_node, dst_port, nodes))
    dest_desc = 'connected to %s on port %s' % \
        (self.get_node_name_from_id(src_node, nodes),
         self.get_port_name_from_id(src_node, src_port, nodes))

    for node in nodes:
        if node['id'] == src_node:
            wanted, desc = src_port, src_desc
        elif node['id'] == dst_node:
            wanted, desc = dst_port, dest_desc
        else:
            continue
        for port in node['ports']:
            if port['id'] == wanted:
                port['link_id'] = link['id']
                port['description'] = desc
                break
Add a connection to a node :param dict link: link definition :param list nodes: list of nodes from :py:meth:`generate_nodes`
def generate_shapes(shapes):
    """Generate the shapes for the topology.

    :param dict shapes: A dict of converted shapes from the old topology
    :return: dict containing two lists (ellipse, rectangle)
    :rtype: dict
    """
    converted = {'ellipse': [], 'rectangle': []}
    for shape in shapes.values():
        # Copy every attribute except the discriminating 'type' key.
        attrs = {key: value for key, value in shape.items()
                 if key != 'type'}
        converted[shape['type']].append(attrs)
    return converted
Generate the shapes for the topology :param dict shapes: A dict of converted shapes from the old topology :return: dict containing two lists (ellipse, rectangle) :rtype: dict
def generate_notes(notes):
    """Generate the notes list.

    :param dict notes: A dict of converted notes from the old topology
    :return: List of notes for the topology (each a shallow copy)
    :rtype: list
    """
    return [dict(note) for note in notes.values()]
Generate the notes list

:param dict notes: A dict of converted notes from the old topology
:return: List of notes for the topology
:rtype: list
def generate_images(self, pixmaps):
    """Generate the images list and store the images to copy.

    :param dict pixmaps: A dict of converted pixmaps from the old topology
    :return: A list of images
    :rtype: list
    """
    converted = []
    for pixmap in pixmaps:
        entry = {}
        for key, value in pixmaps[pixmap].items():
            if key == 'path':
                # Relocate the image under the project 'images' dir and
                # remember the original file so it can be copied later.
                entry['path'] = fix_path(
                    os.path.join('images', os.path.basename(value)))
                self.images.append(value)
            else:
                entry[key] = value
        converted.append(entry)
    return converted
Generate the images list and store the images to copy :param dict pixmaps: A dict of converted pixmaps from the old topology :return: A list of images :rtype: list
def add_artwork_item(self, instance, item):
    """Add an artwork item e.g. Shapes, Notes and Pixmaps.

    Cleans up legacy values (escaped newlines, surrounding quotes,
    renamed keys) before storing them under self.artwork.

    :param instance: Hypervisor instance
    :param item: Item to add
    """
    if 'interface' in self.old_top[instance][item]:
        # Items carrying an 'interface' entry are skipped here.
        pass
    else:
        (item_type, item_id) = item.split(' ')
        self.artwork[item_type][item_id] = {}
        for s_item in sorted(self.old_top[instance][item]):
            if self.old_top[instance][item][s_item] is not None:
                s_detail = self.old_top[instance][item][s_item]
                s_type = type(s_detail)

                if item_type == 'NOTE' and s_type == str:
                    # Fix any escaped newline characters
                    s_detail = s_detail.replace('\\n', '\n')

                # Strip surrounding double quotes from string values.
                if s_type == str and len(s_detail) > 1 \
                        and s_detail[0] == '"' and s_detail[-1] == '"':
                    s_detail = s_detail[1:-1]

                # Rename legacy keys to their new equivalents.
                if item_type == 'SHAPE' and s_item == 'fill_color':
                    s_item = 'color'
                elif s_item == 'rotate':
                    s_item = 'rotation'
                    s_detail = float(s_detail)

                self.artwork[item_type][item_id][s_item] = s_detail

        if item_type == 'SHAPE' and \
                'color' not in self.artwork[item_type][item_id]:
            # Default fill for shapes without an explicit color.
            self.artwork[item_type][item_id]['color'] = '#ffffff'
            self.artwork[item_type][item_id]['transparency'] = 0
Add an artwork item e.g. Shapes, Notes and Pixmaps :param instance: Hypervisor instance :param item: Item to add
def add_qemu_path(self, instance):
    """Add the qemu path to the hypervisor conf data.

    :param instance: Hypervisor instance
    """
    qemu_conf = {'qemu_path': self.old_top[instance]['qemupath']}
    conf = self.topology['conf']
    if conf:
        # Merge into the current hypervisor's conf entry.
        conf[self.hv_id].update(qemu_conf)
    else:
        conf.append(qemu_conf)
Add the qemu path to the hypervisor conf data :param instance: Hypervisor instance
def add_conf_item(self, instance, item):
    """Add a hypervisor configuration item.

    Regular items start a new conf entry (updating self.hv_id); items
    listed in EXTRA_CONF are nested under their own key and merged into
    the current hypervisor's conf entry.

    :param instance: Hypervisor instance
    :param item: Item to add
    """
    tmp_conf = {}

    if item not in EXTRA_CONF:
        # Regular device config: record the transformed model name.
        tmp_conf['model'] = MODEL_TRANSFORM[item]

    for s_item in sorted(self.old_top[instance][item]):
        if self.old_top[instance][item][s_item] is not None:
            tmp_conf[s_item] = self.old_top[instance][item][s_item]

    if item in EXTRA_CONF:
        tmp_conf = {item: tmp_conf}
        if len(self.topology['conf']) == 0:
            self.topology['conf'].append(tmp_conf)
        else:
            self.topology['conf'][self.hv_id].update(tmp_conf)
    else:
        self.topology['conf'].append(tmp_conf)
        # Track the index of the conf entry we just created.
        self.hv_id = len(self.topology['conf']) - 1
Add a hypervisor configuration item :param instance: Hypervisor instance :param item: Item to add
def get_topology(self):
    """Get the converted topology ready for JSON encoding.

    Empty sections are omitted from the inner topology dict.

    :return: converted topology assembled into a single dict
    :rtype: dict
    """
    inner = {}
    if self._links:
        inner['links'] = self._links
    if self._nodes:
        inner['nodes'] = self._nodes
    if self._servers:
        inner['servers'] = self._servers
    if self._notes:
        inner['notes'] = self._notes
    if self._shapes['ellipse']:
        inner['ellipses'] = self._shapes['ellipse']
    if self._shapes['rectangle']:
        inner['rectangles'] = self._shapes['rectangle']
    if self._images:
        inner['images'] = self._images
    return {'name': self._name,
            'resources_type': 'local',
            'topology': inner,
            'type': 'topology',
            'version': '1.0'}
Get the converted topology ready for JSON encoding :return: converted topology assembled into a single dict :rtype: dict
def get_vboxes(self):
    """Get the maximum ID of the VBoxes.

    :return: Maximum VBox ID, or None when no VirtualBox VMs exist
    :rtype: int
    """
    ids = [node['vbox_id'] for node in self.nodes
           if node['type'] == 'VirtualBoxVM']
    return max(ids) if ids else None
Get the maximum ID of the VBoxes :return: Maximum VBox ID :rtype: int
def get_qemus(self):
    """Get the maximum ID of the Qemu VMs.

    :return: Maximum Qemu VM ID, or None when no Qemu VMs exist
    :rtype: int
    """
    ids = [node['qemu_id'] for node in self.nodes
           if node['type'] == 'QemuVM']
    return max(ids) if ids else None
Get the maximum ID of the Qemu VMs :return: Maximum Qemu VM ID :rtype: int
def getElements(self, name=''):
    """Get a list of child elements.

    :param name: tag name to match; '' returns all children
    :return: the children list itself when no name is given, otherwise
        a new list of children whose tag matches *name*
    """
    # BUG FIX: the original text was garbled — the function body was
    # duplicated and fused ("return elementf getElements..."), which is
    # a syntax error. This restores the single, clean definition.
    # If no tag name is specified, return all children.
    if not name:
        return self.children
    # Otherwise return only the children with a matching tag name.
    return [element for element in self.children if element.name == name]
Get a list of child elements
def StartElement(self, name, attributes):
    """SAX start-element event handler.

    Creates an Element for the tag, attaches it to the element
    currently being parsed (or makes it the document root), then pushes
    it onto the node stack.
    """
    # BUG FIX: the original text was garbled — the function body was
    # duplicated and fused mid-line, producing a syntax error. This
    # restores the single, clean definition.
    element = Element(name.encode(), attributes)
    if len(self.nodeStack) > 0:
        # Nested tag: make it a child of the current parent.
        parent = self.nodeStack[-1]
        parent.AddChild(element)
    else:
        # First tag seen becomes the document root.
        self.root = element
    self.nodeStack.append(element)
SAX start element event handler
def CharacterData(self, data):
    """SAX character-data event handler.

    Appends the UTF-8 encoded text to the current element's cdata.
    Whitespace-only data is intentionally kept to preserve newlines.
    """
    # BUG FIX: the original text was garbled — the function body was
    # duplicated and fused mid-line, producing a syntax error. This
    # restores the single, clean definition.
    ## HACK: to preserve the newlines
    #if string.strip(data):
    data = data.encode("utf-8")
    element = self.nodeStack[-1]
    element.cdata += data
    return
SAX character data event handler
def key_dict( from_dict ):
    """Build a dict whose keys are identifier-safe versions of the
    original (possibly unicode) keys.

    :return: tuple (new_keys, new_dict, old2new, new2old) where
        old2new/new2old map between original and normalized key names
        for every key that had to be changed
    """
    new_dict = {}
    old2new = {}
    new2old = {}
    for key in from_dict:
        k = normalizeUnicode(key,'identifier')
        if k != key:
            # Disambiguate collisions by appending a counter.
            i = ''
            while new_dict.has_key("%s%s" % (k,i) ):
                if not i:
                    i = 1
                else:
                    i += 1
            k = "%s%s" % (k,i)
            old2new[key] = k
            new2old[k] = key
        new_dict[k] = from_dict[key]
    return (new_dict.keys(), new_dict, old2new, new2old)
Returns dict from_dict['unicode_save_field'] = 'original key with unicode'
def doShow(self, xml=0):
    """Show the contents of our resultset.

    :param xml: 0 (default) pretty-prints the parsed data; any other
        value dumps the raw XML with line breaks inserted around the
        known FMPXML tags
    """
    if xml == 0:
        print 'Errorcode:', self.errorcode
        print
        print 'Product information:'
        for key in self.product.keys():
            print ' ', key.encode('UTF-8'),
            print '->', self.product[key].encode('UTF-8')
        print
        print 'Database information:'
        for key in self.database.keys():
            print ' ', key.encode('UTF-8'),
            print'->', self.database[key].encode('UTF-8')
        print
        print 'Metadata:'
        for field in self.metadata.keys():
            print
            print ' ', field.encode('UTF-8')
            for property in self.metadata[field]:
                print '  ', property.encode('UTF-8'),
                print '->', self.metadata[field][property].encode('UTF-8')
        print
        print 'Records:'
        for record in self.resultset:
            print
            for column in record:
                print ' ', column.encode('UTF-8'),
                # Only unicode values need encoding before printing.
                if type(record[column]) == UnicodeType:
                    print '->', record[column].encode('UTF-8')
                else:
                    print '->', record[column]
    else:
        # Raw mode: insert newlines around the known FMPXML tags so
        # the document becomes readable.
        tags = [
            'FMPXMLRESULT', 'ERRORCODE', 'PRODUCT', 'DATABASE',
            'METADATA', 'FIELD', 'RESULTSET', 'ROW', 'COL', 'DATA'
        ]
        xml = self.data
        for tag in tags:
            xml = string.replace(xml, '></' + tag, '>\n</' + tag)
            xml = string.replace(xml, '><' + tag, '>\n<' + tag)
        print xml
Shows the contents of our resultset.
def _setMaxRecords(self, maxRec): if type(maxRec) == int: self._maxRecords = maxRec elif type(maxRec) == str and (maxRec.lower == 'all' or maxRec.isdigit()): self._maxRecords = maxRec.lower else: raise FMError, 'Unsupported -max value (not a number or "all").'
Specifies the maximum number of records you want returned (number or constant 'all')
def _setSkipRecords(self, skipRec):
    """Set how many records to skip in the found set (-skip).

    :param skipRec: a number or a numeric string
    :raises FMError: if skipRec is not numeric
    """
    if type(skipRec) == int or (type(skipRec) == str and skipRec.isdigit()):
        self._skipRecords = skipRec
    else:
        raise FMError, 'Unsupported -skip value (not a number).'
Specifies how many records to skip in the found set
def _setLogicalOperator(self, lop):
    """Set how the find fields are combined together (-lop).

    :param lop: 'and' or 'or' (case-insensitive)
    :raises FMError: for any other operator
    """
    if not lop.lower() in ['and', 'or']:
        raise FMError, 'Unsupported logical operator (not one of "and" or "or").'
    self._lop = lop.lower()
Sets the way the find fields should be combined together.
def _setComparasionOperator(self, field, oper):
    """Set the comparison operator ("<field>.op") for a find field.

    Accepts symbolic ('=', '>', '*', ...) and verbose ('equals',
    'contains', ...) spellings and maps them to the FileMaker operator
    codes. (The 'Comparasion' typo in the name is kept — callers use it.)

    :raises FMError: for an unrecognized operator
    """
    if oper != '':
        # Aliases for each FileMaker operator code.
        validOperators = {
            'eq': 'eq', 'equals': 'eq', '=': 'eq', '==': 'eq',
            'cn': 'cn', 'contains': 'cn', '%%': 'cn', '%': 'cn', '*': 'cn',
            'bw': 'bw', 'begins with': 'bw', '^': 'bw',
            'ew': 'ew', 'ends with': 'ew', '$': 'ew',
            'gt': 'gt', 'greater than': 'gt', '>': 'gt',
            'gte': 'gte', 'greater than or equals': 'gte', '>=': 'gte',
            'lt': 'lt', 'less than': 'lt', '<': 'lt',
            'lte': 'lte', 'less than or equals': 'lte', '<=': 'lte',
            'neq': 'neq', 'not equals': 'neq', '!=': 'neq', '<>': 'neq'
        }
        if not string.lower(oper) in validOperators.keys():
            raise FMError, 'Invalid operator "'+ oper + '" for "' + field + '"'
        oper = validOperators[oper.lower()]
        self._dbParams.append( ["%s.op" % field, oper] )
Sets correct operator for given string representation
def _addDBParam(self, name, value):
    """Add a database (find) parameter.

    Names ending in '__OP' or '.op' instead set the comparison
    operator for the given field; '__' or '.' inside a name is
    translated to the FileMaker '::' related-field separator.
    """
    if name[-4:] == '__OP':
        return self._setComparasionOperator(name[:-4], value)
    if name[-3:] == '.op':
        return self._setComparasionOperator(name[:-3], value)

    if name.find('__') != -1:
        # NOTE(review): this 'import re' appears unused here.
        import re
        name = name.replace('__','::')
    elif name.find('.') != -1:
        name = name.replace('.','::')

    self._dbParams.append( [name, value] )
Adds a database parameter
def _addSortParam(self, field, order=''):
    """Add a sort parameter.

    :param field: field name to sort on
    :param order: 'ascend'/'ascending'/'<' or 'descend'/'descending'/'>'
        (NOTE(review): an empty order means the field is silently
        dropped — no sort parameter is appended)
    :raises FMError: for an unrecognized sort order
    """
    if order != '':
        validSortOrders = {
            'ascend': 'ascend', 'ascending': 'ascend', '<': 'ascend',
            'descend': 'descend', 'descending': 'descend', '>': 'descend'
        }
        if not string.lower(order) in validSortOrders.keys():
            raise FMError, 'Invalid sort order for "' + field + '"'
        self._sortParams.append( [field, validSortOrders[string.lower(order)]] )
Adds a sort parameter; order has to be one of ['ascend', 'ascending', '<', 'descend', 'descending', '>']
def getFile(self, file_xml_uri):
    """Fetch container-field file data from the FileMaker server.

    :param file_xml_uri: the '/fmi/xml/cnt/...' URI of the file
    :return: (file_name, file_extension, file_binary) tuple
    """
    # Split the URI into base name and extension.
    match = re.match('/fmi/xml/cnt/([\w\d.-]+)\.([\w]+)?-*', file_xml_uri)
    binary = self._doRequest(is_file=True, file_xml_uri=file_xml_uri)
    return (match.group(1), match.group(2), binary)
This will execute cmd to fetch file data from FMServer
def doScript(self, script_name, params=None, return_all=False):
    """Execute the named script on the current db/layout.

    :param script_name: name of the FileMaker script to run
    :param params: optional script parameter string
    :param return_all: return the whole resultset instead of the first row
    :return: first record, all records, or None when nothing was returned
    """
    request = [
        uu({'-db': self._db }),
        uu({'-lay': self._layout }),
        uu({'-script': script_name})
    ]
    if params:
        request.append(uu({'-script.param': params }))
    request.append(uu({'-findall': '' }))

    result = self._doRequest(request)
    result = FMResultset.FMResultset(result)

    try:
        # Try to return results from the script
        resp = result.resultset if return_all else result.resultset[0]
    except IndexError:
        # Script produced no records.
        resp = None
    return resp
This function executes the script for given layout for the current db.
def doScriptAfter(self, func, func_kwargs=None, script_name='', params=None):
    """Queue a FileMaker script to run after executing *func*.

    Stores the script request on self._extra_script (presumably picked
    up by the request machinery on the next call — not shown here),
    then calls *func* with *func_kwargs*.

    :param func: callable to invoke
    :param func_kwargs: keyword arguments for *func* (None means {})
    :param script_name: name of the script to attach
    :param params: optional script parameter string
    :return: whatever *func* returns
    """
    # BUG FIX: func_kwargs previously defaulted to a mutable {} shared
    # across calls; use None as the sentinel instead.
    if func_kwargs is None:
        func_kwargs = {}
    request = [
        uu({'-script': script_name})
    ]
    if params:
        request.append(uu({'-script.param': params }))
    self._extra_script = request
    return func(**func_kwargs)
This function will execute extra script after passed function
def getDbNames(self):
    """Return the names (lower-cased) of the databases open on the server."""
    request = []
    request.append(uu({'-dbnames': '' }))
    result = self._doRequest(request)
    result = FMResultset.FMResultset(result)
    dbNames = []
    for dbName in result.resultset:
        dbNames.append(string.lower(dbName['DATABASE_NAME']))
    return dbNames
This function returns the list of open databases
def getLayoutNames(self):
    """Return the names (lower-cased) of the layouts in the current db.

    :raises FMError: when no database has been selected
    """
    if self._db == '':
        raise FMError, 'No database was selected'
    request = []
    request.append(uu({'-db': self._db }))
    request.append(uu({'-layoutnames': '' }))
    result = self._doRequest(request)
    result = FMResultset.FMResultset(result)
    layoutNames = []
    for layoutName in result.resultset:
        layoutNames.append(string.lower(layoutName['LAYOUT_NAME']))
    return layoutNames
This function returns the list of layouts for the current db.
def getScriptNames(self):
    """Return the names (lower-cased) of the scripts in the current db.

    :raises FMError: when no database has been selected
    """
    if self._db == '':
        raise FMError, 'No database was selected'
    request = []
    request.append(uu({'-db': self._db }))
    request.append(uu({'-scriptnames': '' }))
    result = self._doRequest(request)
    result = FMResultset.FMResultset(result)
    scriptNames = []
    for scriptName in result.resultset:
        scriptNames.append(string.lower(scriptName['SCRIPT_NAME']))
    return scriptNames
This function returns the list of scripts for the current db.
def _preFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND'):
    """Stage the shared parameters for all -find* commands.

    :param WHAT: dict of field criteria, or a previously fetched record
        object (detected via its _modified attribute)
    :param SORT: mapping of field name -> sort order
        (NOTE(review): default is a list, but the body indexes it like
        a dict — confirm callers always pass a dict)
    :raises FMError: for unusable WHAT types or when no layout is set
    """
    if hasattr(WHAT, '_modified'):
        # Record object: find it again by its record id.
        self._addDBParam('RECORDID', WHAT.RECORDID)
    elif type(WHAT)==dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to on of function doFind* as argument WHAT cannot be used.' % type(WHAT)

    for key in SORT:
        self._addSortParam(key, SORT[key])
    if SKIP:
        self._setSkipRecords(SKIP)
    if MAX:
        self._setMaxRecords(MAX)
    if LOP:
        self._setLogicalOperator(LOP)
    if self._layout == '':
        raise FMError, 'No layout was selected'
This function will process attributtes for all -find* commands.
def doFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params):
    """Perform the -find command.

    Extra keyword arguments become additional find criteria. Server
    errors 401/8 ("no records match") are translated to an empty list.

    :return: found records, or [] when nothing matched
    :raises FMServerError: for any other server error
    """
    self._preFind(WHAT, SORT, SKIP, MAX, LOP)

    for key in params:
        self._addDBParam(key, params[key])

    try:
        return self._doAction('-find')
    except FMServerError as e:
        # "No records match" is an empty found set, not a failure.
        if e.args[0] in [401, 8]:
            return []
        # BUG FIX: other server errors were silently swallowed (the
        # function fell off the except block and returned None);
        # re-raise them so callers see the real failure.
        raise
This function will perform the command -find.
def doFindAll(self, WHAT={}, SORT=[], SKIP=None, MAX=None):
    """Perform the -findall command (every record on the layout)."""
    self._preFind(WHAT, SORT, SKIP, MAX)
    return self._doAction('-findall')
This function will perform the command -findall.
def doFindAny(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params):
    """Perform the -findany command (a single random record).

    Extra keyword arguments become additional find criteria.
    """
    self._preFind(WHAT, SORT, SKIP, MAX, LOP)
    for key in params:
        self._addDBParam(key, params[key])
    return self._doAction('-findany')
This function will perform the command -findany.
def doDelete(self, WHAT={}):
    """Perform the -delete command for the record identified by WHAT.

    WHAT is either a fetched record object (RECORDID/MODID attributes)
    or a dict containing a 'RECORDID' key.

    :raises FMError: for unusable WHAT types, a missing layout or a
        missing record id
    """
    if hasattr(WHAT, '_modified'):
        self._addDBParam('RECORDID', WHAT.RECORDID)
        self._addDBParam('MODID', WHAT.MODID)
    elif type(WHAT) == dict and WHAT.has_key('RECORDID'):
        self._addDBParam('RECORDID', WHAT['RECORDID'])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to function doDelete as argument WHAT cannot be used.' % type(WHAT)

    if self._layout == '':
        raise FMError, 'No layout was selected'

    if self._checkRecordID() == 0:
        raise FMError, 'RecordID is missing'

    return self._doAction('-delete')
This function will perform the command -delete.
def doNew(self, WHAT={}, **params):
    """Perform the -new command, creating a record from WHAT/params.

    WHAT is either a dict of field values or a fetched record object
    (detected via its _modified attribute; assumed iterable over its
    field names — confirm against the record class).

    :raises FMError: for unusable WHAT types, a missing layout or when
        no field data was supplied
    """
    if hasattr(WHAT, '_modified'):
        # Copy every field except the bookkeeping ids, translating
        # normalized key names back to their originals.
        for key in WHAT:
            if key not in ['RECORDID','MODID']:
                if WHAT.__new2old__.has_key(key):
                    self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), WHAT[key])
                else:
                    self._addDBParam(key, WHAT[key])
    elif type(WHAT)==dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.' % type(WHAT)

    if self._layout == '':
        raise FMError, 'No layout was selected'

    for key in params:
        self._addDBParam(key, params[key])

    if len(self._dbParams) == 0:
        raise FMError, 'No data to be added'

    return self._doAction('-new')
This function will perform the command -new.
def doDup(self, WHAT={}, **params):
    """Perform the -dup command, duplicating the record given by WHAT.

    For a record object only the modified fields are sent along with
    RECORDID/MODID; a plain dict is sent as-is.

    :raises FMError: for unusable WHAT types, a missing layout or a
        missing record id
    """
    if hasattr(WHAT, '_modified'):
        # Only fields changed on the record object are forwarded,
        # translated back to their original key names.
        for key, value in WHAT._modified():
            if WHAT.__new2old__.has_key(key):
                self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value)
            else:
                self._addDBParam(key, value)
        self._addDBParam('RECORDID', WHAT.RECORDID)
        self._addDBParam('MODID', WHAT.MODID)
    elif type(WHAT) == dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.' % type(WHAT)

    if self._layout == '':
        raise FMError, 'No layout was selected'

    for key in params:
        self._addDBParam(key, params[key])

    if self._checkRecordID() == 0:
        raise FMError, 'RecordID is missing'

    return self._doAction('-dup')
This function will perform the command -dup.
def _buildUrl(self): return '%(protocol)s://%(host)s:%(port)s%(address)s'%{ 'protocol': self._protocol, 'host': self._host, 'port': self._port, 'address': self._address, }
Builds url for normal FM requests.
def _buildFileUrl(self, xml_req): return '%(protocol)s://%(host)s:%(port)s%(xml_req)s'%{ 'protocol': self._protocol, 'host': self._host, 'port': self._port, 'xml_req': xml_req, }
Builds url for fetching the files from FM.
def _doRequest(self, request=None, is_file=False, file_xml_uri=''):
    """Perform a request against the FileMaker server.

    :param request: list of pre-encoded 'key=value' query fragments
    :param is_file: fetch a container file instead of an XML command
    :param file_xml_uri: file URI used when is_file is True
    :return: raw response body from FileMaker
    :raises requests.HTTPError: for non-2xx responses (raise_for_status)
    """
    if request is None:
        request = []

    if is_file and file_xml_uri:
        url = self._buildFileUrl(file_xml_uri)
    else:
        # Join the query fragments onto the command URL.
        request = '&'.join(request)
        url = "%s?%s" % (self._buildUrl(), request)

    if self._debug:
        print '[PyFileMaker DEBUG] ', url

    # HTTP basic auth with the stored credentials.
    resp = requests.get(
        url = url,
        auth = (self._login, self._password)
    )
    resp.raise_for_status()

    return resp.content
This function will perform the specified request on the FileMaker server, and it will return the raw result from FileMaker.
def FMErrorByNum( num ):
    """Raise the exception matching the given FileMaker error number.

    Unknown numbers raise FMServerError with the generic (-1) message;
    102 raises FMFieldError; every other known number raises
    FMServerError with its mapped message.
    """
    if not num in FMErrorNum.keys():
        raise FMServerError, (num, FMErrorNum[-1])
    elif num == 102:
        raise FMFieldError, (num, FMErrorNum[num])
    else:
        raise FMServerError, (num, FMErrorNum[num])
This function raises an error based on the specified error code.