INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Populates self._user_dn with the distinguished name of our user. This will either construct the DN from a template in AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it. If we have to search, we'll cache the DN.
def _load_user_dn(self): """ Populates self._user_dn with the distinguished name of our user. This will either construct the DN from a template in AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it. If we have to search, we'll cache the DN. """ if self._using_simple_bind_mode(): self._user_dn = self._construct_simple_user_dn() else: if self.settings.CACHE_TIMEOUT > 0: cache_key = valid_cache_key( "django_auth_ldap.user_dn.{}".format(self._username) ) self._user_dn = cache.get_or_set( cache_key, self._search_for_user_dn, self.settings.CACHE_TIMEOUT ) else: self._user_dn = self._search_for_user_dn()
Searches the directory for a user matching AUTH_LDAP_USER_SEARCH. Populates self._user_dn and self._user_attrs.
def _search_for_user_dn(self): """ Searches the directory for a user matching AUTH_LDAP_USER_SEARCH. Populates self._user_dn and self._user_attrs. """ search = self.settings.USER_SEARCH if search is None: raise ImproperlyConfigured( "AUTH_LDAP_USER_SEARCH must be an LDAPSearch instance." ) results = search.execute(self.connection, {"user": self._username}) if results is not None and len(results) == 1: (user_dn, self._user_attrs) = next(iter(results)) else: user_dn = None return user_dn
Returns True if the group requirement (AUTH_LDAP_REQUIRE_GROUP) is met. Always returns True if AUTH_LDAP_REQUIRE_GROUP is None.
def _check_required_group(self): """ Returns True if the group requirement (AUTH_LDAP_REQUIRE_GROUP) is met. Always returns True if AUTH_LDAP_REQUIRE_GROUP is None. """ required_group_dn = self.settings.REQUIRE_GROUP if required_group_dn is not None: if not isinstance(required_group_dn, LDAPGroupQuery): required_group_dn = LDAPGroupQuery(required_group_dn) result = required_group_dn.resolve(self) if not result: raise self.AuthenticationFailed( "user does not satisfy AUTH_LDAP_REQUIRE_GROUP" ) return True
Returns True if the negative group requirement (AUTH_LDAP_DENY_GROUP) is met. Always returns True if AUTH_LDAP_DENY_GROUP is None.
def _check_denied_group(self): """ Returns True if the negative group requirement (AUTH_LDAP_DENY_GROUP) is met. Always returns True if AUTH_LDAP_DENY_GROUP is None. """ denied_group_dn = self.settings.DENY_GROUP if denied_group_dn is not None: is_member = self._get_groups().is_member_of(denied_group_dn) if is_member: raise self.AuthenticationFailed( "user does not satisfy AUTH_LDAP_DENY_GROUP" ) return True
Loads the User model object from the database or creates it if it doesn't exist. Also populates the fields, subject to AUTH_LDAP_ALWAYS_UPDATE_USER.
def _get_or_create_user(self, force_populate=False):
    """Load the Django user from the database, creating it if needed.

    Fields are (re)populated when the user was just built, when
    AUTH_LDAP_ALWAYS_UPDATE_USER is set, or when force_populate is True.
    Raises AuthenticationFailed for new users if AUTH_LDAP_NO_NEW_USERS
    is set. Group mirroring runs last, once the user is sure to have a pk.
    """
    save_user = False
    username = self.backend.ldap_to_django_username(self._username)
    self._user, built = self.backend.get_or_build_user(username, self)
    self._user.ldap_user = self
    self._user.ldap_username = self._username
    should_populate = (
        force_populate or self.settings.ALWAYS_UPDATE_USER or built
    )
    if built:
        if self.settings.NO_NEW_USERS:
            raise self.AuthenticationFailed(
                "user does not satisfy AUTH_LDAP_NO_NEW_USERS"
            )
        logger.debug("Creating Django user {}".format(username))
        self._user.set_unusable_password()
        save_user = True
    if should_populate:
        logger.debug("Populating Django user {}".format(username))
        self._populate_user()
        save_user = True
        # Give the client a chance to finish populating the user just
        # before saving.
        populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
    if save_user:
        self._user.save()
    # This has to wait until we're sure the user has a pk.
    if self.settings.MIRROR_GROUPS or self.settings.MIRROR_GROUPS_EXCEPT:
        self._normalize_mirror_settings()
        self._mirror_groups()
Converts one or more group DNs to an LDAPGroupQuery. group_dns may be a string, a non-empty list or tuple of strings, or an LDAPGroupQuery. The result will be an LDAPGroupQuery. A list or tuple will be joined with the | operator.
def _normalize_group_dns(self, group_dns):
    """Coerce one or more group DNs into a single LDAPGroupQuery.

    Accepts an LDAPGroupQuery (returned as-is), a DN string, or a
    non-empty list/tuple of DN strings joined with the | operator.
    Raises ValueError for anything else.
    """
    if isinstance(group_dns, LDAPGroupQuery):
        return group_dns
    if isinstance(group_dns, str):
        return LDAPGroupQuery(group_dns)
    if isinstance(group_dns, (list, tuple)) and len(group_dns) > 0:
        return reduce(operator.or_, map(LDAPGroupQuery, group_dns))
    raise ValueError(group_dns)
Validates the group mirroring settings and converts them as necessary.
def _normalize_mirror_settings(self): """ Validates the group mirroring settings and converts them as necessary. """ def malformed_mirror_groups_except(): return ImproperlyConfigured( "{} must be a collection of group names".format( self.settings._name("MIRROR_GROUPS_EXCEPT") ) ) def malformed_mirror_groups(): return ImproperlyConfigured( "{} must be True or a collection of group names".format( self.settings._name("MIRROR_GROUPS") ) ) mge = self.settings.MIRROR_GROUPS_EXCEPT mg = self.settings.MIRROR_GROUPS if mge is not None: if isinstance(mge, (set, frozenset)): pass elif isinstance(mge, (list, tuple)): mge = self.settings.MIRROR_GROUPS_EXCEPT = frozenset(mge) else: raise malformed_mirror_groups_except() if not all(isinstance(value, str) for value in mge): raise malformed_mirror_groups_except() elif mg: warnings.warn( ConfigurationWarning( "Ignoring {} in favor of {}".format( self.settings._name("MIRROR_GROUPS"), self.settings._name("MIRROR_GROUPS_EXCEPT"), ) ) ) mg = self.settings.MIRROR_GROUPS = None if mg is not None: if isinstance(mg, (bool, set, frozenset)): pass elif isinstance(mg, (list, tuple)): mg = self.settings.MIRROR_GROUPS = frozenset(mg) else: raise malformed_mirror_groups() if isinstance(mg, (set, frozenset)) and ( not all(isinstance(value, str) for value in mg) ): raise malformed_mirror_groups()
Mirrors the user's LDAP groups in the Django database and updates the user's membership.
def _mirror_groups(self):
    """Mirror the user's LDAP groups as Django groups and sync membership.

    MIRROR_GROUPS / MIRROR_GROUPS_EXCEPT (already normalized to sets)
    limit which group memberships may be touched. Missing Django groups
    are created on demand.
    """
    target_group_names = frozenset(self._get_groups().get_group_names())
    current_group_names = frozenset(
        self._user.groups.values_list("name", flat=True).iterator()
    )
    # These were normalized to sets above.
    MIRROR_GROUPS_EXCEPT = self.settings.MIRROR_GROUPS_EXCEPT
    MIRROR_GROUPS = self.settings.MIRROR_GROUPS
    # Restrict the target set so we never modify membership of groups
    # beyond our purview.
    if isinstance(MIRROR_GROUPS_EXCEPT, (set, frozenset)):
        target_group_names = (target_group_names - MIRROR_GROUPS_EXCEPT) | (
            current_group_names & MIRROR_GROUPS_EXCEPT
        )
    elif isinstance(MIRROR_GROUPS, (set, frozenset)):
        target_group_names = (target_group_names & MIRROR_GROUPS) | (
            current_group_names - MIRROR_GROUPS
        )
    if target_group_names != current_group_names:
        existing_groups = list(
            Group.objects.filter(name__in=target_group_names).iterator()
        )
        existing_group_names = frozenset(group.name for group in existing_groups)
        new_groups = [
            Group.objects.get_or_create(name=name)[0]
            for name in target_group_names
            if name not in existing_group_names
        ]
        self._user.groups.set(existing_groups + new_groups)
Returns an _LDAPUserGroups object, which can determine group membership.
def _get_groups(self): """ Returns an _LDAPUserGroups object, which can determine group membership. """ if self._groups is None: self._groups = _LDAPUserGroups(self) return self._groups
Binds to the LDAP server with AUTH_LDAP_BIND_DN and AUTH_LDAP_BIND_PASSWORD.
def _bind(self): """ Binds to the LDAP server with AUTH_LDAP_BIND_DN and AUTH_LDAP_BIND_PASSWORD. """ self._bind_as(self.settings.BIND_DN, self.settings.BIND_PASSWORD, sticky=True)
Binds to the LDAP server with the given credentials. This does not trap exceptions. If sticky is True, then we will consider the connection to be bound for the life of this object. If False, then the caller only wishes to test the credentials, after which the connection will be considered unbound.
def _bind_as(self, bind_dn, bind_password, sticky=False): """ Binds to the LDAP server with the given credentials. This does not trap exceptions. If sticky is True, then we will consider the connection to be bound for the life of this object. If False, then the caller only wishes to test the credentials, after which the connection will be considered unbound. """ self._get_connection().simple_bind_s(bind_dn, bind_password) self._connection_bound = sticky
Returns our cached LDAPObject, which may or may not be bound.
def _get_connection(self): """ Returns our cached LDAPObject, which may or may not be bound. """ if self._connection is None: uri = self.settings.SERVER_URI if callable(uri): if func_supports_parameter(uri, "request"): uri = uri(self._request) else: warnings.warn( "Update AUTH_LDAP_SERVER_URI callable %s.%s to accept " "a positional `request` argument. Support for callables " "accepting no arguments will be removed in a future " "version." % (uri.__module__, uri.__name__), DeprecationWarning, ) uri = uri() self._connection = self.backend.ldap.initialize(uri, bytes_mode=False) for opt, value in self.settings.CONNECTION_OPTIONS.items(): self._connection.set_option(opt, value) if self.settings.START_TLS: logger.debug("Initiating TLS") self._connection.start_tls_s() return self._connection
Loads the settings we need to deal with groups. Raises ImproperlyConfigured if anything's not right.
def _init_group_settings(self): """ Loads the settings we need to deal with groups. Raises ImproperlyConfigured if anything's not right. """ self._group_type = self.settings.GROUP_TYPE if self._group_type is None: raise ImproperlyConfigured( "AUTH_LDAP_GROUP_TYPE must be an LDAPGroupType instance." ) self._group_search = self.settings.GROUP_SEARCH if self._group_search is None: raise ImproperlyConfigured( "AUTH_LDAP_GROUP_SEARCH must be an LDAPSearch instance." )
Returns the set of Django group names that this user belongs to by virtue of LDAP group memberships.
def get_group_names(self):
    """Return the set of Django group names implied by LDAP membership.

    The result is memoized on the instance and persisted via the
    attribute cache.
    """
    if self._group_names is None:
        self._load_cached_attr("_group_names")
    if self._group_names is None:
        group_infos = self._get_group_infos()
        self._group_names = {
            self._group_type.group_name_from_info(group_info)
            for group_info in group_infos
        }
        self._cache_attr("_group_names")
    return self._group_names
Returns true if our user is a member of the given group.
def is_member_of(self, group_dn):
    """Return True if our user is a member of group_dn (compared lowercase)."""
    # Normalize the DN for comparison.
    group_dn = group_dn.lower()
    is_member = None
    # If the full group-DN set isn't loaded yet, first ask the group type
    # directly to avoid the cost of loading it.
    if self._group_dns is None:
        is_member = self._group_type.is_member(self._ldap_user, group_dn)
    if is_member is None:
        is_member = group_dn in self.get_group_dns()
    logger.debug(
        "{} is{}a member of {}".format(
            self._ldap_user.dn, " " if is_member else " not ", group_dn
        )
    )
    return is_member
Returns a (cached) list of group_info structures for the groups that our user is a member of.
def _get_group_infos(self): """ Returns a (cached) list of group_info structures for the groups that our user is a member of. """ if self._group_infos is None: self._group_infos = self._group_type.user_groups( self._ldap_user, self._group_search ) return self._group_infos
Memcache keys can't have spaces in them, so we'll remove them from the DN for maximum compatibility.
def _cache_key(self, attr_name):
    """Build a memcache-safe key for attr_name.

    valid_cache_key strips characters (e.g. spaces in the DN) that
    memcache cannot accept.
    """
    dn = self._ldap_user.dn
    return valid_cache_key(
        "auth_ldap.{}.{}.{}".format(self.__class__.__name__, attr_name, dn)
    )
Returns the configured ldap module.
def get_ldap(cls, global_options=None):
    """Return the configured ldap module.

    global_options, when given, are applied once per process; subsequent
    calls skip the configuration step.
    """
    if not cls._ldap_configured and global_options is not None:
        for opt, value in global_options.items():
            ldap.set_option(opt, value)
        cls._ldap_configured = True
    return ldap
Initializes and returns our logger instance.
def get_logger(cls):
    """Return the shared 'django_auth_ldap' logger, creating it on first use.

    A NullHandler is attached so library logging stays silent unless the
    application configures handlers.
    """
    if cls.logger is None:
        cls.logger = logging.getLogger("django_auth_ldap")
        cls.logger.addHandler(logging.NullHandler())
    return cls.logger
Returns a new search object with additional search terms and-ed to the filter string. term_dict maps attribute names to assertion values. If you don't want the values escaped, pass escape=False.
def search_with_additional_terms(self, term_dict, escape=True):
    """Return a copy of this search with term_dict AND-ed into the filter.

    term_dict maps attribute names to assertion values; values are
    LDAP-escaped unless escape=False.
    """
    term_strings = [self.filterstr]
    for name, value in term_dict.items():
        if escape:
            value = self.ldap.filter.escape_filter_chars(value)
        term_strings.append("({}={})".format(name, value))
    filterstr = "(&{})".format("".join(term_strings))
    return self.__class__(
        self.base_dn, self.scope, filterstr, attrlist=self.attrlist
    )
Returns a new search object with filterstr and-ed to the original filter string. The caller is responsible for passing in a properly escaped string.
def search_with_additional_term_string(self, filterstr):
    """Return a copy of this search with filterstr AND-ed to the filter.

    The caller must supply a properly escaped filter fragment.
    """
    combined = "(&{}{})".format(self.filterstr, filterstr)
    return self.__class__(
        self.base_dn, self.scope, combined, attrlist=self.attrlist
    )
Given the (DN, attrs) 2-tuple of an LDAP group, this returns the name of the Django group. This may return None to indicate that a particular LDAP group has no corresponding Django group. The base implementation returns the value of the cn attribute, or whichever attribute was given to __init__ in the name_attr parameter.
def group_name_from_info(self, group_info):
    """Map an LDAP (dn, attrs) group tuple to a Django group name.

    Returns the first value of the configured name attribute (cn by
    default, or whatever name_attr was passed to __init__), or None when
    the attribute is missing or empty — meaning the LDAP group has no
    corresponding Django group.
    """
    try:
        return group_info[1][self.name_attr][0]
    except (KeyError, IndexError):
        return None
Searches for any group that is either the user's primary or contains the user as a member.
def user_groups(self, ldap_user, group_search):
    """Find groups that are the user's primary group or list the user.

    Matches either the group's gidNumber (primary group) or a memberUid
    entry. Returns an empty list when the user has no uid attribute.
    """
    groups = []
    try:
        user_uid = ldap_user.attrs["uid"][0]
        if "gidNumber" in ldap_user.attrs:
            user_gid = ldap_user.attrs["gidNumber"][0]
            filterstr = "(|(gidNumber={})(memberUid={}))".format(
                self.ldap.filter.escape_filter_chars(user_gid),
                self.ldap.filter.escape_filter_chars(user_uid),
            )
        else:
            filterstr = "(memberUid={})".format(
                self.ldap.filter.escape_filter_chars(user_uid)
            )
        search = group_search.search_with_additional_term_string(filterstr)
        groups = search.execute(ldap_user.connection)
    except (KeyError, IndexError):
        # No uid attribute: the user cannot match any POSIX group.
        pass
    return groups
Returns True if the group is the user's primary group or if the user is listed in the group's memberUid attribute.
def is_member(self, ldap_user, group_dn):
    """Check POSIX group membership for ldap_user against group_dn.

    True when the user appears in the group's memberUid attribute or the
    group's gidNumber matches the user's primary gid; False when the
    user has no uid attribute or neither server-side compare matches.
    """
    try:
        user_uid = ldap_user.attrs["uid"][0]
        try:
            is_member = ldap_user.connection.compare_s(
                group_dn, "memberUid", user_uid.encode()
            )
        except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
            is_member = False
        if not is_member:
            try:
                user_gid = ldap_user.attrs["gidNumber"][0]
                is_member = ldap_user.connection.compare_s(
                    group_dn, "gidNumber", user_gid.encode()
                )
            except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
                is_member = False
    except (KeyError, IndexError):
        is_member = False
    return is_member
This searches for all of a user's groups from the bottom up. In other words, it returns the groups that the user belongs to, the groups that those groups belong to, etc. Circular references will be detected and pruned.
def user_groups(self, ldap_user, group_search):
    """Collect the user's groups transitively, from the bottom up.

    Starting with the user's own DN, repeatedly searches for groups
    containing any of the newly found DNs until nothing new appears.
    Cycles are pruned by never searching with the same member DN twice.
    """
    group_info_map = {}  # group_dn -> group_info of groups found so far
    member_dn_set = {ldap_user.dn}  # member DNs to search with next
    handled_dn_set = set()  # member DNs we've already searched with
    while member_dn_set:
        group_infos = self.find_groups_with_any_member(
            member_dn_set, group_search, ldap_user.connection
        )
        new_group_info_map = {info[0]: info for info in group_infos}
        group_info_map.update(new_group_info_map)
        handled_dn_set.update(member_dn_set)
        # Next round searches only with DNs not seen before.
        member_dn_set = set(new_group_info_map.keys()) - handled_dn_set
    return group_info_map.values()
Returns a function for aggregating a sequence of sub-results.
def aggregator(self):
    """Return the builtin (all/any) matching this query's AND/OR connector.

    Raises ValueError for an unknown connector.
    """
    if self.connector == self.AND:
        return all
    if self.connector == self.OR:
        return any
    raise ValueError(self.connector)
Generates the query result for each child.
def _resolve_children(self, ldap_user, groups):
    """Yield the boolean result of each child term of this query.

    Nested LDAPGroupQuery children are resolved recursively; plain DN
    children are tested for direct membership.
    """
    for child in self.children:
        if isinstance(child, LDAPGroupQuery):
            yield child.resolve(ldap_user, groups)
        else:
            yield groups.is_member_of(child)
Makes a list of positions and position commands from the tree
def gather_positions(tree):
    """Yield one raw position dict per <step> element in the tree.

    Values stay as strings; an 'r'-prefixed value means relative
    movement. calculate_positions() later turns these into concrete
    coordinates.
    """
    pos = {'data-x': 'r0', 'data-y': 'r0', 'data-z': 'r0',
           'data-rotate-x': 'r0', 'data-rotate-y': 'r0',
           'data-rotate-z': 'r0', 'data-scale': 'r0',
           'is_path': False}
    steps = 0
    default_movement = True

    for step in tree.findall('step'):
        steps += 1
        for key in POSITION_ATTRIBS:
            value = step.get(key)
            if value is not None:
                # We have a new value.
                default_movement = False  # No longer use the default movement
                pos[key] = value
            elif pos[key] and not pos[key].startswith('r'):
                # The old value was absolute and no new value, so stop.
                pos[key] = 'r0'
            # Otherwise the old value was a relative movement, so we
            # just keep moving.

        if steps == 1 and pos['data-scale'] == 'r0':
            # No scale given for first slide, it needs to start at 1.
            pos['data-scale'] = '1'

        if default_movement and steps != 1:
            # No positioning has been given, use default:
            pos['data-x'] = 'r%s' % DEFAULT_MOVEMENT

        if 'data-rotate' in step.attrib:
            # data-rotate is an alias for data-rotate-z.
            pos['data-rotate-z'] = step.get('data-rotate')
            del step.attrib['data-rotate']

        if 'hovercraft-path' in step.attrib:
            # Path given: x and y will be calculated from the path.
            default_movement = False  # No longer use the default movement
            pos['is_path'] = True
            # Add the path spec.
            pos['path'] = step.attrib['hovercraft-path']
            yield pos.copy()
            # And get rid of it for the next step.
            del pos['path']
        else:
            if 'data-x' in step.attrib or 'data-y' in step.attrib:
                # No longer using a path.
                pos['is_path'] = False
            yield pos.copy()
Calculates position information
def calculate_positions(positions):
    """Yield concrete position dicts computed from gathered positions.

    Steps carrying a 'path' entry are laid out along an SVG path spec;
    all other steps are positioned by accumulating absolute/relative
    movements via _update_position().
    """
    current_position = {'data-x': 0, 'data-y': 0, 'data-z': 0,
                        'data-rotate-x': 0, 'data-rotate-y': 0,
                        'data-rotate-z': 0, 'data-scale': 1}
    positer = iter(positions)
    position = next(positer)
    _update_position(current_position, position)

    while True:
        if 'path' in position:
            # Start of a new path! Follow the path specification.
            path = position['path']
            first_point = _pos_to_cord(current_position)
            # Paths that end in Z or z are closed.
            closed_path = path.strip()[-1].upper() == 'Z'
            path = parse_path(path)

            # Find out how many positions should be calculated:
            count = 1
            last = False
            deferred_positions = []
            while True:
                try:
                    position = next(positer)
                    deferred_positions.append(position)
                except StopIteration:
                    last = True  # This path goes to the end
                    break
                if not position.get('is_path') or 'path' in position:
                    # The end of the path, or the start of a new one.
                    break
                count += 1

            if count < 2:
                raise AssertionError("The path specification is only used for "
                                     "one slide, which makes it pointless.")

            if closed_path:
                # This path closes in on itself. Skip the last part, so that
                # the first and last step doesn't overlap.
                endcount = count + 1
            else:
                endcount = count

            multiplier = (endcount * DEFAULT_MOVEMENT) / path.length()
            offset = path.point(0)
            path_iter = iter(deferred_positions)
            for x in range(count):
                point = path.point(x / (endcount - 1))
                point = ((point - offset) * multiplier) + first_point
                current_position.update(_coord_to_pos(point))
                rotation = _path_angle(path, x / (endcount - 1))
                current_position['data-rotate-z'] = rotation
                yield current_position.copy()
                try:
                    position = next(path_iter)
                except StopIteration:
                    last = True
                    break
                _update_position(current_position, position)

            if last:
                break
            continue

        yield current_position.copy()
        try:
            position = next(positer)
        except StopIteration:
            break
        _update_position(current_position, position)
Updates the tree with new positions
def update_positions(tree, positions):
    """Write calculated positions back onto the tree's <step> elements.

    Keys ending in '-rel' are resolved relative to the element whose id
    matches the key's value; other keys are written verbatim. The
    internal hovercraft-path attribute is removed once consumed.
    """
    for step, pos in zip(tree.findall('step'), positions):
        for key in sorted(pos):
            value = pos.get(key)
            if key.endswith("-rel"):
                abs_key = key[:key.index("-rel")]
                if value is not None:
                    # Resolve against the referenced element's absolute value.
                    els = tree.findall(".//*[@id='" + value + "']")
                    for el in els:
                        pos[abs_key] = num(el.get(abs_key)) + pos.get(abs_key)
                        step.attrib[abs_key] = str(pos.get(abs_key))
            else:
                step.attrib[key] = str(pos[key])
        if 'hovercraft-path' in step.attrib:
            del step.attrib['hovercraft-path']
Position the slides in the tree
def position_slides(tree):
    """Compute and apply positions for every slide step in the tree."""
    # gather -> calculate -> apply, as one pipeline.
    update_positions(tree, calculate_positions(gather_positions(tree)))
Makes a copy of a node with the same attributes and text, but no children.
def copy_node(node):
    """Return a childless copy of node (same tag, attributes, text, tail)."""
    element = node.makeelement(node.tag)
    element.text = node.text
    element.tail = node.tail
    for key, value in node.items():
        element.set(key, value)
    return element
Copies a resource file and returns the source path for monitoring
def copy_resource(self, resource, targetdir):
    """Copy a resource into targetdir, yielding source paths to monitor.

    Absolute paths and URIs are left alone. Files under a directory
    resource are copied recursively but not yielded (they are not
    monitored); a single-file resource yields its copied source path.
    """
    final_path = resource.final_path()
    if final_path[0] == '/' or (':' in final_path) or ('?' in final_path):
        # Absolute path or URI: Do nothing
        return
    source_path = self.get_source_path(resource)
    if resource.resource_type == DIRECTORY_RESOURCE:
        pattern = os.path.join(source_path, '**')
        for file_path in glob.iglob(pattern, recursive=True):
            if os.path.isdir(file_path):
                continue
            rest_target_path = file_path[len(source_path) + 1:]
            target_path = os.path.join(targetdir, final_path, rest_target_path)
            # Don't yield the result, we don't monitor these.
            self._copy_file(file_path, target_path)
    else:
        target_path = os.path.join(targetdir, final_path)
        yield self._copy_file(source_path, target_path)
Start the server. Do nothing if server is already running. This function will block if no_block is not set to True.
def start(self):
    """Start the server; do nothing if it is already running.

    Blocks unless no_block is True, in which case serving happens in a
    daemon thread.
    """
    if self.is_run:
        return
    # set class attribute
    ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
    ThreadingTCPServer.daemon_threads = True
    # init server; bind/activate done manually so socket options apply first
    self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService,
                                       bind_and_activate=False)
    # set socket options
    self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    # TODO test no_delay with bench
    self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    # bind and activate
    self._service.server_bind()
    self._service.server_activate()
    # serve request
    if self.no_block:
        self._serve_th = Thread(target=self._serve)
        self._serve_th.daemon = True
        self._serve_th.start()
    else:
        self._serve()
Generates the presentation and returns a list of files used
def generate(args):
    """Generate the presentation into args.targetdir.

    Returns the set of absolute source-file paths used, so callers can
    monitor them for changes.
    """
    source_files = {args.presentation}

    # Parse the template info
    template_info = Template(args.template)

    if args.css:
        presentation_dir = os.path.split(args.presentation)[0]
        target_path = os.path.relpath(args.css, presentation_dir)
        template_info.add_resource(args.css, CSS_RESOURCE, target=target_path,
                                   extra_info='all')
        source_files.add(args.css)

    if args.js:
        presentation_dir = os.path.split(args.presentation)[0]
        target_path = os.path.relpath(args.js, presentation_dir)
        template_info.add_resource(args.js, JS_RESOURCE, target=target_path,
                                   extra_info=JS_POSITION_BODY)
        source_files.add(args.js)

    # Make the resulting HTML
    htmldata, dependencies = rst2html(args.presentation, template_info,
                                      args.auto_console, args.skip_help,
                                      args.skip_notes, args.mathjax,
                                      args.slide_numbers)
    source_files.update(dependencies)

    # Write the HTML out
    if not os.path.exists(args.targetdir):
        os.makedirs(args.targetdir)
    with open(os.path.join(args.targetdir, 'index.html'), 'wb') as outfile:
        outfile.write(htmldata)

    # Copy supporting files
    source_files.update(template_info.copy_resources(args.targetdir))

    # Copy images from the source:
    sourcedir = os.path.split(os.path.abspath(args.presentation))[0]
    tree = html.fromstring(htmldata)
    for image in tree.iterdescendants('img'):
        filename = image.attrib['src']
        source_files.add(copy_resource(filename, sourcedir, args.targetdir))

    RE_CSS_URL = re.compile(br"""url\(['"]?(.*?)['"]?[\)\?\#]""")

    # Copy any files referenced by url() in the css-files:
    for resource in template_info.resources:
        if resource.resource_type != CSS_RESOURCE:
            continue
        # path in CSS is relative to CSS file; construct source/dest accordingly
        css_base = template_info.template_root if resource.is_in_template else sourcedir
        css_sourcedir = os.path.dirname(os.path.join(css_base, resource.filepath))
        css_targetdir = os.path.dirname(
            os.path.join(args.targetdir, resource.final_path()))
        uris = RE_CSS_URL.findall(template_info.read_data(resource))
        uris = [uri.decode() for uri in uris]
        if resource.is_in_template and template_info.builtin_template:
            for filename in uris:
                template_info.add_resource(filename, OTHER_RESOURCE,
                                           target=css_targetdir,
                                           is_in_template=True)
        else:
            for filename in uris:
                source_files.add(
                    copy_resource(filename, css_sourcedir, css_targetdir))

    # All done!
    return {os.path.abspath(f) for f in source_files if f}
Stop the server. Do nothing if server is already not running.
def stop(self):
    """Stop the server; do nothing if it is not running."""
    if self.is_run:
        self._service.shutdown()
        self._service.server_close()
Get or set host (IPv4/IPv6 or hostname like 'plc.domain.net') :param hostname: hostname or IPv4/IPv6 address or None for get value :type hostname: str or None :returns: hostname or None if set fail :rtype: str or None
def host(self, hostname=None):
    """Get or set host (IPv4/IPv6 or hostname like 'plc.domain.net')

    :param hostname: hostname or IPv4/IPv6 address or None for get value
    :type hostname: str or None
    :returns: hostname or None if set fail
    :rtype: str or None
    """
    if (hostname is None) or (hostname == self.__hostname):
        return self.__hostname
    # when hostname change ensure old socket is close
    self.close()
    # IPv4 ?
    try:
        socket.inet_pton(socket.AF_INET, hostname)
        self.__hostname = hostname
        return self.__hostname
    except socket.error:
        pass
    # IPv6 ?
    try:
        socket.inet_pton(socket.AF_INET6, hostname)
        self.__hostname = hostname
        return self.__hostname
    except socket.error:
        pass
    # DNS name ?
    # Raw string fixes the invalid escape sequences ('\.', '\-') the old
    # plain-string pattern relied on, which Python 3 deprecates.
    # NOTE(review): pattern accepts only lowercase names at least two
    # characters long — presumably intentional; confirm before widening.
    if re.match(r'^[a-z][a-z0-9\.\-]+$', hostname):
        self.__hostname = hostname
        return self.__hostname
    else:
        return None
Get or set TCP port :param port: TCP port number or None for get value :type port: int or None :returns: TCP port or None if set fail :rtype: int or None
def port(self, port=None):
    """Get or set TCP port

    :param port: TCP port number or None for get value
    :type port: int or None
    :returns: TCP port or None if set fail
    :rtype: int or None
    """
    if (port is None) or (port == self.__port):
        return self.__port
    # when port change ensure old socket is close
    self.close()
    # valid port ?
    if 0 < int(port) < 65536:
        self.__port = int(port)
        return self.__port
    return None
Get or set unit ID field :param unit_id: unit ID (0 to 255) or None for get value :type unit_id: int or None :returns: unit ID or None if set fail :rtype: int or None
def unit_id(self, unit_id=None):
    """Get or set unit ID field

    :param unit_id: unit ID (0 to 255) or None for get value
    :type unit_id: int or None
    :returns: unit ID or None if set fail
    :rtype: int or None
    """
    if unit_id is None:
        return self.__unit_id
    if 0 <= int(unit_id) < 256:
        self.__unit_id = int(unit_id)
        return self.__unit_id
    return None
Get or set timeout field :param timeout: socket timeout in seconds or None for get value :type timeout: float or None :returns: timeout or None if set fail :rtype: float or None
def timeout(self, timeout=None):
    """Get or set timeout field

    :param timeout: socket timeout in seconds or None for get value
    :type timeout: float or None
    :returns: timeout or None if set fail
    :rtype: float or None
    """
    if timeout is None:
        return self.__timeout
    # accept only a sane timeout window (0 < t < 1 hour)
    if 0 < float(timeout) < 3600:
        self.__timeout = float(timeout)
        return self.__timeout
    return None
Get or set debug mode :param state: debug state or None for get value :type state: bool or None :returns: debug state or None if set fail :rtype: bool or None
def debug(self, state=None):
    """Get or set debug mode

    :param state: debug state or None for get value
    :type state: bool or None
    :returns: debug state or None if set fail
    :rtype: bool or None
    """
    if state is None:
        return self.__debug
    self.__debug = bool(state)
    return self.__debug
Get or set automatic TCP connect mode :param state: auto_open state or None for get value :type state: bool or None :returns: auto_open state or None if set fail :rtype: bool or None
def auto_open(self, state=None):
    """Get or set automatic TCP connect mode

    :param state: auto_open state or None for get value
    :type state: bool or None
    :returns: auto_open state or None if set fail
    :rtype: bool or None
    """
    if state is None:
        return self.__auto_open
    self.__auto_open = bool(state)
    return self.__auto_open
Get or set automatic TCP close mode (after each request) :param state: auto_close state or None for get value :type state: bool or None :returns: auto_close state or None if set fail :rtype: bool or None
def auto_close(self, state=None):
    """Get or set automatic TCP close mode (after each request)

    :param state: auto_close state or None for get value
    :type state: bool or None
    :returns: auto_close state or None if set fail
    :rtype: bool or None
    """
    if state is None:
        return self.__auto_close
    self.__auto_close = bool(state)
    return self.__auto_close
Get or set modbus mode (TCP or RTU) :param mode: mode (MODBUS_TCP/MODBUS_RTU) to set or None for get value :type mode: int :returns: mode or None if set fail :rtype: int or None
def mode(self, mode=None):
    """Get or set the modbus mode (TCP or RTU).

    :param mode: const.MODBUS_TCP or const.MODBUS_RTU, or None to read
    :type mode: int
    :returns: current mode, or None if the new value was rejected
    :rtype: int or None
    """
    if mode is None:
        return self.__mode
    # only the two known transport modes are accepted
    if mode not in (const.MODBUS_TCP, const.MODBUS_RTU):
        return None
    self.__mode = mode
    return self.__mode
Connect to modbus server (open TCP connection) :returns: connect status (True if open) :rtype: bool
def open(self):
    """Connect to modbus server (open TCP connection)

    :returns: connect status (True if open)
    :rtype: bool
    """
    # restart TCP if already open
    if self.is_open():
        self.close()
    # init socket and connect
    # list available sockets on the target host/port
    # AF_xxx : AF_INET -> IPv4, AF_INET6 -> IPv6,
    # AF_UNSPEC -> IPv6 (priority on some system) or 4
    # list available socket on target host
    # try each getaddrinfo candidate in turn; keep the first socket that
    # both creates and connects, leave self.__sock None on total failure
    for res in socket.getaddrinfo(self.__hostname, self.__port,
                                  socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, sock_type, proto, canon_name, sa = res
        try:
            self.__sock = socket.socket(af, sock_type, proto)
        except socket.error:
            self.__sock = None
            continue
        try:
            # apply the configured timeout before connecting
            self.__sock.settimeout(self.__timeout)
            self.__sock.connect(sa)
        except socket.error:
            self.__sock.close()
            self.__sock = None
            continue
        break
    # check connect status
    if self.__sock is not None:
        return True
    else:
        self.__last_error = const.MB_CONNECT_ERR
        self.__debug_msg('connect error')
        return False
Close TCP connection :returns: close status (True for close/None if already close) :rtype: bool or None
def close(self):
    """Close the TCP connection.

    :returns: True if the socket was closed, None if it was not open
    :rtype: bool or None
    """
    # nothing to do when no socket is open
    if not self.__sock:
        return None
    self.__sock.close()
    self.__sock = None
    return True
Modbus function READ_COILS (0x01) :param bit_addr: bit address (0 to 65535) :type bit_addr: int :param bit_nb: number of bits to read (1 to 2000) :type bit_nb: int :returns: bits list or None if error :rtype: list of bool or None
def read_coils(self, bit_addr, bit_nb=1): """Modbus function READ_COILS (0x01) :param bit_addr: bit address (0 to 65535) :type bit_addr: int :param bit_nb: number of bits to read (1 to 2000) :type bit_nb: int :returns: bits list or None if error :rtype: list of bool or None """ # check params if not (0 <= int(bit_addr) <= 65535): self.__debug_msg('read_coils(): bit_addr out of range') return None if not (1 <= int(bit_nb) <= 2000): self.__debug_msg('read_coils(): bit_nb out of range') return None if (int(bit_addr) + int(bit_nb)) > 65536: self.__debug_msg('read_coils(): read after ad 65535') return None # build frame tx_buffer = self._mbus_frame(const.READ_COILS, struct.pack('>HH', bit_addr, bit_nb)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check min frame body size if len(f_body) < 2: self.__last_error = const.MB_RECV_ERR self.__debug_msg('read_coils(): rx frame under min size') self.close() return None # extract field "byte count" rx_byte_count = struct.unpack("B", f_body[0:1])[0] # frame with bits value -> bits[] list f_bits = bytearray(f_body[1:]) # check rx_byte_count: match nb of bits request and check buffer size if not ((rx_byte_count >= int((bit_nb + 7) / 8)) and (rx_byte_count == len(f_bits))): self.__last_error = const.MB_RECV_ERR self.__debug_msg('read_coils(): rx byte count mismatch') self.close() return None # allocate a bit_nb size list bits = [None] * bit_nb # fill bits list with bit items for i, item in enumerate(bits): bits[i] = bool(f_bits[int(i / 8)] >> (i % 8) & 0x01) # return bits list return bits
Modbus function READ_INPUT_REGISTERS (0x04) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_nb: number of registers to read (1 to 125) :type reg_nb: int :returns: registers list or None if fail :rtype: list of int or None
def read_input_registers(self, reg_addr, reg_nb=1): """Modbus function READ_INPUT_REGISTERS (0x04) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_nb: number of registers to read (1 to 125) :type reg_nb: int :returns: registers list or None if fail :rtype: list of int or None """ # check params if not (0x0000 <= int(reg_addr) <= 0xffff): self.__debug_msg('read_input_registers(): reg_addr out of range') return None if not (0x0001 <= int(reg_nb) <= 0x007d): self.__debug_msg('read_input_registers(): reg_nb out of range') return None if (int(reg_addr) + int(reg_nb)) > 0x10000: self.__debug_msg('read_input_registers(): read after ad 65535') return None # build frame tx_buffer = self._mbus_frame(const.READ_INPUT_REGISTERS, struct.pack('>HH', reg_addr, reg_nb)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check min frame body size if len(f_body) < 2: self.__last_error = const.MB_RECV_ERR self.__debug_msg('read_input_registers(): rx frame under min size') self.close() return None # extract field "byte count" rx_byte_count = struct.unpack('B', f_body[0:1])[0] # frame with regs value f_regs = f_body[1:] # check rx_byte_count: buffer size must be consistent and have at least the requested number of registers if not ((rx_byte_count >= 2 * reg_nb) and (rx_byte_count == len(f_regs))): self.__last_error = const.MB_RECV_ERR self.__debug_msg('read_input_registers(): rx byte count mismatch') self.close() return None # allocate a reg_nb size list registers = [None] * reg_nb # fill registers list with register items for i, item in enumerate(registers): registers[i] = struct.unpack('>H', f_regs[i * 2:i * 2 + 2])[0] # return registers list return registers
Modbus function WRITE_SINGLE_COIL (0x05) :param bit_addr: bit address (0 to 65535) :type bit_addr: int :param bit_value: bit value to write :type bit_value: bool :returns: True if write ok or None if fail :rtype: bool or None
def write_single_coil(self, bit_addr, bit_value): """Modbus function WRITE_SINGLE_COIL (0x05) :param bit_addr: bit address (0 to 65535) :type bit_addr: int :param bit_value: bit value to write :type bit_value: bool :returns: True if write ok or None if fail :rtype: bool or None """ # check params if not (0 <= int(bit_addr) <= 65535): self.__debug_msg('write_single_coil(): bit_addr out of range') return None # build frame bit_value = 0xFF if bit_value else 0x00 tx_buffer = self._mbus_frame(const.WRITE_SINGLE_COIL, struct.pack('>HBB', bit_addr, bit_value, 0)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check fix frame size if len(f_body) != 4: self.__last_error = const.MB_RECV_ERR self.__debug_msg('write_single_coil(): rx frame size error') self.close() return None # register extract (rx_bit_addr, rx_bit_value, rx_padding) = struct.unpack('>HBB', f_body[:4]) # check bit write is_ok = (rx_bit_addr == bit_addr) and (rx_bit_value == bit_value) return True if is_ok else None
Modbus function WRITE_SINGLE_REGISTER (0x06) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_value: register value to write :type reg_value: int :returns: True if write ok or None if fail :rtype: bool or None
def write_single_register(self, reg_addr, reg_value): """Modbus function WRITE_SINGLE_REGISTER (0x06) :param reg_addr: register address (0 to 65535) :type reg_addr: int :param reg_value: register value to write :type reg_value: int :returns: True if write ok or None if fail :rtype: bool or None """ # check params if not (0 <= int(reg_addr) <= 65535): self.__debug_msg('write_single_register(): reg_addr out of range') return None if not (0 <= int(reg_value) <= 65535): self.__debug_msg('write_single_register(): reg_value out of range') return None # build frame tx_buffer = self._mbus_frame(const.WRITE_SINGLE_REGISTER, struct.pack('>HH', reg_addr, reg_value)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check fix frame size if len(f_body) != 4: self.__last_error = const.MB_RECV_ERR self.__debug_msg('write_single_register(): rx frame size error') self.close() return None # register extract rx_reg_addr, rx_reg_value = struct.unpack('>HH', f_body) # check register write is_ok = (rx_reg_addr == reg_addr) and (rx_reg_value == reg_value) return True if is_ok else None
Modbus function WRITE_MULTIPLE_COILS (0x0F) :param bits_addr: bits address (0 to 65535) :type bits_addr: int :param bits_value: bits values to write :type bits_value: list :returns: True if write ok or None if fail :rtype: bool or None
def write_multiple_coils(self, bits_addr, bits_value):
    """Modbus function WRITE_MULTIPLE_COILS (0x0F)

    :param bits_addr: bits address (0 to 65535)
    :type bits_addr: int
    :param bits_value: bits values to write
    :type bits_value: list
    :returns: True if write ok or None if fail
    :rtype: bool or None
    """
    # number of bits to write
    bits_nb = len(bits_value)
    # check params
    if not (0x0000 <= int(bits_addr) <= 0xffff):
        self.__debug_msg('write_multiple_coils(): bits_addr out of range')
        return None
    if not (0x0001 <= int(bits_nb) <= 0x07b0):
        self.__debug_msg('write_multiple_coils(): number of bits out of range')
        return None
    # reject writes that would run past the last valid address
    if (int(bits_addr) + int(bits_nb)) > 0x10000:
        self.__debug_msg('write_multiple_coils(): write after ad 65535')
        return None
    # build frame: pack the coil values LSB-first into ceil(bits_nb / 8) bytes
    bytes_l = bytearray(int((bits_nb + 7) / 8))
    for i, bit in enumerate(bits_value):
        if bit:
            bytes_l[int(i / 8)] = set_bit(bytes_l[int(i / 8)], i % 8)
    bits_val_str = bytes(bytes_l)
    bytes_nb = len(bits_val_str)
    # format modbus frame body: start address, quantity, byte count, data
    body = struct.pack('>HHB', bits_addr, bits_nb, bytes_nb) + bits_val_str
    tx_buffer = self._mbus_frame(const.WRITE_MULTIPLE_COILS, body)
    # send request
    s_send = self._send_mbus(tx_buffer)
    # check error
    if not s_send:
        return None
    # receive
    f_body = self._recv_mbus()
    # check error
    if not f_body:
        return None
    # check fix frame size (response body echoes address + quantity, 4 bytes)
    if len(f_body) != 4:
        self.__last_error = const.MB_RECV_ERR
        self.__debug_msg('write_multiple_coils(): rx frame size error')
        self.close()
        return None
    # response extract
    (rx_bit_addr, rx_bit_nb) = struct.unpack('>HH', f_body[:4])
    # check coils write: verify the echoed quantity as well as the address
    # (the quantity was previously unpacked but never checked, which could
    # hide a partial write acknowledged by the server)
    is_ok = (rx_bit_addr == bits_addr) and (rx_bit_nb == bits_nb)
    return True if is_ok else None
Modbus function WRITE_MULTIPLE_REGISTERS (0x10) :param regs_addr: registers address (0 to 65535) :type regs_addr: int :param regs_value: registers values to write :type regs_value: list :returns: True if write ok or None if fail :rtype: bool or None
def write_multiple_registers(self, regs_addr, regs_value):
    """Modbus function WRITE_MULTIPLE_REGISTERS (0x10)

    :param regs_addr: registers address (0 to 65535)
    :type regs_addr: int
    :param regs_value: registers values to write
    :type regs_value: list
    :returns: True if write ok or None if fail
    :rtype: bool or None
    """
    # number of registers to write
    regs_nb = len(regs_value)
    # check params
    if not (0x0000 <= int(regs_addr) <= 0xffff):
        self.__debug_msg('write_multiple_registers(): regs_addr out of range')
        return None
    if not (0x0001 <= int(regs_nb) <= 0x007b):
        self.__debug_msg('write_multiple_registers(): number of registers out of range')
        return None
    # reject writes that would run past the last valid address
    if (int(regs_addr) + int(regs_nb)) > 0x10000:
        self.__debug_msg('write_multiple_registers(): write after ad 65535')
        return None
    # build frame
    # format reg value string (each register as a 16-bit big-endian word)
    regs_val_str = b""
    for reg in regs_value:
        # check current register value
        if not (0 <= int(reg) <= 0xffff):
            self.__debug_msg('write_multiple_registers(): regs_value out of range')
            return None
        # pack register for build frame
        regs_val_str += struct.pack('>H', reg)
    bytes_nb = len(regs_val_str)
    # format modbus frame body: start address, quantity, byte count, data
    body = struct.pack('>HHB', regs_addr, regs_nb, bytes_nb) + regs_val_str
    tx_buffer = self._mbus_frame(const.WRITE_MULTIPLE_REGISTERS, body)
    # send request
    s_send = self._send_mbus(tx_buffer)
    # check error
    if not s_send:
        return None
    # receive
    f_body = self._recv_mbus()
    # check error
    if not f_body:
        return None
    # check fix frame size (response body echoes address + quantity, 4 bytes)
    if len(f_body) != 4:
        self.__last_error = const.MB_RECV_ERR
        self.__debug_msg('write_multiple_registers(): rx frame size error')
        self.close()
        return None
    # response extract
    (rx_reg_addr, rx_reg_nb) = struct.unpack('>HH', f_body[:4])
    # check regs write: verify the echoed quantity as well as the address
    # (the quantity was previously unpacked but never checked, which could
    # hide a partial write acknowledged by the server)
    is_ok = (rx_reg_addr == regs_addr) and (rx_reg_nb == regs_nb)
    return True if is_ok else None
Wait data available for socket read :returns: True if data available or None if timeout or socket error :rtype: bool or None
def _can_read(self):
    """Block until the socket is readable or the timeout expires.

    :returns: True if data is available, None on timeout or closed socket
    :rtype: bool or None
    """
    if self.__sock is None:
        return None
    readable = select.select([self.__sock], [], [], self.__timeout)[0]
    if not readable:
        # nothing arrived within the timeout window
        self.__last_error = const.MB_TIMEOUT_ERR
        self.__debug_msg('timeout error')
        self.close()
        return None
    return True
Send data over current socket :param data: registers value to write :type data: str (Python2) or class bytes (Python3) :returns: True if send ok or None if error :rtype: bool or None
def _send(self, data):
    """Send raw bytes over the current socket.

    :param data: bytes to send
    :type data: str (Python2) or class bytes (Python3)
    :returns: number of bytes sent, or None on error
    :rtype: int or None
    """
    # check link
    if self.__sock is None:
        self.__debug_msg('call _send on close socket')
        return None
    try:
        sent = self.__sock.send(data)
    except socket.error:
        sent = None
    # a failed or partial send is treated as a fatal link error
    if sent != len(data):
        self.__last_error = const.MB_SEND_ERR
        self.__debug_msg('_send error')
        self.close()
        return None
    return sent
Receive data over current socket :param max_size: number of bytes to receive :type max_size: int :returns: receive data or None if error :rtype: str (Python2) or class bytes (Python3) or None
def _recv(self, max_size):
    """Receive up to max_size bytes from the current socket.

    :param max_size: maximum number of bytes to receive
    :type max_size: int
    :returns: received data or None if error
    :rtype: str (Python2) or class bytes (Python3) or None
    """
    # wait until data is available (enforces the configured timeout)
    if not self._can_read():
        self.close()
        return None
    try:
        data = self.__sock.recv(max_size)
    except socket.error:
        data = None
    # empty read means the peer closed; None means a socket error
    if not data:
        self.__last_error = const.MB_RECV_ERR
        self.__debug_msg('_recv error')
        self.close()
        return None
    return data
Receive data over the current socket, looping until all bytes are received (avoids TCP fragmentation) :param size: number of bytes to receive :type size: int :returns: received data or None if error :rtype: str (Python2) or class bytes (Python3) or None
def _recv_all(self, size):
    """Receive exactly size bytes, looping to defeat TCP fragmentation.

    :param size: number of bytes to receive
    :type size: int
    :returns: received data or None if error
    :rtype: str (Python2) or class bytes (Python3) or None
    """
    chunks = []
    received = 0
    while received < size:
        chunk = self._recv(size - received)
        if not chunk:
            return None
        chunks.append(chunk)
        received += len(chunk)
    # join at the end instead of repeated concatenation
    return b''.join(chunks)
Send modbus frame :param frame: modbus frame to send (with MBAP for TCP/CRC for RTU) :type frame: str (Python2) or class bytes (Python3) :returns: number of bytes send or None if error :rtype: int or None
def _send_mbus(self, frame):
    """Send a complete modbus frame (with MBAP for TCP / CRC for RTU).

    :param frame: modbus frame to send
    :type frame: str (Python2) or class bytes (Python3)
    :returns: number of bytes sent or None if error
    :rtype: int or None
    """
    # auto_open mode: (re)connect on demand before sending
    if self.__auto_open and not self.is_open():
        self.open()
    bytes_send = self._send(frame)
    if not bytes_send:
        return None
    if self.__debug:
        self._pretty_dump('Tx', frame)
    return bytes_send
Receive a modbus frame :returns: modbus frame body or None if error :rtype: str (Python2) or class bytes (Python3) or None
def _recv_mbus(self):
    """Receive a modbus response and return its PDU body.

    TCP mode: read and validate the 7-byte MBAP header, then the frame
    body. RTU mode: read the whole ADU, check CRC and slave address.
    In both cases the function code is inspected for a modbus exception
    response.

    :returns: modbus frame body (function data, without the function
              code) or None if error
    :rtype: str (Python2) or class bytes (Python3) or None
    """
    # receive
    # modbus TCP receive
    if self.__mode == const.MODBUS_TCP:
        # 7 bytes header (mbap)
        rx_buffer = self._recv_all(7)
        # check recv
        if not (rx_buffer and len(rx_buffer) == 7):
            self.__last_error = const.MB_RECV_ERR
            self.__debug_msg('_recv MBAP error')
            self.close()
            return None
        rx_frame = rx_buffer
        # decode header
        (rx_hd_tr_id, rx_hd_pr_id,
         rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_frame)
        # check header: transaction id must match the last request,
        # protocol id is always 0, unit id must match ours
        if not ((rx_hd_tr_id == self.__hd_tr_id) and
                (rx_hd_pr_id == 0) and
                (rx_hd_length < 256) and
                (rx_hd_unit_id == self.__unit_id)):
            self.__last_error = const.MB_RECV_ERR
            self.__debug_msg('MBAP format error')
            if self.__debug:
                # best-effort read of the advertised body for the dump;
                # _recv_all() can return None here, so guard the concat
                # (previously this raised TypeError on a short read)
                dump_tail = self._recv_all(rx_hd_length - 1)
                if dump_tail:
                    rx_frame += dump_tail
                self._pretty_dump('Rx', rx_frame)
            self.close()
            return None
        # end of frame
        rx_buffer = self._recv_all(rx_hd_length - 1)
        if not (rx_buffer and
                (len(rx_buffer) == rx_hd_length - 1) and
                (len(rx_buffer) >= 2)):
            self.__last_error = const.MB_RECV_ERR
            self.__debug_msg('_recv frame body error')
            self.close()
            return None
        rx_frame += rx_buffer
        # dump frame
        if self.__debug:
            self._pretty_dump('Rx', rx_frame)
        # body decode: function code then function data
        rx_bd_fc = struct.unpack('B', rx_buffer[0:1])[0]
        f_body = rx_buffer[1:]
    # modbus RTU receive
    elif self.__mode == const.MODBUS_RTU:
        # receive modbus RTU frame (max size is 256 bytes)
        rx_buffer = self._recv(256)
        # on _recv error
        if not rx_buffer:
            return None
        rx_frame = rx_buffer
        # dump frame
        if self.__debug:
            self._pretty_dump('Rx', rx_frame)
        # RTU frame min size is 5 bytes
        if len(rx_buffer) < 5:
            self.__last_error = const.MB_RECV_ERR
            self.__debug_msg('short frame error')
            self.close()
            return None
        # check CRC
        if not self._crc_is_ok(rx_frame):
            self.__last_error = const.MB_CRC_ERR
            self.__debug_msg('CRC error')
            self.close()
            return None
        # body decode
        (rx_unit_id, rx_bd_fc) = struct.unpack("BB", rx_frame[:2])
        # check the response comes from the addressed slave
        if not (rx_unit_id == self.__unit_id):
            self.__last_error = const.MB_RECV_ERR
            self.__debug_msg('unit ID mismatch error')
            self.close()
            return None
        # format f_body: remove unit ID, function code and CRC 2 last bytes
        f_body = rx_frame[2:-2]
    # for auto_close mode, close socket after each request
    if self.__auto_close:
        self.close()
    # check except: error responses set the high bit of the function code
    if rx_bd_fc > 0x80:
        # except code
        exp_code = struct.unpack('B', f_body[0:1])[0]
        self.__last_error = const.MB_EXCEPT_ERR
        self.__last_except = exp_code
        self.__debug_msg('except (code ' + str(exp_code) + ')')
        return None
    else:
        # regular response: return the function data
        return f_body
Build modbus frame (add MBAP for Modbus/TCP, slave AD + CRC for RTU) :param fc: modbus function code :type fc: int :param body: modbus frame body :type body: str (Python2) or class bytes (Python3) :returns: modbus frame :rtype: str (Python2) or class bytes (Python3)
def _mbus_frame(self, fc, body): """Build modbus frame (add MBAP for Modbus/TCP, slave AD + CRC for RTU) :param fc: modbus function code :type fc: int :param body: modbus frame body :type body: str (Python2) or class bytes (Python3) :returns: modbus frame :rtype: str (Python2) or class bytes (Python3) """ # build frame body f_body = struct.pack('B', fc) + body # modbus/TCP if self.__mode == const.MODBUS_TCP: # build frame ModBus Application Protocol header (mbap) self.__hd_tr_id = random.randint(0, 65535) tx_hd_pr_id = 0 tx_hd_length = len(f_body) + 1 f_mbap = struct.pack('>HHHB', self.__hd_tr_id, tx_hd_pr_id, tx_hd_length, self.__unit_id) return f_mbap + f_body # modbus RTU elif self.__mode == const.MODBUS_RTU: # format [slave addr(unit_id)]frame_body[CRC16] slave_ad = struct.pack('B', self.__unit_id) return self._add_crc(slave_ad + f_body)
Print modbus/TCP frame ('[header]body') or RTU ('body[CRC]') on stdout :param label: modbus function code :type label: str :param data: modbus frame :type data: str (Python2) or class bytes (Python3)
def _pretty_dump(self, label, data):
    """Print a hex dump of a modbus frame on stdout.

    TCP frames are shown as '[MBAP header] body', RTU frames as
    'body [CRC]'.

    :param label: dump label (e.g. 'Tx' or 'Rx')
    :type label: str
    :param data: modbus frame
    :type data: str (Python2) or class bytes (Python3)
    """
    # split data string items to a list of hex value
    dump = ['%02X' % c for c in bytearray(data)]
    # decorate the dump: TCP has a 7-byte MBAP header, RTU a 2-byte CRC
    if self.__mode == const.MODBUS_TCP:
        if len(dump) > 6:
            # [MBAP] ...
            dump[0] = '[' + dump[0]
            dump[6] += ']'
    elif self.__mode == const.MODBUS_RTU:
        if len(dump) > 4:
            # ... [CRC]
            dump[-2] = '[' + dump[-2]
            dump[-1] += ']'
    # print result; join instead of the previous quadratic string
    # concatenation loop (also drops the stray trailing space)
    print(label)
    print(' '.join(dump))
Add CRC to modbus frame (for RTU mode) :param frame: modbus RTU frame :type frame: str (Python2) or class bytes (Python3) :returns: modbus RTU frame with CRC :rtype: str (Python2) or class bytes (Python3)
def _add_crc(self, frame):
    """Append the CRC16 (little-endian on the wire) to an RTU frame.

    :param frame: modbus RTU frame
    :type frame: str (Python2) or class bytes (Python3)
    :returns: modbus RTU frame with CRC
    :rtype: str (Python2) or class bytes (Python3)
    """
    return frame + struct.pack('<H', crc16(frame))
Get the list of bits of val_int integer (default size is 16 bits) Return bits list, least significant bit first. Use list.reverse() if needed. :param val_int: integer value :type val_int: int :param val_size: bit size of integer (word = 16, long = 32) (optional) :type val_size: int :returns: list of boolean "bits" (least significant first) :rtype: list
def get_bits_from_int(val_int, val_size=16):
    """Return the bits of val_int as a list of booleans, LSB first.

    Use list.reverse() on the result if MSB-first order is needed.

    :param val_int: integer value
    :type val_int: int
    :param val_size: bit size of integer (word = 16, long = 32) (optional)
    :type val_size: int
    :returns: list of boolean "bits" (least significant first)
    :rtype: list
    """
    return [bool((val_int >> shift) & 0x01) for shift in range(val_size)]
Word list (16 bits int) to long list (32 bits int) By default word_list_to_long() uses big endian order. To use little endian, set the big_endian param to False. :param val_list: list of 16 bits int value :type val_list: list :param big_endian: True for big endian/False for little (optional) :type big_endian: bool :returns: list of 32 bits int value :rtype: list
def word_list_to_long(val_list, big_endian=True):
    """Combine pairs of 16-bit words into 32-bit longs.

    Big endian order (the default) takes the first word of each pair as
    the high half; set big_endian to False for the reverse.

    :param val_list: list of 16 bits int value
    :type val_list: list
    :param big_endian: True for big endian/False for little (optional)
    :type big_endian: bool
    :returns: list of 32 bits int value
    :rtype: list
    """
    longs = []
    # walk the words two at a time; a trailing odd word is ignored
    for idx in range(0, len(val_list) - 1, 2):
        if big_endian:
            longs.append((val_list[idx] << 16) + val_list[idx + 1])
        else:
            longs.append((val_list[idx + 1] << 16) + val_list[idx])
    return longs
Long list (32 bits int) to word list (16 bits int) By default long_list_to_word() uses big endian order. To use little endian, set the big_endian param to False. :param val_list: list of 32 bits int value :type val_list: list :param big_endian: True for big endian/False for little (optional) :type big_endian: bool :returns: list of 16 bits int value :rtype: list
def long_list_to_word(val_list, big_endian=True):
    """Split 32-bit longs into pairs of 16-bit words.

    Big endian order (the default) emits the high half first; set
    big_endian to False for the reverse.

    :param val_list: list of 32 bits int value
    :type val_list: list
    :param big_endian: True for big endian/False for little (optional)
    :type big_endian: bool
    :returns: list of 16 bits int value
    :rtype: list
    """
    words = []
    for val in val_list:
        hi = val >> 16
        lo = val & 0xffff
        if big_endian:
            words.extend((hi, lo))
        else:
            words.extend((lo, hi))
    return words
Compute CRC16 :param frame: frame :type frame: str (Python2) or class bytes (Python3) :returns: CRC16 :rtype: int
def crc16(frame):
    """Compute the modbus CRC16 of a frame (poly 0xA001, init 0xFFFF).

    :param frame: frame
    :type frame: str (Python2) or class bytes (Python3)
    :returns: CRC16
    :rtype: int
    """
    crc = 0xFFFF
    for byte in bytearray(frame):
        crc ^= byte
        for _ in range(8):
            # shift right, xor the polynomial if the dropped bit was set
            if crc & 1:
                crc = (crc >> 1) ^ 0xA001
            else:
                crc >>= 1
    return crc
Wrap text to length characters, breaking when target length is reached, taking into account character width. Used to wrap lines which cannot be wrapped on whitespace.
def _wc_hard_wrap(line, length):
    """
    Hard-wrap *line* into chunks no wider than *length* display cells,
    using wcwidth to account for wide characters. Used for lines which
    cannot be wrapped on whitespace.
    """
    buf = []
    width = 0
    for char in line:
        char_width = wcwidth(char)
        # start a new chunk before this char would overflow the target
        if width + char_width > length:
            yield "".join(buf)
            buf = []
            width = 0
        buf.append(char)
        width += char_width
    if buf:
        yield "".join(buf)
Wrap text to given length, breaking on whitespace and taking into account character width. Meant for use on a single line or paragraph. Will destroy spacing between words and paragraphs and any indentation.
def wc_wrap(text, length):
    """
    Soft-wrap *text* to *length* display cells, breaking on whitespace
    and accounting for wide characters. Meant for a single line or
    paragraph; collapses inter-word spacing and indentation.
    """
    def _flush(words, width):
        # emit the joined words; fall back to hard wrapping when even a
        # single "word" is wider than the target
        joined = " ".join(words)
        if width <= length:
            yield joined
        else:
            yield from _wc_hard_wrap(joined, length)

    line_words = []
    line_len = 0
    for word in re.split(r"\s+", text.strip()):
        word_len = wcswidth(word)
        if line_words and line_len + word_len > length:
            yield from _flush(line_words, line_len)
            line_words = []
            line_len = 0
        line_words.append(word)
        line_len += word_len + 1  # +1 accounts for the inter-word space
    if line_words:
        yield from _flush(line_words, line_len)
Truncates text to given length, taking into account wide characters. If truncated, the last char is replaced by an ellipsis.
def trunc(text, length):
    """
    Truncate text to the given display length, taking wide characters
    into account. If truncated, the last character is replaced by an
    ellipsis.
    """
    if length < 1:
        raise ValueError("length should be 1 or larger")

    # Strip whitespace first so no unnecessary truncation is done.
    text = text.strip()
    full_width = wcswidth(text)
    if full_width <= length:
        return text

    # Character count and display width differ for wide chars, so walk
    # backwards with wcwidth to find how many chars must be dropped.
    drop_count = 0
    dropped_width = 0
    for char in reversed(text):
        drop_count += 1
        dropped_width += wcwidth(char)
        if full_width - dropped_width <= length:
            break

    # drop one extra char to make room for the ellipsis
    return text[:-(drop_count + 1)].strip() + '…'
Pads text to given length, taking into account wide characters.
def pad(text, length):
    """Right-pad text with spaces to the given display length (wide-char aware)."""
    missing = length - wcswidth(text)
    return (text + ' ' * missing) if missing > 0 else text
Makes text fit the given length by padding or truncating it.
def fit_text(text, length):
    """Make text exactly the given display length by truncating or padding."""
    width = wcswidth(text)
    if width > length:
        return trunc(text, length)
    if width < length:
        return pad(text, length)
    return text
Attempt to extract an error message from response body
def _get_error_message(response): """Attempt to extract an error message from response body""" try: data = response.json() if "error_description" in data: return data['error_description'] if "error" in data: return data['error'] except Exception: pass return "Unknown error"
Creates a config file. Attempts to load data from legacy config files if they exist.
def make_config(path):
    """Creates a config file.

    Attempts to load data from legacy config files if they exist.
    """
    apps, user = load_legacy_config()

    config = {
        "apps": {app.instance: app._asdict() for app in apps},
        "users": {user_id(user): user._asdict()} if user else {},
        "active_user": user_id(user) if user else None,
    }

    print_out("Creating config file at <blue>{}</blue>".format(path))

    # Ensure dir exists
    os.makedirs(dirname(path), exist_ok=True)

    with open(path, 'w') as f:
        json.dump(config, f, indent=True)
Posts a new status. https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#posting-a-new-status
def post_status(
    app,
    user,
    status,
    visibility='public',
    media_ids=None,
    sensitive=False,
    spoiler_text=None,
    in_reply_to_id=None
):
    """
    Posts a new status.
    https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#posting-a-new-status
    """
    # Idempotency key assures the same status is not posted multiple times
    # if the request is retried.
    headers = {"Idempotency-Key": uuid.uuid4().hex}

    payload = {
        'status': status,
        'media_ids[]': media_ids,
        'visibility': visibility,
        'sensitive': str_bool(sensitive),
        'spoiler_text': spoiler_text,
        'in_reply_to_id': in_reply_to_id,
    }

    return http.post(app, user, '/api/v1/statuses', payload, headers=headers).json()
Deletes a status with given ID. https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#deleting-a-status
def delete_status(app, user, status_id):
    """
    Deletes a status with given ID.
    https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#deleting-a-status
    """
    url = '/api/v1/statuses/{}'.format(status_id)
    return http.delete(app, user, url)
Given timeline response headers, returns the path to the next batch
def _get_next_path(headers): """Given timeline response headers, returns the path to the next batch""" links = headers.get('Link', '') matches = re.match('<([^>]+)>; rel="next"', links) if matches: parsed = urlparse(matches.group(1)) return "?".join([parsed.path, parsed.query])
Reply to the selected status
def reply(self):
    """Reply to the selected status via a compose modal.

    Posts the reply, inserts the new status at the top of the timeline
    and keeps the currently selected status highlighted.
    """
    status = self.get_selected_status()
    app, user = self.app, self.user
    if not app or not user:
        self.footer.draw_message("You must be logged in to reply", Color.RED)
        return
    # pre-fill the content warning from the replied-to status
    # NOTE(review): '\n'.join() over spoiler_text joins per character if
    # it is a plain str — presumably it is a list of lines; confirm the
    # type produced upstream
    compose_modal = ComposeModal(self.stdscr,
                                 default_cw='\n'.join(status['spoiler_text']) or None,
                                 resize_callback=self.on_resize)
    content, cw = compose_modal.loop()
    # the modal obscured the screen; repaint everything
    self.full_redraw()
    if content is None:
        # compose was cancelled
        return
    elif len(content) == 0:
        self.footer.draw_message("Status must contain content", Color.RED)
        return
    self.footer.draw_message("Submitting reply...", Color.YELLOW)
    # a content warning implies the sensitive flag
    response = api.post_status(app, user, content, spoiler_text=cw,
                               sensitive=cw is not None,
                               in_reply_to_id=status['id'])
    status = parse_status(response)
    self.statuses.insert(0, status)
    # the list grew by one at the top, so bump the selection index to
    # keep the same status highlighted
    self.selected += 1
    self.left.draw_statuses(self.statuses, self.selected)
    self.footer.draw_message("✓ Reply posted", Color.GREEN)
Reblog or unreblog selected status.
def toggle_reblog(self):
    """Reblog or unreblog selected status."""
    status = self.get_selected_status()
    assert status
    app, user = self.app, self.user
    if not app or not user:
        self.footer.draw_message("You must be logged in to reblog", Color.RED)
        return
    status_id = status['id']
    # pick the action and messages for the direction we are toggling
    if status['reblogged']:
        new_state, action = False, api.unreblog
        progress, done = "Unboosting status...", "✓ Status unboosted"
    else:
        new_state, action = True, api.reblog
        progress, done = "Boosting status...", "✓ Status boosted"
    # flip the local flag first, then call the API (matches prior behavior)
    status['reblogged'] = new_state
    self.footer.draw_message(progress, Color.YELLOW)
    action(app, user, status_id)
    self.footer.draw_message(done, Color.GREEN)
    self.right.draw(status)
Favourite or unfavourite selected status.
def toggle_favourite(self):
    """Favourite or unfavourite selected status."""
    status = self.get_selected_status()
    assert status
    app, user = self.app, self.user
    if not app or not user:
        self.footer.draw_message("You must be logged in to favourite", Color.RED)
        return
    status_id = status['id']
    if status['favourited']:
        self.footer.draw_message("Undoing favourite status...", Color.YELLOW)
        api.unfavourite(app, user, status_id)
        self.footer.draw_message("✓ Status unfavourited", Color.GREEN)
    else:
        self.footer.draw_message("Favourite status...", Color.YELLOW)
        api.favourite(app, user, status_id)
        self.footer.draw_message("✓ Status favourited", Color.GREEN)
    # Flip the local flag only after the API call completed.
    status['favourited'] = not status['favourited']
    # Redraw the detail pane so the updated favourite state is visible.
    self.right.draw(status)
Move to the previous status in the timeline.
def select_previous(self):
    """Move the selection one status up in the timeline."""
    self.footer.clear_message()

    current = self.selected
    if current == 0:
        # Already at the top; nothing to move to.
        self.footer.draw_message("Cannot move beyond first toot.", Color.GREEN)
        return

    self.selected = current - 1
    self.redraw_after_selection_change(current, current - 1)
Move to the next status in the timeline.
def select_next(self):
    """Move to the next status in the timeline."""
    self.footer.clear_message()

    old_index = self.selected
    new_index = self.selected + 1

    # When we're at the end of the loaded statuses, fetch the next batch
    # and redraw the list before moving the selection.
    if self.selected + 1 >= len(self.statuses):
        self.fetch_next()
        # NOTE(review): draws with index `new_index - 1` (the still-current
        # status) before self.selected is updated — confirm this matches
        # draw_statuses' expected arguments.
        self.left.draw_statuses(self.statuses, self.selected, new_index - 1)
        self.draw_footer_status()

    self.selected = new_index
    self.redraw_after_selection_change(old_index, new_index)
Perform a full redraw of the UI.
def full_redraw(self):
    """Redraw every pane of the UI from current state."""
    selected_status = self.get_selected_status()
    self.left.draw_statuses(self.statuses, self.selected)
    self.right.draw(selected_status)
    self.header.draw(self.user)
    self.draw_footer_status()
Get the bottom-right corner of some text as would be drawn by draw_lines
def size_as_drawn(lines, screen_width):
    """Return the (row, col) of the bottom-right corner of text as drawn by draw_lines."""
    row = 0
    col = 0
    for line in lines:
        # A line that wraps to nothing still occupies one empty screen row.
        pieces = list(wc_wrap(line, screen_width)) or [""]
        row += len(pieces)
        col = len(pieces[-1])
    # Convert counts to zero-based coordinates; an empty last row stays at 0.
    return row - 1, max(col - 1, 0)
For a given account name, returns the Account object. Raises an exception if not found.
def _find_account(app, user, account_name):
    """For a given account name, return the Account object.

    Raises ConsoleError when the name is empty or no account matches.
    """
    if not account_name:
        raise ConsoleError("Empty account name given")

    accounts = api.search_accounts(app, user, account_name)

    # Accounts are reported without the leading "@"; normalize for comparison.
    wanted = account_name[1:] if account_name.startswith("@") else account_name

    for account in accounts:
        if account['acct'] == wanted:
            return account

    raise ConsoleError("Account not found")
When using browser login, the username was not stored, so look it up
def add_username(user, apps):
    """When using browser login, the username was not stored, so look it up.

    Returns a User with the username filled in via verify_credentials, or
    None when no user is given or no app matches the user's instance.
    """
    if not user:
        return None
    # Keep only apps registered against this user's instance.
    apps = [a for a in apps if a.instance == user.instance]
    if not apps:
        return None
    from toot.api import verify_credentials
    creds = verify_credentials(apps.pop(), user)
    return User(user.instance, creds['username'], user.access_token)
Converts html to text, strips all tags.
def get_text(html):
    """Strip all HTML tags from `html` and return NFKC-normalized plain text."""
    # BeautifulSoup warns when its input looks like a filename (e.g. a lone
    # dot); we only ever pass markup, so silence those warnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        soup = BeautifulSoup(html.replace('&apos;', "'"), "html.parser")
        text = soup.get_text()
    return unicodedata.normalize('NFKC', text)
Attempt to convert html to plain text while keeping line breaks. Returns a list of paragraphs, each being a list of lines.
def parse_html(html):
    """Convert html to plain text while keeping line breaks.

    Returns a list of paragraphs, each being a list of plain-text lines.
    """
    # Paragraph tags delimit paragraphs; drop empty fragments.
    chunks = [p for p in re.split("</?p[^>]*>", html) if p]
    # Split each paragraph on <br> variants, then strip remaining tags.
    return [
        [get_text(line) for line in re.split("<br */?>", chunk)]
        for chunk in chunks
    ]
Given a Status contents in HTML, converts it into lines of plain text. Returns a generator yielding lines of content.
def format_content(content):
    """Given a Status contents in HTML, yield lines of plain text.

    Paragraphs are separated by a single empty line.
    """
    for index, paragraph in enumerate(parse_html(content)):
        if index > 0:
            # Blank separator line between consecutive paragraphs.
            yield ""
        yield from paragraph
Lets user input multiple lines of text, terminated by EOF.
def multiline_input():
    """Read lines of text from stdin until EOF; return them joined and stripped."""
    collected = []
    try:
        while True:
            collected.append(input())
    except EOFError:
        # EOF terminates input; fall through with what we have.
        pass
    return "\n".join(collected).strip()
Converts the table to a dict.
def to_dict(self):
    """Represent the table as a dict of its name, kind, and row data."""
    rows = [row.to_dict() for row in self]
    return {"name": self.table_name, "kind": self.table_kind, "data": rows}
Converts a string to a datetime.
def to_datetime(value):
    """Convert a raw value to a datetime, or None when value is None."""
    if value is None:
        return None
    if isinstance(value, six.integer_types):
        # NOTE(review): dateutil's parser.parse expects a string; passing an
        # int here looks like it would raise — confirm what callers send.
        return parser.parse(value)
    # ISO-8601 strings are the common case.
    return parser.isoparse(value)
Converts a string to a timedelta.
def to_timedelta(value):
    """Convert a raw Kusto timespan value to a timedelta, or None for None.

    Accepts either a numeric tick count or a "[-]d.hh:mm:ss[.fff]" string.
    Raises ValueError for strings that do not match the timespan format.
    """
    if value is None:
        return None

    # Numeric values are ticks (100ns units): 10 ticks per microsecond.
    if isinstance(value, (six.integer_types, float)):
        return timedelta(microseconds=(float(value) / 10))

    match = _TIMESPAN_PATTERN.match(value)
    if not match:
        raise ValueError("Timespan value '{}' cannot be decoded".format(value))

    sign = -1 if match.group(1) == "-" else 1
    return sign * timedelta(
        days=int(match.group("d") or 0),
        hours=int(match.group("h")),
        minutes=int(match.group("m")),
        seconds=float(match.group("s")),
    )
Enqueuing an ingest command from local files. :param pandas.DataFrame df: input dataframe to ingest. :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
def ingest_from_dataframe(self, df, ingestion_properties):
    """Enqueuing an ingest command from local files.

    :param pandas.DataFrame df: input dataframe to ingest.
    :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
    """
    from pandas import DataFrame

    if not isinstance(df, DataFrame):
        raise ValueError("Expected DataFrame instance, found {}".format(type(df)))

    # Dump the dataframe to a gzipped CSV temp file (no header, no index).
    file_name = "df_{timestamp}_{pid}.csv.gz".format(timestamp=int(time.time()), pid=os.getpid())
    temp_file_path = os.path.join(tempfile.gettempdir(), file_name)

    df.to_csv(temp_file_path, index=False, encoding="utf-8", header=False, compression="gzip")

    fd = FileDescriptor(temp_file_path)

    # Blob name encodes database/table/guid so concurrent uploads stay unique.
    blob_name = "{db}__{table}__{guid}__{file}".format(
        db=ingestion_properties.database, table=ingestion_properties.table, guid=uuid.uuid4(), file=file_name
    )

    containers = self._resource_manager.get_containers()
    # Random container choice spreads load across storage accounts.
    container_details = random.choice(containers)
    storage_client = CloudStorageAccount(container_details.storage_account_name, sas_token=container_details.sas)
    blob_service = storage_client.create_block_blob_service()

    blob_service.create_blob_from_path(
        container_name=container_details.object_name, blob_name=blob_name, file_path=temp_file_path
    )

    url = blob_service.make_blob_url(container_details.object_name, blob_name, sas_token=container_details.sas)

    self.ingest_from_blob(BlobDescriptor(url, fd.size), ingestion_properties=ingestion_properties)

    # NOTE(review): delete_files() may already remove the temp file, in
    # which case the extra os.unlink would raise — confirm FileDescriptor
    # cleanup semantics.
    fd.delete_files()
    os.unlink(temp_file_path)
Enqueuing an ingest command from local files. :param file_descriptor: a FileDescriptor to be ingested. :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
def ingest_from_file(self, file_descriptor, ingestion_properties):
    """Enqueuing an ingest command from local files.

    :param file_descriptor: a FileDescriptor to be ingested.
    :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
    """
    file_descriptors = list()
    containers = self._resource_manager.get_containers()

    # Accept either a ready FileDescriptor or a plain path.
    if isinstance(file_descriptor, FileDescriptor):
        descriptor = file_descriptor
    else:
        descriptor = FileDescriptor(file_descriptor)
    file_descriptors.append(descriptor)

    # Blob name encodes database/table/guid so concurrent uploads stay unique.
    blob_name = "{db}__{table}__{guid}__{file}".format(
        db=ingestion_properties.database,
        table=ingestion_properties.table,
        guid=uuid.uuid4(),
        file=descriptor.stream_name,
    )

    # Random container choice spreads load across storage accounts.
    container_details = random.choice(containers)
    storage_client = CloudStorageAccount(container_details.storage_account_name, sas_token=container_details.sas)
    blob_service = storage_client.create_block_blob_service()

    # Upload the compressed stream held by the descriptor.
    blob_service.create_blob_from_stream(
        container_name=container_details.object_name, blob_name=blob_name, stream=descriptor.zipped_stream
    )

    url = blob_service.make_blob_url(container_details.object_name, blob_name, sas_token=container_details.sas)

    self.ingest_from_blob(
        BlobDescriptor(url, descriptor.size, descriptor.source_id), ingestion_properties=ingestion_properties
    )
Enqueuing an ingest command from azure blobs. :param azure.kusto.ingest.BlobDescriptor blob_descriptor: An object that contains a description of the blob to be ingested. :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
def ingest_from_blob(self, blob_descriptor, ingestion_properties):
    """Enqueuing an ingest command from azure blobs.

    :param azure.kusto.ingest.BlobDescriptor blob_descriptor: An object that contains a description of the blob to be ingested.
    :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
    """
    queues = self._resource_manager.get_ingestion_queues()

    # Random queue choice spreads load across ingestion queues.
    queue_details = random.choice(queues)
    storage_client = CloudStorageAccount(queue_details.storage_account_name, sas_token=queue_details.sas)
    queue_service = storage_client.create_queue_service()
    authorization_context = self._resource_manager.get_authorization_context()

    # Build the ingestion message and base64-encode it, as the Kusto
    # ingestion service expects base64 queue message content.
    ingestion_blob_info = _IngestionBlobInfo(
        blob_descriptor, ingestion_properties=ingestion_properties, auth_context=authorization_context
    )
    ingestion_blob_info_json = ingestion_blob_info.to_json()
    encoded = base64.b64encode(ingestion_blob_info_json.encode("utf-8")).decode("utf-8")
    queue_service.put_message(queue_name=queue_details.object_name, content=encoded)
Converts Kusto tables into pandas DataFrame. :param azure.kusto.data._models.KustoResultTable table: Table received from the response. :return: pandas DataFrame. :rtype: pandas.DataFrame
def dataframe_from_result_table(table):
    """Converts Kusto tables into pandas DataFrame.

    :param azure.kusto.data._models.KustoResultTable table: Table received from the response.
    :return: pandas DataFrame.
    :rtype: pandas.DataFrame
    """
    # Docstring moved to the top of the function: in the original it appeared
    # after the imports and was therefore a stray string expression, not a
    # docstring. The unused `from dateutil.tz import UTC` import was removed.
    import pandas as pd
    from ._models import KustoResultTable

    if not table:
        # NOTE(review): if KustoResultTable defines truthiness by row count,
        # this also rejects legitimately empty tables — confirm intent.
        raise ValueError()

    if not isinstance(table, KustoResultTable):
        raise TypeError("Expected KustoResultTable got {}".format(type(table).__name__))

    columns = [col.column_name for col in table.columns]
    frame = pd.DataFrame(table._rows, columns=columns)

    # Coerce boolean columns so downstream filtering behaves as expected.
    for col in table.columns:
        if col.column_type == "bool":
            frame[col.column_name] = frame[col.column_name].astype(bool)

    return frame
Converts array to a json string
def _convert_list_to_json(array): """ Converts array to a json string """ return json.dumps(array, skipkeys=False, allow_nan=False, indent=None, separators=(",", ":"))
Converts array to a json string
def _convert_dict_to_json(array): """ Converts array to a json string """ return json.dumps( array, skipkeys=False, allow_nan=False, indent=None, separators=(",", ":"), sort_keys=True, default=lambda o: o.__dict__, )
Assumed called on Travis, to prepare a package to be deployed This method prints on stdout for Travis. Return is obj to pass to sys.exit() directly
def travis_build_package():
    """Assumed called on Travis, to prepare a package to be deployed.

    This method prints on stdout for Travis.
    Returns None on success, or an error message to pass to sys.exit().
    """
    travis_tag = os.environ.get("TRAVIS_TAG")
    if not travis_tag:
        print("TRAVIS_TAG environment variable is not present")
        return "TRAVIS_TAG environment variable is not present"

    try:
        version = Version(travis_tag)
    except InvalidVersion:
        # Bug fix: the old message formatted `version`, which is unbound when
        # Version() raises — report the offending tag instead.
        failure = "Version must be a valid PEP440 version (version is: {})".format(travis_tag)
        print(failure)
        return failure

    abs_dist_path = Path(os.environ["TRAVIS_BUILD_DIR"], "dist")
    [create_package(package, text_type(abs_dist_path)) for package in package_list]

    print("Produced:\n{}".format(list(abs_dist_path.glob("*"))))

    # Verify the built artifacts actually carry the tagged version.
    pattern = "*{}*".format(version)
    packages = list(abs_dist_path.glob(pattern))
    if not packages:
        return "Package version does not match tag {}, abort".format(version)

    pypi_server = os.environ.get("PYPI_SERVER", "default PyPI server")
    print("Package created as expected and will be pushed to {}".format(pypi_server))
Acquire tokens from AAD.
def acquire_authorization_header(self):
    """Acquire tokens from AAD, wrapping ADAL failures in KustoAuthenticationError."""
    try:
        return self._acquire_authorization_header()
    except AdalError as error:
        method = self._authentication_method
        # Collect method-specific context for the wrapped error; unknown
        # methods re-raise the original ADAL error untouched.
        if method is AuthenticationMethod.aad_username_password:
            kwargs = {"username": self._username, "client_id": self._client_id}
        elif method in (
            AuthenticationMethod.aad_application_key,
            AuthenticationMethod.aad_device_login,
        ):
            kwargs = {"client_id": self._client_id}
        elif method is AuthenticationMethod.aad_application_certificate:
            kwargs = {"client_id": self._client_id, "thumbprint": self._thumbprint}
        else:
            raise error

        kwargs["resource"] = self._kusto_cluster
        kwargs["authority"] = self._adal_context.authority.url
        raise KustoAuthenticationError(method.value, error, **kwargs)
Creates a KustoConnection string builder that will authenticate with AAD user name and password. :param str connection_string: Kusto connection string should be of the format: https://<clusterName>.kusto.windows.net :param str user_id: AAD user ID. :param str password: Corresponding password of the AAD user. :param str authority_id: optional param. defaults to "common"
def with_aad_user_password_authentication(cls, connection_string, user_id, password, authority_id="common"):
    """Creates a KustoConnection string builder that will authenticate with AAD user name and password.

    :param str connection_string: Kusto connection string should be of the format:
        https://<clusterName>.kusto.windows.net
    :param str user_id: AAD user ID.
    :param str password: Corresponding password of the AAD user.
    :param str authority_id: optional param. defaults to "common"
    """
    # Both credentials are mandatory; fail fast on missing values.
    for credential in (user_id, password):
        _assert_value_is_valid(credential)

    kcsb = cls(connection_string)
    keywords = kcsb.ValidKeywords
    kcsb[keywords.aad_federated_security] = True
    kcsb[keywords.aad_user_id] = user_id
    kcsb[keywords.password] = password
    kcsb[keywords.authority_id] = authority_id
    return kcsb