Dataset columns:
  text_prompt: string (lengths 100 to 17.7k)
  code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Generates keyword arguments for the Docker client to create a volume. <END_TASK> <USER_TASK:> Description: def get_volume_create_kwargs(self, action, volume_name, kwargs=None): """ Generates keyword arguments for the Docker client to create a volume. :param action: Action configuration. :type action: ActionConfig :param volume_name: Volume name. :type volume_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
config = action.config
c_kwargs = dict(name=volume_name)
if config:
    c_kwargs['driver'] = config.driver
    driver_opts = init_options(config.driver_options)
    if driver_opts:
        c_kwargs['driver_opts'] = {option_name: resolve_value(option_value)
                                   for option_name, option_value in iteritems(driver_opts)}
    update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
else:
    update_kwargs(c_kwargs, kwargs)
return c_kwargs
<SYSTEM_TASK:> Generates keyword arguments for the Docker client to remove a volume. <END_TASK> <USER_TASK:> Description: def get_volume_remove_kwargs(self, action, volume_name, kwargs=None): """ Generates keyword arguments for the Docker client to remove a volume. :param action: Action configuration. :type action: ActionConfig :param volume_name: Volume name. :type volume_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
c_kwargs = dict(name=volume_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
<SYSTEM_TASK:> Generates a container name that should be used for creating new containers and checking the status of existing <END_TASK> <USER_TASK:> Description: def cname(cls, map_name, container, instance=None): """ Generates a container name that should be used for creating new containers and checking the status of existing containers. In this implementation, the format will be ``<map name>.<container name>.<instance>``. If no instance is provided, it is just ``<map name>.<container name>``. :param map_name: Container map name. :type map_name: unicode | str :param container: Container configuration name. :type container: unicode | str :param instance: Instance name (optional). :type instance: unicode | str :return: Container name. :rtype: unicode | str """
if instance:
    return '{0}.{1}.{2}'.format(map_name, container, instance)
return '{0}.{1}'.format(map_name, container)
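The naming scheme is simple enough to check in isolation. A minimal standalone sketch of the same logic (the real method is a classmethod on the policy class, so this copy is for illustration only):

def cname(map_name, container, instance=None):
    # Same format rules as the method above.
    if instance:
        return '{0}.{1}.{2}'.format(map_name, container, instance)
    return '{0}.{1}'.format(map_name, container)

assert cname('webapp', 'nginx') == 'webapp.nginx'
assert cname('webapp', 'nginx', 'blue') == 'webapp.nginx.blue'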
<SYSTEM_TASK:> Generates a container name that should be used for creating new attached volume containers and checking the <END_TASK> <USER_TASK:> Description: def aname(cls, map_name, attached_name, parent_name=None): """ Generates a container name that should be used for creating new attached volume containers and checking the status of existing containers. In this implementation, the format will be ``<map name>.<attached>``, or ``<map name>.<parent name>.<attached>`` if the parent container configuration name is provided. :param map_name: Container map name. :type map_name: unicode | str :param attached_name: Attached container alias. :type attached_name: unicode | str :param parent_name: Container configuration name that contains the attached container. :type parent_name: unicode | str :return: Container name. :rtype: unicode | str """
if parent_name:
    return '{0}.{1}.{2}'.format(map_name, parent_name, attached_name)
return '{0}.{1}'.format(map_name, attached_name)
<SYSTEM_TASK:> Generates a network name that should be used for creating new networks and checking the status of existing <END_TASK> <USER_TASK:> Description: def nname(cls, map_name, network_name): """ Generates a network name that should be used for creating new networks and checking the status of existing networks on the client. In this implementation, the format will be ``<map name>.<network name>``. :param map_name: Container map name. :type map_name: unicode | str :param network_name: Network configuration name. :type network_name: unicode | str :return: Network name. :rtype: unicode | str """
if network_name in DEFAULT_PRESET_NETWORKS:
    return network_name
return '{0}.{1}'.format(map_name, network_name)
<SYSTEM_TASK:> Determines the host name of a container. In this implementation, replaces all dots and underscores of a <END_TASK> <USER_TASK:> Description: def get_hostname(cls, container_name, client_name=None): """ Determines the host name of a container. In this implementation, replaces all dots and underscores of a container name with a dash; then attaches another dash with the client name, unless there is just one default client. :param container_name: Name of the container. :type container_name: unicode | str :param client_name: Name of the client configuration, where applicable. :type client_name: unicode | str :return: Host name. :rtype: unicode | str """
base_name = container_name
for old, new in cls.hostname_replace:
    base_name = base_name.replace(old, new)
if not client_name or client_name == cls.default_client_name:
    return base_name
client_suffix = client_name
for old, new in cls.hostname_replace:
    client_suffix = client_suffix.replace(old, new)
return '{0}-{1}'.format(base_name, client_suffix)
<SYSTEM_TASK:> Formats an ``adduser`` command. <END_TASK> <USER_TASK:> Description: def adduser(username, uid=None, system=False, no_login=True, no_password=False, group=False, gecos=None, **kwargs): """ Formats an ``adduser`` command. :param username: User name. :type username: unicode | str :param uid: Optional user id to use. :type uid: long | int :param system: Create a system user account. :type system: bool :param no_login: Disable the login for this user. Not compatible with CentOS. Implies setting '--no-create-home', and ``no_password``. :type no_login: bool :param no_password: Disable the password for this user. Not compatible with CentOS. :type no_password: bool :param group: Create a group along with the user. Not compatible with CentOS. :type group: bool :param gecos: Set GECOS information in order to suppress an interactive prompt. On CentOS, use ``__comment`` instead. :type gecos: unicode | str :param kwargs: Additional keyword arguments which are converted to the command line. :return: A formatted ``adduser`` command with arguments. :rtype: unicode | str """
return _format_cmd('adduser', username, __system=bool(system), __uid=uid,
                   __group=bool(group), __gid=uid,
                   no_login=(no_login, _NO_CREATE_HOME, _NO_LOGIN),
                   __disabled_password=no_login or bool(no_password),
                   __gecos=gecos, **kwargs)
<SYSTEM_TASK:> Generates a unix command line for creating a directory. <END_TASK> <USER_TASK:> Description: def mkdir(path, create_parent=True, check_if_exists=False): """ Generates a unix command line for creating a directory. :param path: Directory path. :type path: unicode | str :param create_parent: Create parent directories, if necessary. Default is ``True``. :type create_parent: bool :param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run. Default is ``False``. :type check_if_exists: bool :return: Unix shell command line. :rtype: unicode | str """
cmd = _format_cmd('mkdir', path, _p=create_parent)
if check_if_exists:
    return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, cmd)
return cmd
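For reference, the two shapes of output, assuming the module-internal _format_cmd helper renders the flag in the usual "mkdir -p <path>" form (that helper is not shown above, so the exact rendering is an assumption):

# mkdir('/var/data')
#   -> 'mkdir -p /var/data'            (assumed _format_cmd rendering)
# mkdir('/var/data', check_if_exists=True)
#   -> 'if [[ ! -d /var/data ]]; then mkdir -p /var/data; fi'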
<SYSTEM_TASK:> Create translation serializer dynamically. <END_TASK> <USER_TASK:> Description: def bind(self, field_name, parent): """ Create translation serializer dynamically. Takes translatable model class (shared_model) from parent serializer and it may create a serializer class on the fly if no custom class was specified. """
super(TranslatedFieldsField, self).bind(field_name, parent)

# Expect 1-on-1 for now. Allow using source as alias,
# but it should not be a dotted path for now
related_name = self.source or field_name

# This could all be done in __init__(), but by moving the code here,
# it's possible to auto-detect the parent model.
if self.shared_model is not None and self.serializer_class is not None:
    return

# Fill in the blanks
if self.serializer_class is None:
    if self.shared_model is None:
        # Auto detect parent model
        from .serializers import TranslatableModelSerializer
        if not isinstance(parent, TranslatableModelSerializer):
            raise TypeError("Expected 'TranslatableModelSerializer' as serializer base class")
        if not issubclass(parent.Meta.model, TranslatableModel):
            raise TypeError("Expected 'TranslatableModel' for the parent model")
        self.shared_model = parent.Meta.model

    # Create serializer based on shared model.
    translated_model = self.shared_model._parler_meta[related_name]
    self.serializer_class = create_translated_fields_serializer(
        self.shared_model, related_name=related_name,
        meta={'fields': translated_model.get_translated_fields()}
    )
else:
    if not issubclass(self.serializer_class.Meta.model, TranslatedFieldsModel):
        raise TypeError("Expected 'TranslatedFieldsModel' for the serializer model")
<SYSTEM_TASK:> Serialize translated fields. <END_TASK> <USER_TASK:> Description: def to_representation(self, value): """ Serialize translated fields. Simply iterate over available translations and, for each language, delegate serialization logic to the translation model serializer. Output languages can be selected by passing a list of language codes, `languages`, within the serialization context. """
if value is None:
    return

# Only need one serializer to create the native objects
serializer = self.serializer_class(
    instance=self.parent.instance,  # Typically None
    context=self.context,
    partial=self.parent.partial
)

# Don't need to have a 'language_code', it will be split up already,
# so this should avoid redundant output.
if 'language_code' in serializer.fields:
    raise ImproperlyConfigured("Serializer may not have a 'language_code' field")

translations = value.all()  # value = translations related manager
languages = self.context.get('languages')
if languages:
    translations = translations.filter(language_code__in=languages)

# Split into a dictionary per language
result = OrderedDict()
for translation in translations:
    result[translation.language_code] = serializer.to_representation(translation)

return result
<SYSTEM_TASK:> Deserialize data from translations fields. <END_TASK> <USER_TASK:> Description: def to_internal_value(self, data): """ Deserialize data from translations fields. For each received language, delegate validation logic to the translation model serializer. """
if data is None:
    return

if not isinstance(data, dict):
    self.fail('invalid')
if not self.allow_empty and len(data) == 0:
    self.fail('empty')

result, errors = {}, {}
for lang_code, model_fields in data.items():
    serializer = self.serializer_class(data=model_fields)
    if serializer.is_valid():
        result[lang_code] = serializer.validated_data
    else:
        errors[lang_code] = serializer.errors

if errors:
    raise serializers.ValidationError(errors)
return result
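A sketch of the payload shape this method expects; the field names below are hypothetical:

# One entry per language code, each mapping translated field names to values.
data = {
    "en": {"title": "Hello", "body": "World"},   # hypothetical fields
    "nl": {"title": "Hallo", "body": "Wereld"},
}
# to_internal_value(data) validates each block with the translation
# serializer and returns {"en": {...}, "nl": {...}}, or raises a
# ValidationError keyed by language code.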
<SYSTEM_TASK:> Parsing passed text to json. <END_TASK> <USER_TASK:> Description: def parse(self, text, layers=None): """Parsing passed text to json. Args: text: Text to parse. layers (optional): Special fields. Only one string or iterable object (e.g "Data", ("Data", "Fio")). Only these fields will be returned. Returns: The parsed text into a json object. """
params = {
    "text": text,
    "key": self.key,
}

if layers is not None:
    # if it's a string
    if isinstance(layers, six.string_types):
        params["layers"] = layers
    # if it's another iterable object
    # (on Python >= 3.3 this lives at collections.abc.Iterable,
    # and the bare collections.Iterable alias is gone in 3.10+)
    elif isinstance(layers, collections.Iterable):
        params["layers"] = ",".join(layers)

req = requests.get(self.NLU_URL, params=params)

return req.json()
<SYSTEM_TASK:> Try to get the generated file. <END_TASK> <USER_TASK:> Description: def generate(self, text): """Try to get the generated file. Args: text: The text that you want to generate. """
if not text:
    raise Exception("No text to speak")
if len(text) >= self.MAX_CHARS:
    raise Exception("Number of characters must be less than 2000")

params = self.__params.copy()
params["text"] = text

self._data = requests.get(self.TTS_URL, params=params, stream=False).iter_content()
<SYSTEM_TASK:> Save data in file. <END_TASK> <USER_TASK:> Description: def save(self, path="speech"): """Save data in file. Args: path (optional): A path to save file. Defaults to "speech". File extension is optional. Absolute path is allowed. Returns: The path to the saved file. """
if self._data is None:
    raise Exception("There's nothing to save")

extension = "." + self.__params["format"]
if os.path.splitext(path)[1] != extension:
    path += extension

with open(path, "wb") as f:
    for d in self._data:
        f.write(d)

return path
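A hypothetical end-to-end use of generate() and save(); the class name and constructor arguments are not shown in this section, so they are elided:

# tts = SpeechGenerator(...)      # hypothetical constructor, details not shown
# tts.generate("Hello world")     # fetches audio from the TTS endpoint
# path = tts.save("/tmp/hello")   # extension appended from the configured format
# print(path)                     # e.g. /tmp/hello.mp3, depending on settings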
<SYSTEM_TASK:> Create a Rest Framework serializer class for a translated fields model. <END_TASK> <USER_TASK:> Description: def create_translated_fields_serializer(shared_model, meta=None, related_name=None, **fields): """ Create a Rest Framework serializer class for a translated fields model. :param shared_model: The shared model. :type shared_model: :class:`parler.models.TranslatableModel` """
if not related_name:
    translated_model = shared_model._parler_meta.root_model
else:
    translated_model = shared_model._parler_meta[related_name].model

# Define inner Meta class
if not meta:
    meta = {}
meta['model'] = translated_model
meta.setdefault('fields', ['language_code'] + translated_model.get_translated_fields())

# Define serializer class attributes
attrs = {}
attrs.update(fields)
attrs['Meta'] = type('Meta', (), meta)

# Dynamically create the serializer class
return type('{0}Serializer'.format(translated_model.__name__),
            (serializers.ModelSerializer,), attrs)
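The dynamic class creation relies on the three-argument form of type(); a minimal self-contained sketch of the same trick (class and field names are hypothetical):

from rest_framework import serializers

# type(name, bases, attrs) builds a class at runtime, exactly as above.
meta = type('Meta', (), {'model': None, 'fields': ['language_code']})
ExampleSerializer = type('ExampleSerializer',
                         (serializers.ModelSerializer,),
                         {'Meta': meta})
# In practice 'model' would be a real TranslatedFieldsModel subclass.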
<SYSTEM_TASK:> Extract the translations and save them after main object save. <END_TASK> <USER_TASK:> Description: def save(self, **kwargs): """ Extract the translations and save them after main object save. By default all translations will be saved no matter if creating or updating an object. Users with more complex needs might define their own save and handle translation saving themselves. """
translated_data = self._pop_translated_data()
instance = super(TranslatableModelSerializer, self).save(**kwargs)
self.save_translations(instance, translated_data)
return instance
<SYSTEM_TASK:> Separate data of translated fields from other data. <END_TASK> <USER_TASK:> Description: def _pop_translated_data(self): """ Separate data of translated fields from other data. """
translated_data = {}
for meta in self.Meta.model._parler_meta:
    translations = self.validated_data.pop(meta.rel_name, {})
    if translations:
        translated_data[meta.rel_name] = translations
return translated_data
<SYSTEM_TASK:> Save translation data into translation objects. <END_TASK> <USER_TASK:> Description: def save_translations(self, instance, translated_data): """ Save translation data into translation objects. """
for meta in self.Meta.model._parler_meta:
    translations = translated_data.get(meta.rel_name, {})
    for lang_code, model_fields in translations.items():
        translation = instance._get_translated_model(lang_code, auto_create=True, meta=meta)
        for field, value in model_fields.items():
            setattr(translation, field, value)

# Go through the same hooks as the regular model,
# instead of calling translation.save() directly.
instance.save_translations()
<SYSTEM_TASK:> Try to load the given conf file. <END_TASK> <USER_TASK:> Description: def load_conf(cfg_path): """ Try to load the given conf file. """
global config

try:
    cfg = open(cfg_path, 'r')
except Exception as ex:
    if verbose:
        print("Unable to open {0}".format(cfg_path))
        print(str(ex))
    return False

# Read the entire contents of the conf file
cfg_json = cfg.read()
cfg.close()
# print(cfg_json)

# Try to parse the conf file into a Python structure
try:
    config = json.loads(cfg_json)
except Exception as ex:
    print("Unable to parse configuration file as JSON")
    print(str(ex))
    return False

# This config was successfully loaded
return True
<SYSTEM_TASK:> Translates alias references to their defined values. <END_TASK> <USER_TASK:> Description: def translate_message_tokens(message_tokens): """ Translates alias references to their defined values. The first token is a channel alias. The remaining tokens are value aliases. """
trans_tokens = []
if message_tokens[0] in cv_dict[channels_key]:
    trans_tokens.append(cv_dict[channels_key][message_tokens[0]])
else:
    trans_tokens.append(int(message_tokens[0]))

for token in message_tokens[1:]:
    if token in cv_dict[values_key]:
        trans_tokens.extend(cv_dict[values_key][token])
    else:
        trans_tokens.append(int(token))

return trans_tokens
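A hypothetical alias table illustrating the translation; channels_key and values_key are the module-level keys into cv_dict:

# cv_dict = {
#     channels_key: {"rgb1": 1},           # channel alias -> channel number
#     values_key: {"red": [255, 0, 0]},    # value alias -> list of values
# }
# translate_message_tokens(["rgb1", "red", "128"])
#   -> [1, 255, 0, 0, 128]   (channel alias, expanded value alias, literal int)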
<SYSTEM_TASK:> Parse message string to response object. <END_TASK> <USER_TASK:> Description: def parse(cls, msg): """Parse message string to response object."""
lines = msg.splitlines()
# Split at most twice, so multi-word reason phrases (e.g. "Not Found") survive.
version, status_code, reason = lines[0].split(None, 2)
headers = cls.parse_headers('\r\n'.join(lines[1:]))
return cls(version=version, status_code=status_code, reason=reason, headers=headers)
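An illustrative input, e.g. the status line and headers of an SSDP response; parse_headers is not shown here and is assumed to accept the raw header block:

# msg = ('HTTP/1.1 200 OK\r\n'
#        'CACHE-CONTROL: max-age=1800\r\n'
#        'ST: upnp:rootdevice')
# Response.parse(msg)
#   -> Response(version='HTTP/1.1', status_code='200', reason='OK',
#               headers=<parsed headers>)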
<SYSTEM_TASK:> Send request to a given address via given transport. <END_TASK> <USER_TASK:> Description: def sendto(self, transport, addr): """ Send request to a given address via given transport. Args: transport (asyncio.DatagramTransport): Write transport to send the message on. addr (Tuple[str, int]): IP address and port pair to send the message to. """
msg = bytes(self) + b'\r\n'
logger.debug("%s:%s < %s", *(addr + (self,)))
transport.sendto(msg, addr)
<SYSTEM_TASK:> Send a set of RGB values to the light <END_TASK> <USER_TASK:> Description: def send_rgb(dev, red, green, blue, dimmer): """ Send a set of RGB values to the light """
cv = [0 for v in range(0, 512)]
cv[0] = red
cv[1] = green
cv[2] = blue
cv[6] = dimmer
sent = dev.send_multi_value(1, cv)
return sent
<SYSTEM_TASK:> How to control a DMX light through an Anyma USB controller <END_TASK> <USER_TASK:> Description: def main(): """ How to control a DMX light through an Anyma USB controller """
# Channel value list for channels 1-512
cv = [0 for v in range(0, 512)]

# Create an instance of the DMX controller and open it
print("Opening DMX controller...")
dev = pyudmx.uDMXDevice()
# This will automagically find a single Anyma-type USB DMX controller
dev.open()

# For informational purpose, display what we know about the DMX controller
print(dev.Device)

# Send messages to the light changing it to red, then green, then blue
# This is the "hard way" to do it, but illustrates how it's done
print("Setting to red...")
cv[0] = 255  # red
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to red")
sleep(3.0)

print("Setting to green...")
cv[0] = 0    # red
cv[1] = 255  # green
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to green")
sleep(3.0)

print("Setting to blue...")
cv[0] = 0    # red
cv[1] = 0    # green
cv[2] = 255  # blue
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to blue")
sleep(3.0)

# Here's an easier way to do it
print("And, again the easier way")
send_rgb(dev, 255, 0, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 255, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 0, 255, 128)
sleep(3.0)

print("Reset all channels and close..")
# Turns the light off
cv = [0 for v in range(0, 512)]
dev.send_multi_value(1, cv)
dev.close()
<SYSTEM_TASK:> Connect to vCenter server <END_TASK> <USER_TASK:> Description: def connect(self): """Connect to vCenter server"""
try:
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    if self.config['no_ssl_verify']:
        requests.packages.urllib3.disable_warnings()
        context.verify_mode = ssl.CERT_NONE
        self.si = SmartConnectNoSSL(
            host=self.config['server'],
            user=self.config['username'],
            pwd=self.config['password'],
            port=int(self.config['port']),
            certFile=None,
            keyFile=None,
        )
    else:
        self.si = SmartConnect(
            host=self.config['server'],
            user=self.config['username'],
            pwd=self.config['password'],
            port=int(self.config['port']),
            sslContext=context,
            certFile=None,
            keyFile=None,
        )
except Exception as e:
    print('Unable to connect to vsphere server.')
    print(e)
    sys.exit(1)

# add a clean up routine
atexit.register(Disconnect, self.si)

self.content = self.si.RetrieveContent()
<SYSTEM_TASK:> Shutdown guest <END_TASK> <USER_TASK:> Description: def shutdown(self): """ Shutdown guest fallback to power off if guest tools aren't installed """
vm = self.get_vm_failfast(self.config['name'])
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
    print("%s already poweredOff" % vm.name)
else:
    if self.guestToolsRunning(vm):
        timeout_minutes = 10
        print("waiting for %s to shutdown "
              "(%s minutes before forced powerOff)" % (
                  vm.name, str(timeout_minutes)
              ))
        vm.ShutdownGuest()
        if self.WaitForVirtualMachineShutdown(vm, timeout_minutes * 60):
            print("shutdown complete")
            print("%s poweredOff" % vm.name)
        else:
            print("%s has not shutdown after %s minutes: "
                  "will powerOff" % (vm.name, str(timeout_minutes)))
            self.powerOff()
    else:
        print("GuestTools not running or not installed: will powerOff")
        self.powerOff()
<SYSTEM_TASK:> Find a resource pool given a pool name for desired cluster <END_TASK> <USER_TASK:> Description: def get_resource_pool(self, cluster, pool_name): """ Find a resource pool given a pool name for desired cluster """
pool_obj = None

# get a list of all resource pools in this cluster
cluster_pools_list = cluster.resourcePool.resourcePool

# get list of all resource pools with a given text name
pool_selections = self.get_obj(
    [vim.ResourcePool],
    pool_name,
    return_all=True
)

# get the first pool that exists in a given cluster
if pool_selections:
    for p in pool_selections:
        if p in cluster_pools_list:
            pool_obj = p
            break

return pool_obj
<SYSTEM_TASK:> Get the vsphere object associated with a given text name or MOID <END_TASK> <USER_TASK:> Description: def get_obj(self, vimtype, name, return_all=False, path=""): """Get the vsphere object associated with a given text name or MOID"""
obj = list()
if path:
    obj_folder = self.content.searchIndex.FindByInventoryPath(path)
    container = self.content.viewManager.CreateContainerView(
        obj_folder, vimtype, True
    )
else:
    container = self.content.viewManager.CreateContainerView(
        self.content.rootFolder, vimtype, True)

for c in container.view:
    if name in [c.name, c._GetMoId()]:
        if return_all is False:
            # (an unreachable 'break' after this return has been dropped)
            return c
        else:
            obj.append(c)

if len(obj) > 0:
    return obj
else:
    # for backwards-compat
    return None
<SYSTEM_TASK:> Get a HostSystem object <END_TASK> <USER_TASK:> Description: def get_host_system_failfast( self, name, verbose=False, host_system_term='HS' ): """ Get a HostSystem object fail fast if the object isn't a valid reference """
if verbose:
    print("Finding HostSystem named %s..." % name)

hs = self.get_host_system(name)

if hs is None:
    print("Error: %s '%s' does not exist" % (host_system_term, name))
    sys.exit(1)

if verbose:
    # use .format() here; the original mixed %-interpolation with {} placeholders
    print("Found HostSystem: {0} Name: {1}".format(hs, hs.name))

return hs
<SYSTEM_TASK:> Guest shutdown requests do not run a task we can wait for. <END_TASK> <USER_TASK:> Description: def WaitForVirtualMachineShutdown( self, vm_to_poll, timeout_seconds, sleep_period=5 ): """ Guest shutdown requests do not run a task we can wait for. So, we must poll and wait for status to be poweredOff. Returns True if shutdown, False if poll expired. """
seconds_waited = 0  # wait counter
while seconds_waited < timeout_seconds:
    # sleep first, since nothing shuts down instantly
    seconds_waited += sleep_period
    time.sleep(sleep_period)

    vm = self.get_vm(vm_to_poll.name)
    if vm.runtime.powerState == \
            vim.VirtualMachinePowerState.poweredOff:
        return True
return False
<SYSTEM_TASK:> Get a cached post-processed result of a GitHub API call. Uses Trac cache <END_TASK> <USER_TASK:> Description: def data(self): """ Get a cached post-processed result of a GitHub API call. Uses Trac cache to avoid constant querying of the remote API. If a previous API call did not succeed, automatically retries after a timeout. """
if self._next_update and datetime.now() > self._next_update:
    self.update()
return self._data
<SYSTEM_TASK:> Return a sequence of `GitHubTeam` objects, one for each team in this <END_TASK> <USER_TASK:> Description: def teams(self): """ Return a sequence of `GitHubTeam` objects, one for each team in this org. """
teams = self._teamlist.teams()

# find out which teams have been added or removed since the last sync
current_teams = set(self._teamobjects.keys())
new_teams = set(teams.keys())  # pylint: disable=no-member
added = new_teams - current_teams
removed = current_teams - new_teams

for team in removed:
    del self._teamobjects[team]
for team in added:
    self._teamobjects[team] = GitHubTeam(
        self._api, self._env, self._org, teams[team], team)  # pylint: disable=unsubscriptable-object
return self._teamobjects.values()
<SYSTEM_TASK:> Return a list of all users in this organization. Users are identified <END_TASK> <USER_TASK:> Description: def members(self): """ Return a list of all users in this organization. Users are identified by their login name. Note that this is computed from the teams in the organization, because GitHub does not currently offer a WebHook for organization membership, so converting org membership would lead to stale data. """
allmembers = set()
for team in self.teams():
    allmembers.update(team.members())
return sorted(allmembers)
<SYSTEM_TASK:> Trigger an update and cache invalidation for the team identified by the <END_TASK> <USER_TASK:> Description: def update_team(self, slug): """ Trigger an update and cache invalidation for the team identified by the given `slug`. Returns `True` on success, `False` otherwise. :param slug: The GitHub 'slug' that identifies the team in URLs """
if slug not in self._teamobjects:
    # This case is checked and handled further up, but better be safe
    # than sorry.
    return False  # pragma: no cover
return self._teamobjects[slug].update()
<SYSTEM_TASK:> Connect to the given GitHub API URL template by replacing all <END_TASK> <USER_TASK:> Description: def github_api(self, url, *args): """ Connect to the given GitHub API URL template by replacing all placeholders with the given parameters and return the decoded JSON result on success. On error, return `None`. :param url: The path to request from the GitHub API. Contains format string placeholders that will be replaced with all additional positional arguments. """
import requests
import urllib

github_api_url = os.environ.get("TRAC_GITHUB_API_URL", "https://api.github.com/")
formatted_url = github_api_url + url.format(*(urllib.quote(str(x)) for x in args))
access_token = _config_secret(self.access_token)
self.log.debug("Hitting GitHub API endpoint %s with user %s", formatted_url, self.username)  # pylint: disable=no-member

results = []
try:
    has_next = True
    while has_next:
        req = requests.get(formatted_url, auth=(self.username, access_token))
        if req.status_code != 200:
            try:
                message = req.json()['message']
            except Exception:  # pylint: disable=broad-except
                message = req.text
            self.log.error("Error communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
                formatted_url, message))
            return None
        results.extend(req.json())
        has_next = 'next' in req.links
        if has_next:
            formatted_url = req.links['next']['url']
except requests.exceptions.ConnectionError as rce:
    self.log.error("Exception while communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
        formatted_url, rce))
    return None
return results
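The pagination loop leans on requests parsing RFC 5988 Link headers into response.links; a concrete illustration of what that attribute holds:

# A GitHub response carrying the header
#   Link: <https://api.github.com/organizations/1/teams?page=2>; rel="next"
# is exposed by requests as
#   req.links == {'next': {'url': 'https://api.github.com/organizations/1/teams?page=2',
#                          'rel': 'next'}}
# so the loop keeps following req.links['next']['url'] until no 'next' remains.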
<SYSTEM_TASK:> Trigger update and cache invalidation for the team identified by the <END_TASK> <USER_TASK:> Description: def update_team(self, slug): """ Trigger update and cache invalidation for the team identified by the given `slug`, if any. Returns `True` if the update was successful, `False` otherwise. :param slug: GitHub 'slug' name for the team to be updated. """
if self._org:
    if not self._org.has_team(slug):
        return self._org.update()
    return self._org.update_team(slug)
# self._org is created during Trac startup, so there should never
# be a case where we try to update an org before it's created; this
# is a sanity check only.
return False
<SYSTEM_TASK:> Return a list of names of the groups that the user with the specified <END_TASK> <USER_TASK:> Description: def get_permission_groups(self, username): """ Return a list of names of the groups that the user with the specified name is a member of. Implements an `IPermissionGroupProvider` API. This specific implementation connects to GitHub with a dedicated user, fetches and caches the teams and their users configured at GitHub and converts the data into a format usable for easy access by username. """
if not self.organization or not self.username or not self.access_token:
    return []
elif (self.username_prefix and
        not username.startswith(self.username_prefix)):
    return []

data = self._fetch_groups()
if not data:
    self.log.error("No cached groups from GitHub available")  # pylint: disable=no-member
    return []
else:
    return data.get(username[len(self.username_prefix):], [])
<SYSTEM_TASK:> Return whether the handler wants to process the given request. <END_TASK> <USER_TASK:> Description: def match_request(self, req): """ Return whether the handler wants to process the given request. Implements an `IRequestHandler` API. """
match = self._request_re.match(req.path_info)
if match:
    return True
if os.environ.get('TRAC_GITHUB_ENABLE_DEBUGGING', None) is not None:
    debug_match = self._debug_request_re.match(req.path_info)
    if debug_match:
        return True
<SYSTEM_TASK:> Debugging helper used for testing, processes the given request and dumps <END_TASK> <USER_TASK:> Description: def process_debug_request(self, req): """ Debugging helper used for testing, processes the given request and dumps the internal state of cached user to group mappings. Note that this is only callable if TRAC_GITHUB_ENABLE_DEBUGGING is set in the environment. """
req.send(json.dumps(self._fetch_groups()).encode('utf-8'), 'application/json', 200)
<SYSTEM_TASK:> Process the given request `req`, implements an `IRequestHandler` API. <END_TASK> <USER_TASK:> Description: def process_request(self, req): """ Process the given request `req`, implements an `IRequestHandler` API. Normally, `process_request` would return a tuple, but since none of these requests will return an HTML page, they will all terminate without a return value and directly send a response. """
if os.environ.get('TRAC_GITHUB_ENABLE_DEBUGGING', None) is not None:
    debug_match = self._debug_request_re.match(req.path_info)
    if debug_match:
        self.process_debug_request(req)

if req.method != 'POST':
    msg = u'Endpoint is ready to accept GitHub Organization membership notifications.\n'
    self.log.warning(u'Method not allowed (%s)', req.method)  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 405)

event = req.get_header('X-GitHub-Event')
supported_events = {
    'ping': self._handle_ping_ev,
    'membership': self._handle_membership_ev
}

# Check whether this event is supported
if event not in supported_events:
    msg = u'Event type %s is not supported\n' % event
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 400)

# Verify the event's signature
reqdata = req.read()
signature = req.get_header('X-Hub-Signature')
if not self._verify_webhook_signature(signature, reqdata):
    msg = u'Webhook signature verification failed\n'
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 403)

# Decode JSON and handle errors
try:
    payload = json.loads(reqdata)
except (ValueError, KeyError):
    msg = u'Invalid payload\n'
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 400)

# Handle the event
try:
    supported_events[event](req, payload)
except RequestDone:
    # Normal termination, bubble up
    raise
except Exception:  # pylint: disable=broad-except
    msg = (u'Exception occurred while handling payload, '
           'possible invalid payload\n%s' % traceback.format_exc())
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 500)
<SYSTEM_TASK:> Returns a map of the direct children of a piece of Content. Content can have multiple types of children - <END_TASK> <USER_TASK:> Description: def get_content_children(self, content_id, expand=None, parent_version=None, callback=None): """ Returns a map of the direct children of a piece of Content. Content can have multiple types of children - for example a Page can have children that are also Pages, but it can also have Comments and Attachments. The {@link ContentType}(s) of the children returned is specified by the "expand" query parameter in the request - this parameter can include expands for multiple child types. If no types are included in the expand parameter, the map returned will just list the child types that are available to be expanded for the {@link Content} referenced by the "content_id" parameter. :param content_id (string): A string containing the id of the content to retrieve children for. :param expand (string): OPTIONAL :A comma separated list of properties to expand on the children. Default: None. :param parent_version (int): OPTIONAL: An integer representing the version of the content to retrieve children for. Default: 0 (Latest) :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
params = {}
if expand:
    params["expand"] = expand
if parent_version:
    params["parentVersion"] = parent_version
return self._service_get_request("rest/api/content/{id}/child".format(id=content_id),
                                 params=params, callback=callback)
<SYSTEM_TASK:> Returns a map of the descendants of a piece of Content. Content can have multiple types of descendants - <END_TASK> <USER_TASK:> Description: def get_content_descendants(self, content_id, expand=None, callback=None): """ Returns a map of the descendants of a piece of Content. Content can have multiple types of descendants - for example a Page can have descendants that are also Pages, but it can also have Comments and Attachments. The {@link ContentType}(s) of the descendants returned is specified by the "expand" query parameter in the request - this parameter can include expands for multiple descendant types. If no types are included in the expand parameter, the map returned will just list the descendant types that are available to be expanded for the {@link Content} referenced by the "content_id" parameter. :param content_id (string): A string containing the id of the content to retrieve descendants for. :param expand (string): OPTIONAL: A comma separated list of properties to expand on the descendants. Default: None. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/{type} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
params = {}
if expand:
    params["expand"] = expand
return self._service_get_request("rest/api/content/{id}/descendant".format(id=content_id),
                                 params=params, callback=callback)
<SYSTEM_TASK:> Returns the direct descendants of a piece of Content, limited to a single descendant type. <END_TASK> <USER_TASK:> Description: def get_content_descendants_by_type(self, content_id, child_type, expand=None, start=None, limit=None, callback=None): """ Returns the direct descendants of a piece of Content, limited to a single descendant type. The {@link ContentType}(s) of the descendants returned is specified by the "type" path parameter in the request. Currently the only supported descendants are comment descendants of non-comment Content. :param content_id (string): A string containing the id of the content to retrieve descendants for :param child_type (string): A {@link ContentType} to filter descendants on. :param expand (string): OPTIONAL: A comma separated list of properties to expand on the descendants. Default: Empty :param start (int): OPTIONAL: The index of the first item within the result set that should be returned. Default: 0. :param limit (int): OPTIONAL: How many items should be returned after the start index. Default: 25 or site limit. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/descendant/{type} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
params = {}
if expand:
    params["expand"] = expand
if start is not None:
    params["start"] = int(start)
if limit is not None:
    params["limit"] = int(limit)
return self._service_get_request("rest/api/content/{id}/descendant/{type}"
                                 "".format(id=content_id, type=child_type),
                                 params=params, callback=callback)
<SYSTEM_TASK:> Returns a paginated list of content properties. <END_TASK> <USER_TASK:> Description: def get_content_properties(self, content_id, expand=None, start=None, limit=None, callback=None): """ Returns a paginated list of content properties. Content properties are a key / value store of properties attached to a piece of Content. The key is a string, and the value is a JSON-serializable object. :param content_id (string): A string containing the id of the property content container. :param expand (string): OPTIONAL: A comma separated list of properties to expand on the content properties. Default: Empty. :param start (int): OPTIONAL: The start point of the collection to return. Default: None (0). :param limit (int): OPTIONAL: The limit of the number of items to return, this may be restricted by fixed system limits. Default: 10. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/property endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
params = {}
if expand:
    params["expand"] = expand
if start is not None:
    params["start"] = int(start)
if limit is not None:
    params["limit"] = int(limit)
return self._service_get_request("rest/api/content/{id}/property".format(id=content_id),
                                 params=params, callback=callback)
<SYSTEM_TASK:> Add one or more attachments to a Confluence Content entity, with optional comments. <END_TASK> <USER_TASK:> Description: def create_new_attachment_by_content_id(self, content_id, attachments, callback=None): """ Add one or more attachments to a Confluence Content entity, with optional comments. Comments are optional, but if included there must be as many comments as there are files, and the comments must be in the same order as the files. :param content_id (string): A string containing the id of the attachments content container. :param attachments (list of dicts or dict): This is a list of dictionaries or a dictionary. Each dictionary must have the key "file" with a value that is I/O like (file, StringIO, etc.), and may also have a key "comment" with a string for file comments. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
if isinstance(attachments, list):
    assert all(isinstance(at, dict) and "file" in list(at.keys()) for at in attachments)
elif isinstance(attachments, dict):
    assert "file" in list(attachments.keys())
else:
    assert False
return self._service_post_request("rest/api/content/{id}/child/attachment".format(id=content_id),
                                  headers={"X-Atlassian-Token": "nocheck"}, files=attachments,
                                  callback=callback)
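Hypothetical payloads that satisfy the assertions above, in both accepted shapes:

# Single attachment with an optional comment:
single = {"file": open("report.txt", "rb"), "comment": "Monthly report"}

# Several attachments; comments are per-file and optional:
several = [
    {"file": open("a.txt", "rb")},
    {"file": open("b.txt", "rb"), "comment": "Second file"},
]
# api.create_new_attachment_by_content_id("123456", several)  # 'api' is hypothetical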
<SYSTEM_TASK:> Creates a new Space. <END_TASK> <USER_TASK:> Description: def create_new_space(self, space_definition, callback=None): """ Creates a new Space. The incoming Space does not include an id, but must include a Key and Name, and should include a Description. :param space_definition (dict): The dictionary describing the new space. Must include keys "key", "name", and "description". :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example space data: { "key": "TST", "name": "Example space", "description": { "plain": { "value": "This is an example space", "representation": "plain" } } } """
assert isinstance(space_definition, dict) and {"key", "name", "description"} <= set(space_definition.keys())
return self._service_post_request("rest/api/space", data=json.dumps(space_definition),
                                  headers={"Content-Type": "application/json"}, callback=callback)
<SYSTEM_TASK:> Updates a piece of Content, or restores if it is trashed. <END_TASK> <USER_TASK:> Description: def update_content_by_id(self, content_data, content_id, callback=None): """ Updates a piece of Content, or restores if it is trashed. The body contains the representation of the content. Must include the new version number. To restore a piece of content that has the status of trashed the content must have it's version incremented, and status set to current. No other field modifications will be performed when restoring a piece of content from the trash. Request example to restore from trash: { "id": "557059", "status": "current", "version": { "number": 2 } } :param content_data (dict): The content data (with desired updates). This should be retrieved via the API call to get content data, then modified to desired state. Required keys are: "id", "type", "title", "space", "version", and "body". :param content_id (string): The id of the content to update. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example content data: { "id": "3604482", "type": "page", "title": "Example Content title", "space": { "key": "TST" }, "version": { "number": 2, "minorEdit": false }, "body": { "storage": { "value": "<p>This is the updated text for the new page</p>", "representation": "storage" } } } """
assert isinstance(content_data, dict) and set(content_data.keys()) >= self.UPDATE_CONTENT_REQUIRED_KEYS
return self._service_put_request("rest/api/content/{id}".format(id=content_id),
                                 data=json.dumps(content_data),
                                 headers={"Content-Type": "application/json"}, callback=callback)
<SYSTEM_TASK:> Update the non-binary data of an Attachment. <END_TASK> <USER_TASK:> Description: def update_attachment_metadata(self, content_id, attachment_id, new_metadata, callback=None): """ Update the non-binary data of an Attachment. This resource can be used to update an attachment's filename, media-type, comment, and parent container. :param content_id (string): A string containing the ID of the attachments content container. :param attachment_id (string): The ID of the attachment to update. :param new_metadata (dict): The updated metadata for the attachment. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment/{attachment_id} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example attachment metadata: { "id": "att5678", "type": "attachment", "title": "new_file_name.txt", "version": { "number": 2, "minorEdit": false } } """
assert isinstance(new_metadata, dict) and set(new_metadata.keys()) >= self.ATTACHMENT_METADATA_KEYS
return self._service_put_request("rest/api/content/{id}/child/attachment/{attachment_id}"
                                 "".format(id=content_id, attachment_id=attachment_id),
                                 data=json.dumps(new_metadata),
                                 headers={"Content-Type": "application/json"}, callback=callback)
<SYSTEM_TASK:> Update the binary data of an Attachment, and optionally the comment and the minor edit field. <END_TASK> <USER_TASK:> Description: def update_attachment(self, content_id, attachment_id, attachment, callback=None): """ Update the binary data of an Attachment, and optionally the comment and the minor edit field. This adds a new version of the attachment, containing the new binary data, filename, and content-type. When updating the binary data of an attachment, the comment related to it together with the field that specifies if it's a minor edit can be updated as well, but are not required. If an update is considered to be a minor edit, notifications will not be sent to the watchers of that content. :param content_id (string): A string containing the id of the attachments content container. :param attachment_id (string): The id of the attachment to upload a new file for. :param attachment (dict): The dictionary describing the attachment to upload. The dict must have a key "file", which has a value that is an I/O object (file, StringIO, etc.), and can also have a "comment" key describing the attachment, and a "minorEdit" key, which is a boolean used to flag that the changes to the attachment are not substantial. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{content_id}/child/attachment/{attachment_id}/data endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
if isinstance(attachment, dict):
    assert "file" in list(attachment.keys())
else:
    assert False
return self._service_post_request("rest/api/content/{content_id}/child/attachment/{attachment_id}/data"
                                  "".format(content_id=content_id, attachment_id=attachment_id),
                                  headers={"X-Atlassian-Token": "nocheck"}, files=attachment,
                                  callback=callback)
<SYSTEM_TASK:> Updates a content property. <END_TASK> <USER_TASK:> Description: def update_property(self, content_id, property_key, new_property_data, callback=None): """ Updates a content property. The body contains the representation of the content property. Must include the property id, and the new version number. Attempts to create a new content property if the given version number is 1, just like {@link #create(com.atlassian.confluence.api.model.content.id.ContentId, String, com.atlassian.confluence.api.model.content.JsonContentProperty)}. :param content_id (string): The ID for the content to attach the property to. :param property_key (string): The key for the property to update. :param new_property_data (dict): The updated property data. This requires the keys "key", "value", and "version". :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/property/{key} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example updated property data: { "key": "example-property-key", "value": { "anything": "goes" }, "version": { "number": 2, "minorEdit": false } } """
assert isinstance(new_property_data, dict) and {"key", "value", "version"} <= set(new_property_data.keys())
return self._service_put_request("rest/api/content/{id}/property/{key}".format(id=content_id, key=property_key),
                                 data=json.dumps(new_property_data),
                                 headers={"Content-Type": "application/json"}, callback=callback)
<SYSTEM_TASK:> Updates a Space. <END_TASK> <USER_TASK:> Description: def update_space(self, space_key, space_definition, callback=None): """ Updates a Space. Currently only the Space name, description and homepage can be updated. :param space_key (string): The key of the space to update. :param space_definition (dict): The dictionary describing the updated space metadata. This should include "key", "name" and "description". :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space/{key} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. Example updated space definition: { "key": "TST", "name": "Example space", "description": { "plain": { "value": "This is an example space", "representation": "plain" } } } """
assert isinstance(space_definition, dict) and {"key", "name", "description"} <= set(space_definition.keys())
return self._service_put_request("rest/api/space/{key}".format(key=space_key),
                                 data=json.dumps(space_definition),
                                 headers={"Content-Type": "application/json"}, callback=callback)
<SYSTEM_TASK:> Converts between content body representations. <END_TASK> <USER_TASK:> Description: def convert_contentbody_to_new_type(self, content_data, old_representation, new_representation, callback=None): """ Converts between content body representations. Not all representations can be converted to/from other formats. Supported conversions: Source Representation | Destination Representation Supported -------------------------------------------------------------- "storage" | "view","export_view","editor" "editor" | "storage" "view" | None "export_view" | None :param content_data (string): The content data to transform. :param old_representation (string): The representation to convert from. :param new_representation (string): The representation to convert to. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the contentbody/convert/{to} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
assert {old_representation, new_representation} < {"storage", "editor", "view", "export_view"}
# TODO: Enforce conversion rules better here.
request_data = {"value": str(content_data), "representation": old_representation}
return self._service_post_request("rest/api/contentbody/convert/{to}".format(to=new_representation),
                                  data=json.dumps(request_data),
                                  headers={"Content-Type": "application/json"}, callback=callback)
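A hypothetical call following the conversion table in the docstring (the 'api' instance name is illustrative):

# html = api.convert_contentbody_to_new_type(
#     "<p>Some <b>storage</b> markup</p>", "storage", "view")
# Converting back from "view" or "export_view" is unsupported per the table above.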
<SYSTEM_TASK:> Deletes a label from the specified content. <END_TASK> <USER_TASK:> Description: def delete_label_by_id(self, content_id, label_name, callback=None): """ Deletes a label from the specified content. There is an alternative form of this delete method that is not implemented. A DELETE request to /rest/api/content/{id}/label/{label} will also delete a label, but is more limited in the label name that can be accepted (and has no real apparent upside). :param content_id (string): A string containing the id of the labels content container. :param label_name (string): OPTIONAL: The name of the label to be removed from the content. Default: Empty (probably deletes all labels). :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
params = {"name": label_name} return self._service_delete_request("rest/api/content/{id}/label".format(id=content_id), params=params, callback=callback)
<SYSTEM_TASK:> Deletes a Space. <END_TASK> <USER_TASK:> Description: def delete_space(self, space_key, callback=None): """ Deletes a Space. The space is deleted in a long running task, so the space cannot be considered deleted when this method returns. Clients can follow the status link in the response and poll it until the task completes. :param space_key (string): The key of the space to delete. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: A pointer to the longpoll task if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
return self._service_delete_request("rest/api/space/{key}".format(key=space_key), callback=callback)
<SYSTEM_TASK:> Add a sensor, warning if it exists. <END_TASK> <USER_TASK:> Description: def add(self, sensor): """Add a sensor, warning if it exists."""
if isinstance(sensor, (list, tuple)):
    for sss in sensor:
        self.add(sss)
    return

if not isinstance(sensor, Sensor):
    raise TypeError("pysma.Sensor expected")

if sensor.name in self:
    old = self[sensor.name]
    self.__s.remove(old)
    _LOGGER.warning("Replacing sensor %s with %s", old, sensor)

if sensor.key in self:
    _LOGGER.warning("Duplicate SMA sensor key %s", sensor.key)

self.__s.append(sensor)
<SYSTEM_TASK:> Read a set of keys. <END_TASK> <USER_TASK:> Description: def read(self, sensors): """Read a set of keys."""
payload = {'destDev': [], 'keys': list(set([s.key for s in sensors]))}

if self.sma_sid is None:
    yield from self.new_session()
    if self.sma_sid is None:
        return False

body = yield from self._fetch_json(URL_VALUES, payload=payload)

# On the first 401 error we close the session which will re-login
if body.get('err') == 401:
    _LOGGER.warning("401 error detected, closing session to force "
                    "another login attempt")
    self.close_session()
    return False

_LOGGER.debug(json.dumps(body))

for sen in sensors:
    if sen.extract_value(body):
        _LOGGER.debug("%s\t= %s %s", sen.name, sen.value, sen.unit)

return True
<SYSTEM_TASK:> Change state to playing. <END_TASK> <USER_TASK:> Description: def play(self): """Change state to playing."""
if self.state == STATE_PAUSED:
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
<SYSTEM_TASK:> When a message is received from Gstreamer. <END_TASK> <USER_TASK:> Description: def _on_message(self, bus, message): # pylint: disable=unused-argument """When a message is received from Gstreamer."""
if message.type == Gst.MessageType.EOS:
    self.stop()
elif message.type == Gst.MessageType.ERROR:
    self.stop()
    err, _ = message.parse_error()
    _LOGGER.error('%s', err)
<SYSTEM_TASK:> Return the node before this node. <END_TASK> <USER_TASK:> Description: def get_previous_node(node): """ Return the node before this node. """
if node.prev_sibling:
    return node.prev_sibling
if node.parent:
    return get_previous_node(node.parent)
<SYSTEM_TASK:> will construct kwargs for cmd <END_TASK> <USER_TASK:> Description: def casperjs_command_kwargs(): """ will construct kwargs for cmd """
kwargs = {
    'stdout': subprocess.PIPE,
    'stderr': subprocess.PIPE,
    'universal_newlines': True
}
phantom_js_cmd = app_settings['PHANTOMJS_CMD']
if phantom_js_cmd:
    path = '{0}:{1}'.format(
        os.getenv('PATH', ''), os.path.dirname(phantom_js_cmd)
    )
    kwargs.update({'env': {'PATH': path}})
return kwargs
<SYSTEM_TASK:> Captures web pages using ``casperjs`` <END_TASK> <USER_TASK:> Description: def casperjs_capture(stream, url, method=None, width=None, height=None, selector=None, data=None, waitfor=None, size=None, crop=None, render='png', wait=None): """ Captures web pages using ``casperjs`` """
if isinstance(stream, six.string_types):
    output = stream
else:
    with NamedTemporaryFile('wb+', suffix='.%s' % render, delete=False) as f:
        output = f.name
try:
    cmd = CASPERJS_CMD + [url, output]

    # Extra command-line options
    cmd += ['--format=%s' % render]
    if method:
        cmd += ['--method=%s' % method]
    if width:
        cmd += ['--width=%s' % width]
    if height:
        cmd += ['--height=%s' % height]
    if selector:
        cmd += ['--selector=%s' % selector]
    if data:
        cmd += ['--data="%s"' % json.dumps(data)]
    if waitfor:
        cmd += ['--waitfor=%s' % waitfor]
    if wait:
        cmd += ['--wait=%s' % wait]
    logger.debug(cmd)

    # Run CasperJS process
    proc = subprocess.Popen(cmd, **casperjs_command_kwargs())
    stdout = proc.communicate()[0]
    process_casperjs_stdout(stdout)

    size = parse_size(size)
    render = parse_render(render)
    if size or (render and render != 'png' and render != 'pdf'):
        # pdf isn't an image, therefore we can't postprocess it.
        image_postprocess(output, stream, size, crop, render)
    else:
        if stream != output:
            # From file to stream
            with open(output, 'rb') as out:
                stream.write(out.read())
            stream.flush()
finally:
    if stream != output:
        os.unlink(output)
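A hypothetical call capturing a page straight to a local PNG file; width and height map to the command-line flags built above:

# with open("shot.png", "wb") as out:
#     casperjs_capture(out, "https://example.com", width=1280, height=800)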
<SYSTEM_TASK:> Allow to override printing url, not necessarily on the same <END_TASK> <USER_TASK:> Description: def build_absolute_uri(request, url): """ Allow to override printing url, not necessarily on the same server instance. """
if app_settings.get('CAPTURE_ROOT_URL'):
    return urljoin(app_settings.get('CAPTURE_ROOT_URL'), url)
return request.build_absolute_uri(url)
<SYSTEM_TASK:> Render a template from django project, and return the <END_TASK> <USER_TASK:> Description: def render_template(template_name, context, format='png', output=None, using=None, **options): """ Render a template from django project, and return the file object of the result. """
# output stream, as required by casperjs_capture
stream = BytesIO()
out_f = None

# the suffix=.html is a hack for phantomjs which *will*
# complain about not being able to open source file
# unless it has a 'html' extension.
with NamedTemporaryFile(suffix='.html') as render_file:
    template_content = render_to_string(
        template_name,
        context,
        using=using,
    )

    # now, we need to replace all occurrences of STATIC_URL
    # with the corresponding file://STATIC_ROOT, but only
    # if STATIC_URL doesn't contain a public URI (like http(s))
    static_url = getattr(settings, 'STATIC_URL', '')
    if settings.STATIC_ROOT and\
            static_url and not static_url.startswith('http'):
        template_content = template_content.replace(
            static_url, 'file://%s' % settings.STATIC_ROOT
        )

    render_file.write(template_content.encode('utf-8'))
    # this is so that the temporary file actually gets filled
    # with the result.
    render_file.seek(0)

    casperjs_capture(
        stream,
        url='file://%s' % render_file.name,
        **options
    )

# if no output was provided, use NamedTemporaryFile
# (so it is an actual file) and return it (so that
# after function ends, it gets automatically removed)
if not output:
    out_f = NamedTemporaryFile()
else:
    # if output was provided, write the rendered
    # content to it
    out_f = open(output, 'wb')

out_f.write(stream.getvalue())
out_f.seek(0)

# return the output if NamedTemporaryFile was used
if not output:
    return out_f
else:
    # otherwise, just close the file.
    out_f.close()
<SYSTEM_TASK:> Launch an operation on a thread and get a handle to its future result. <END_TASK> <USER_TASK:> Description: def go(fn, *args, **kwargs):
    """Launch an operation on a thread and get a handle to its future result.

    >>> from time import sleep
    >>> def print_sleep_print(duration):
    ...     sleep(duration)
    ...     print('hello from background thread')
    ...     sleep(duration)
    ...     print('goodbye from background thread')
    ...     return 'return value'
    ...
    >>> future = go(print_sleep_print, 0.1)
    >>> sleep(0.15)
    hello from background thread
    >>> print('main thread')
    main thread
    >>> result = future()
    goodbye from background thread
    >>> result
    'return value'
    """
if not callable(fn):
    raise TypeError('go() requires a function, not %r' % (fn,))

result = [None]
error = []

def target():
    try:
        result[0] = fn(*args, **kwargs)
    except Exception:
        # Are we in interpreter shutdown?
        if sys:
            error.extend(sys.exc_info())

t = threading.Thread(target=target)
t.daemon = True
t.start()

def get_result(timeout=10):
    t.join(timeout)
    if t.is_alive():
        raise AssertionError('timed out waiting for %r' % fn)
    if error:
        reraise(*error)
    return result[0]

return get_result
<SYSTEM_TASK:> Launch a thread and wait for its result before exiting the code block. <END_TASK> <USER_TASK:> Description: def going(fn, *args, **kwargs):
    """Launch a thread and wait for its result before exiting the code block.

    >>> with going(lambda: 'return value') as future:
    ...     pass
    >>> future()  # Won't block, the future is ready by now.
    'return value'

    Or discard the result:

    >>> with going(lambda: "don't care"):
    ...     pass

    If an exception is raised within the context, the result is lost:

    >>> with going(lambda: 'return value') as future:
    ...     assert 1 == 0
    Traceback (most recent call last):
    ...
    AssertionError
    """
future = go(fn, *args, **kwargs)
try:
    yield future
except:
    # We are raising an exception, just try to clean up the future.
    exc_info = sys.exc_info()
    try:
        # Shorter than normal timeout.
        future(timeout=1)
    except:
        log_message = ('\nerror in %s:\n'
                       % format_call(inspect.currentframe()))
        sys.stderr.write(log_message)
        traceback.print_exc()
        # sys.stderr.write('exc in %s' % format_call(inspect.currentframe()))
    reraise(*exc_info)
else:
    # Raise exception or discard result.
    future(timeout=10)
<SYSTEM_TASK:> Call method while holding a lock. <END_TASK> <USER_TASK:> Description: def _synchronized(meth): """Call method while holding a lock."""
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
    with self._lock:
        return meth(self, *args, **kwargs)

return wrapper
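A self-contained illustration of the decorator's contract: the instance must expose a `_lock` attribute. The Counter class is hypothetical, and the decorator body is repeated so the snippet runs standalone:

import functools
import threading

def _synchronized(meth):
    # Same decorator as above, repeated here for a runnable example.
    @functools.wraps(meth)
    def wrapper(self, *args, **kwargs):
        with self._lock:
            return meth(self, *args, **kwargs)
    return wrapper

class Counter(object):
    def __init__(self):
        self._lock = threading.Lock()  # required by _synchronized
        self.value = 0

    @_synchronized
    def increment(self):
        self.value += 1

c = Counter()
c.increment()
print(c.value)  # -> 1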
<SYSTEM_TASK:> Take a client socket and return a Request. <END_TASK> <USER_TASK:> Description: def mock_server_receive_request(client, server): """Take a client socket and return a Request."""
header = mock_server_receive(client, 16)
length = _UNPACK_INT(header[:4])[0]
request_id = _UNPACK_INT(header[4:8])[0]
opcode = _UNPACK_INT(header[12:])[0]
msg_bytes = mock_server_receive(client, length - 16)
if opcode not in OPCODES:
    raise NotImplementedError("Don't know how to unpack opcode %d yet"
                              % opcode)
return OPCODES[opcode].unpack(msg_bytes, client, server, request_id)
<SYSTEM_TASK:> Receive `length` bytes from a socket object. <END_TASK> <USER_TASK:> Description: def mock_server_receive(sock, length): """Receive `length` bytes from a socket object."""
msg = b''
while length:
    chunk = sock.recv(length)
    if chunk == b'':
        raise socket.error(errno.ECONNRESET, 'closed')

    length -= len(chunk)
    msg += chunk

return msg
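The loop above guards against short reads: recv may return fewer bytes than requested. A runnable demonstration on an in-process socket pair (assumes a platform where socket.socketpair is available):

import socket

a, b = socket.socketpair()
a.sendall(b'hello world')

# Same exact-length loop as above.
msg = b''
length = 11
while length:
    chunk = b.recv(length)
    if chunk == b'':
        raise socket.error('closed')
    length -= len(chunk)
    msg += chunk

print(msg)  # -> b'hello world'
a.close()
b.close()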
<SYSTEM_TASK:> Make the documents for a `Request` or `Reply`. <END_TASK> <USER_TASK:> Description: def make_docs(*args, **kwargs): """Make the documents for a `Request` or `Reply`. Takes a variety of argument styles, returns a list of dicts. Used by `make_prototype_request` and `make_reply`, which are in turn used by `MockupDB.receives`, `Request.replies`, and so on. See examples in tutorial. """
err_msg = "Can't interpret args: " if not args and not kwargs: return [] if not args: # OpReply(ok=1, ismaster=True). return [kwargs] if isinstance(args[0], (int, float, bool)): # server.receives().ok(0, err='uh oh'). if args[1:]: raise_args_err(err_msg, ValueError) doc = OrderedDict({'ok': args[0]}) doc.update(kwargs) return [doc] if isinstance(args[0], (list, tuple)): # Send a batch: OpReply([{'a': 1}, {'a': 2}]). if not all(isinstance(doc, (OpReply, Mapping)) for doc in args[0]): raise_args_err('each doc must be a dict:') if kwargs: raise_args_err(err_msg, ValueError) return list(args[0]) if isinstance(args[0], (string_type, text_type)): if args[2:]: raise_args_err(err_msg, ValueError) if len(args) == 2: # Command('aggregate', 'collection', {'cursor': {'batchSize': 1}}). doc = OrderedDict({args[0]: args[1]}) else: # OpReply('ismaster', me='a.com'). doc = OrderedDict({args[0]: 1}) doc.update(kwargs) return [doc] if kwargs: raise_args_err(err_msg, ValueError) # Send a batch as varargs: OpReply({'a': 1}, {'a': 2}). if not all(isinstance(doc, (OpReply, Mapping)) for doc in args): raise_args_err('each doc must be a dict') return args
<SYSTEM_TASK:> True if seq0 is a subset of seq1 and their elements are in same order. <END_TASK> <USER_TASK:> Description: def seq_match(seq0, seq1):
    """True if seq0 is a subset of seq1 and their elements are in same order.

    >>> seq_match([], [])
    True
    >>> seq_match([1], [1])
    True
    >>> seq_match([1, 1], [1])
    False
    >>> seq_match([1], [1, 2])
    True
    >>> seq_match([1, 1], [1, 1])
    True
    >>> seq_match([3], [1, 2, 3])
    True
    >>> seq_match([1, 3], [1, 2, 3])
    True
    >>> seq_match([2, 1], [1, 2, 3])
    False
    """
len_seq1 = len(seq1)
if len_seq1 < len(seq0):
    return False
seq1_idx = 0
for i, elem in enumerate(seq0):
    while seq1_idx < len_seq1:
        if seq1[seq1_idx] == elem:
            break
        seq1_idx += 1
    if seq1_idx >= len_seq1 or seq1[seq1_idx] != elem:
        return False
    seq1_idx += 1

return True
<SYSTEM_TASK:> Throw an error with standard message, displaying function call. <END_TASK> <USER_TASK:> Description: def raise_args_err(message='bad arguments', error_class=TypeError):
    """Throw an error with standard message, displaying function call.

    >>> def f(a, *args, **kwargs):
    ...     raise_args_err()
    ...
    >>> f(1, 2, x='y')
    Traceback (most recent call last):
    ...
    TypeError: bad arguments: f(1, 2, x='y')
    """
frame = inspect.currentframe().f_back
raise error_class(message + ': ' + format_call(frame))
<SYSTEM_TASK:> A `MockupDB` that the mongo shell can connect to. <END_TASK> <USER_TASK:> Description: def interactive_server(port=27017, verbose=True, all_ok=False, name='MockupDB', ssl=False, uds_path=None): """A `MockupDB` that the mongo shell can connect to. Call `~.MockupDB.run` on the returned server, and clean it up with `~.MockupDB.stop`. If ``all_ok`` is True, replies {ok: 1} to anything unmatched by a specific responder. """
if uds_path is not None:
    port = None

server = MockupDB(port=port,
                  verbose=verbose,
                  request_timeout=int(1e6),
                  ssl=ssl,
                  auto_ismaster=True,
                  uds_path=uds_path)
if all_ok:
    server.append_responder({})
server.autoresponds('whatsmyuri', you='localhost:12345')
server.autoresponds({'getLog': 'startupWarnings'},
                    log=['hello from %s!' % name])
server.autoresponds(OpMsg('buildInfo'), version='MockupDB ' + __version__)
server.autoresponds(OpMsg('listCollections'))
server.autoresponds('replSetGetStatus', ok=0)
server.autoresponds('getFreeMonitoringStatus', ok=0)
return server
<SYSTEM_TASK:> Error reply to a command. <END_TASK> <USER_TASK:> Description: def command_err(self, code=1, errmsg='MockupDB command failure', *args, **kwargs): """Error reply to a command. Returns True so it is suitable as an `~MockupDB.autoresponds` handler. """
kwargs.setdefault('ok', 0)
kwargs['code'] = code
kwargs['errmsg'] = errmsg
self.replies(*args, **kwargs)
return True
<SYSTEM_TASK:> Parse message and return an `OpMsg`. <END_TASK> <USER_TASK:> Description: def unpack(cls, msg, client, server, request_id): """Parse message and return an `OpMsg`. Takes the client message as bytes, the client and server socket objects, and the client request id. """
payload_document = OrderedDict()
flags, = _UNPACK_UINT(msg[:4])
pos = 4
if flags != 0 and flags != 2:
    raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,))
while pos < len(msg):
    payload_type, = _UNPACK_BYTE(msg[pos:pos + 1])
    pos += 1
    payload_size, = _UNPACK_INT(msg[pos:pos + 4])
    if payload_type == 0:
        doc = bson.decode_all(msg[pos:pos + payload_size],
                              CODEC_OPTIONS)[0]
        payload_document.update(doc)
        pos += payload_size
    elif payload_type == 1:
        section_size, = _UNPACK_INT(msg[pos:pos + 4])
        pos += 4
        identifier, pos = _get_c_string(msg, pos)
        # Section starts w/ 4-byte size prefix, identifier ends w/ nil.
        documents_len = section_size - len(identifier) - 1 - 4
        documents = bson.decode_all(msg[pos:pos + documents_len],
                                    CODEC_OPTIONS)
        payload_document[identifier] = documents
        pos += documents_len
database = payload_document['$db']
return OpMsg(payload_document,
             namespace=database,
             flags=flags,
             _client=client,
             request_id=request_id,
             _server=server)
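A hedged sketch of the wire layout this parser consumes: a flags word, then a payload-type-0 section holding one BSON command document. It assumes a recent pymongo whose bson package exposes bson.encode; the command itself is arbitrary:

import struct
import bson

# Flag bits (0), then payload type 0, then a single BSON document.
doc = bson.encode({'ping': 1, '$db': 'admin'})
body = struct.pack('<I', 0) + struct.pack('<b', 0) + doc
# unpack() above reads flags at offset 0, the payload type byte at
# offset 4, then decodes the BSON document that follows.
print(len(body))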
<SYSTEM_TASK:> Parse message and return an `OpQuery` or `Command`. <END_TASK> <USER_TASK:> Description: def unpack(cls, msg, client, server, request_id): """Parse message and return an `OpQuery` or `Command`. Takes the client message as bytes, the client and server socket objects, and the client request id. """
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
is_command = namespace.endswith('.$cmd')
num_to_skip, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
if is_command:
    assert len(docs) == 1
    command_ns = namespace[:-len('.$cmd')]
    return Command(docs,
                   namespace=command_ns,
                   flags=flags,
                   _client=client,
                   request_id=request_id,
                   _server=server)
else:
    if len(docs) == 1:
        fields = None
    else:
        assert len(docs) == 2
        fields = docs[1]
    return OpQuery(docs[0],
                   fields=fields,
                   namespace=namespace,
                   flags=flags,
                   num_to_skip=num_to_skip,
                   num_to_return=num_to_return,
                   _client=client,
                   request_id=request_id,
                   _server=server)
<SYSTEM_TASK:> Parse message and return an `OpGetMore`. <END_TASK> <USER_TASK:> Description: def unpack(cls, msg, client, server, request_id): """Parse message and return an `OpGetMore`. Takes the client message as bytes, the client and server socket objects, and the client request id. """
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
num_to_return, = _UNPACK_INT(msg[pos:pos + 4])
pos += 4
cursor_id, = _UNPACK_LONG(msg[pos:pos + 8])
return OpGetMore(namespace=namespace,
                 flags=flags,
                 _client=client,
                 num_to_return=num_to_return,
                 cursor_id=cursor_id,
                 request_id=request_id,
                 _server=server)
<SYSTEM_TASK:> Parse message and return an `OpKillCursors`. <END_TASK> <USER_TASK:> Description: def unpack(cls, msg, client, server, _): """Parse message and return an `OpKillCursors`. Takes the client message as bytes, the client and server socket objects, and the client request id. """
# Leading 4 bytes are reserved.
num_of_cursor_ids, = _UNPACK_INT(msg[4:8])
cursor_ids = []
pos = 8
for _ in range(num_of_cursor_ids):
    cursor_ids.append(_UNPACK_INT(msg[pos:pos + 4])[0])
    pos += 4
return OpKillCursors(_client=client,
                     cursor_ids=cursor_ids,
                     _server=server)
<SYSTEM_TASK:> Parse message and return an `OpInsert`. <END_TASK> <USER_TASK:> Description: def unpack(cls, msg, client, server, request_id): """Parse message and return an `OpInsert`. Takes the client message as bytes, the client and server socket objects, and the client request id. """
flags, = _UNPACK_INT(msg[:4])
namespace, pos = _get_c_string(msg, 4)
docs = bson.decode_all(msg[pos:], CODEC_OPTIONS)
return cls(*docs,
           namespace=namespace,
           flags=flags,
           _client=client,
           request_id=request_id,
           _server=server)
<SYSTEM_TASK:> Take a `Request` and return an OP_REPLY message as bytes. <END_TASK> <USER_TASK:> Description: def reply_bytes(self, request): """Take a `Request` and return an OP_REPLY message as bytes."""
flags = struct.pack("<i", self._flags) cursor_id = struct.pack("<q", self._cursor_id) starting_from = struct.pack("<i", self._starting_from) number_returned = struct.pack("<i", len(self._docs)) reply_id = random.randint(0, 1000000) response_to = request.request_id data = b''.join([flags, cursor_id, starting_from, number_returned]) data += b''.join([bson.BSON.encode(doc) for doc in self._docs]) message = struct.pack("<i", 16 + len(data)) message += struct.pack("<i", reply_id) message += struct.pack("<i", response_to) message += struct.pack("<i", OP_REPLY) return message + data
<SYSTEM_TASK:> Take a `Request` and return an OP_MSG message as bytes. <END_TASK> <USER_TASK:> Description: def reply_bytes(self, request): """Take a `Request` and return an OP_MSG message as bytes."""
flags = struct.pack("<I", self._flags) payload_type = struct.pack("<b", 0) payload_data = bson.BSON.encode(self.doc) data = b''.join([flags, payload_type, payload_data]) reply_id = random.randint(0, 1000000) response_to = request.request_id header = struct.pack( "<iiii", 16 + len(data), reply_id, response_to, OP_MSG) return header + data
<SYSTEM_TASK:> Begin serving. Returns the bound port, or 0 for domain socket. <END_TASK> <USER_TASK:> Description: def run(self): """Begin serving. Returns the bound port, or 0 for domain socket."""
self._listening_sock, self._address = (
    bind_domain_socket(self._address)
    if self._uds_path
    else bind_tcp_socket(self._address))
if self._ssl:
    certfile = os.path.join(os.path.dirname(__file__), 'server.pem')
    self._listening_sock = _ssl.wrap_socket(
        self._listening_sock,
        certfile=certfile,
        server_side=True)
self._accept_thread = threading.Thread(target=self._accept_loop)
self._accept_thread.daemon = True
self._accept_thread.start()
return self.port
<SYSTEM_TASK:> Stop serving. Always call this to clean up after yourself. <END_TASK> <USER_TASK:> Description: def stop(self): """Stop serving. Always call this to clean up after yourself."""
self._stopped = True
threads = [self._accept_thread]
threads.extend(self._server_threads)
self._listening_sock.close()
for sock in list(self._server_socks):
    try:
        sock.shutdown(socket.SHUT_RDWR)
    except socket.error:
        pass

    try:
        sock.close()
    except socket.error:
        pass

with self._unlock():
    for thread in threads:
        thread.join(10)

if self._uds_path:
    try:
        os.unlink(self._uds_path)
    except OSError:
        pass
<SYSTEM_TASK:> Pop the next `Request` and assert it matches. <END_TASK> <USER_TASK:> Description: def receives(self, *args, **kwargs): """Pop the next `Request` and assert it matches. Returns None if the server is stopped. Pass a `Request` or request pattern to specify what client request to expect. See the tutorial for examples. Pass ``timeout`` as a keyword argument to override this server's ``request_timeout``. """
timeout = kwargs.pop('timeout', self._request_timeout)
end = time.time() + timeout
matcher = Matcher(*args, **kwargs)
while not self._stopped:
    try:
        # Short timeout so we notice if the server is stopped.
        request = self._request_q.get(timeout=0.05)
    except Empty:
        if time.time() > end:
            raise AssertionError('expected to receive %r, got nothing'
                                 % matcher.prototype)
    else:
        if matcher.matches(request):
            return request
        else:
            raise AssertionError('expected to receive %r, got %r'
                                 % (matcher.prototype, request))
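A hedged sketch of the usual expect/reply test pattern built on receives(), assuming the mockupdb and pymongo packages are installed; the collection name and document are placeholders:

from mockupdb import MockupDB, go
from pymongo import MongoClient

server = MockupDB(auto_ismaster=True)
server.run()
client = MongoClient(server.uri)

# Run the client call on a background thread, since it blocks until
# the mock server replies.
future = go(client.db.coll.insert_one, {'x': 1})
request = server.receives()  # pops the insert request
request.ok()                 # replies {ok: 1}
future()                     # the insert_one call completes

client.close()
server.stop()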
<SYSTEM_TASK:> Send a canned reply to all matching client requests. <END_TASK> <USER_TASK:> Description: def autoresponds(self, matcher, *args, **kwargs):
    """Send a canned reply to all matching client requests.

    ``matcher`` is a `Matcher` or a command name, or an instance of
    `OpInsert`, `OpQuery`, etc.

    >>> s = MockupDB()
    >>> port = s.run()
    >>>
    >>> from pymongo import MongoClient
    >>> client = MongoClient(s.uri)
    >>> responder = s.autoresponds('ismaster', maxWireVersion=6)
    >>> client.admin.command('ismaster') == {'ok': 1, 'maxWireVersion': 6}
    True

    The remaining arguments are a :ref:`message spec <message spec>`:

    >>> responder = s.autoresponds('bar', ok=0, errmsg='err')
    >>> client.db.command('bar')
    Traceback (most recent call last):
    ...
    OperationFailure: command SON([('bar', 1)]) on namespace db.$cmd failed: err
    >>> responder = s.autoresponds(OpMsg('find', 'collection'),
    ...                            {'cursor': {'id': 0, 'firstBatch': [{'_id': 1}, {'_id': 2}]}})
    >>> list(client.db.collection.find()) == [{'_id': 1}, {'_id': 2}]
    True
    >>> responder = s.autoresponds(OpMsg('find', 'collection'),
    ...                            {'cursor': {'id': 0, 'firstBatch': [{'a': 1}, {'a': 2}]}})
    >>> list(client.db.collection.find()) == [{'a': 1}, {'a': 2}]
    True

    Remove an autoresponder like:

    >>> responder.cancel()

    If the request currently at the head of the queue matches, it is
    popped and replied to. Future matching requests skip the queue.

    >>> future = go(client.db.command, 'baz')
    >>> responder = s.autoresponds('baz', {'key': 'value'})
    >>> future() == {'ok': 1, 'key': 'value'}
    True

    Responders are applied in order, most recently added first, until
    one matches:

    >>> responder = s.autoresponds('baz')
    >>> client.db.command('baz') == {'ok': 1}
    True
    >>> responder.cancel()
    >>> # The previous responder takes over again.
    >>> client.db.command('baz') == {'ok': 1, 'key': 'value'}
    True

    You can pass a request handler in place of the message spec.
    Return True if you handled the request:

    >>> responder = s.autoresponds('baz', lambda r: r.ok(a=2))

    The standard `Request.ok`, `~Request.replies`, `~Request.fail`,
    `~Request.hangup` and so on all return True to make them suitable
    as handler functions.

    >>> client.db.command('baz') == {'ok': 1, 'a': 2}
    True

    If the request is not handled, it is checked against the remaining
    responders, or enqueued if none match.

    You can pass the handler as the only argument so it receives *all*
    requests. For example you could log them, then return None to allow
    other handlers to run:

    >>> def logger(request):
    ...     if not request.matches('ismaster'):
    ...         print('logging: %r' % request)
    >>> responder = s.autoresponds(logger)
    >>> client.db.command('baz') == {'ok': 1, 'a': 2}
    logging: OpMsg({"baz": 1, "$db": "db", "$readPreference": {"mode": "primaryPreferred"}}, namespace="db")
    True

    The synonym `subscribe` better expresses your intent if your handler
    never returns True:

    >>> subscriber = s.subscribe(logger)

    .. doctest:
        :hide:

        >>> client.close()
        >>> s.stop()
    """
return self._insert_responder("top", matcher, *args, **kwargs)
<SYSTEM_TASK:> Add a responder of last resort. <END_TASK> <USER_TASK:> Description: def append_responder(self, matcher, *args, **kwargs): """Add a responder of last resort. Like `.autoresponds`, but instead of adding a responder to the top of the stack, add it to the bottom. This responder will be called if no others match. """
return self._insert_responder("bottom", matcher, *args, **kwargs)
<SYSTEM_TASK:> Connection string to pass to `~pymongo.mongo_client.MongoClient`. <END_TASK> <USER_TASK:> Description: def uri(self): """Connection string to pass to `~pymongo.mongo_client.MongoClient`."""
if self._uds_path:
    uri = 'mongodb://%s' % (quote_plus(self._uds_path),)
else:
    uri = 'mongodb://%s' % (format_addr(self._address),)
return uri + '/?ssl=true' if self._ssl else uri
<SYSTEM_TASK:> Accept client connections and spawn a thread for each. <END_TASK> <USER_TASK:> Description: def _accept_loop(self): """Accept client connections and spawn a thread for each."""
self._listening_sock.setblocking(0)
while not self._stopped and not _shutting_down:
    try:
        # Wait a short time to accept.
        if select.select([self._listening_sock.fileno()], [], [], 1):
            client, client_addr = self._listening_sock.accept()
            client.setblocking(True)
            self._log('connection from %s' % format_addr(client_addr))
            server_thread = threading.Thread(
                target=functools.partial(
                    self._server_loop, client, client_addr))

            # Store weakrefs to the thread and socket, so we can
            # dispose them in stop().
            self._server_threads[server_thread] = None
            self._server_socks[client] = None
            server_thread.daemon = True
            server_thread.start()
    except socket.error as error:
        if error.errno not in (
                errno.EAGAIN, errno.EBADF, errno.EWOULDBLOCK):
            raise
    except select.error as error:
        if error.args[0] == errno.EBADF:
            # Closed.
            break
        else:
            raise
<SYSTEM_TASK:> Read requests from one client socket, 'client'. <END_TASK> <USER_TASK:> Description: def _server_loop(self, client, client_addr): """Read requests from one client socket, 'client'."""
while not self._stopped and not _shutting_down:
    try:
        with self._unlock():
            request = mock_server_receive_request(client, self)

        self._requests_count += 1
        self._log('%d\t%r' % (request.client_port, request))

        # Give most recently added responders precedence.
        for responder in reversed(self._autoresponders):
            if responder.handle(request):
                self._log('\t(autoresponse)')
                break
        else:
            self._request_q.put(request)
    except socket.error as error:
        if error.errno in (errno.ECONNRESET, errno.EBADF):
            # We hung up, or the client did.
            break
        raise
    except select.error as error:
        if error.args[0] == errno.EBADF:
            # Closed.
            break
        else:
            raise
    except AssertionError:
        traceback.print_exc()
        break

self._log('disconnected: %s' % format_addr(client_addr))
client.close()
<SYSTEM_TASK:> The actual password-checking logic, separated from Django's authenticate code for easier updating. <END_TASK> <USER_TASK:> Description: def check_password(self, username, password):
    """The actual password-checking logic, separated from Django's authenticate code for easier updating."""
try:
    if SUPPORTS_VERIFY:
        kerberos.checkPassword(username.lower(),
                               password,
                               getattr(settings, "KRB5_SERVICE", ""),
                               getattr(settings, "KRB5_REALM", ""),
                               getattr(settings, "KRB5_VERIFY_KDC", True))
    else:
        kerberos.checkPassword(username.lower(),
                               password,
                               getattr(settings, "KRB5_SERVICE", ""),
                               getattr(settings, "KRB5_REALM", ""))
    return True
except kerberos.BasicAuthError:
    if getattr(settings, "KRB5_DEBUG", False):
        logger.exception("Failure during authentication")
    return False
except:
    if getattr(settings, "KRB5_DEBUG", False):
        logger.exception("Failure during authentication")
    # for all other exceptions also deny access
    return False
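For context, the Django settings this backend consults, as a hedged settings.py sketch; all values are placeholders, only the setting names come from the code above:

# settings.py
KRB5_SERVICE = 'host/web.example.com'   # placeholder service principal
KRB5_REALM = 'EXAMPLE.COM'              # placeholder realm
KRB5_VERIFY_KDC = True                  # only consulted when SUPPORTS_VERIFY
KRB5_DEBUG = False                      # log exceptions during authentication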
<SYSTEM_TASK:> Initialize the distance grid by calls to _grid_dist. <END_TASK> <USER_TASK:> Description: def _initialize_distance_grid(self): """Initialize the distance grid by calls to _grid_dist."""
p = [self._grid_distance(i) for i in range(self.num_neurons)]
return np.array(p)
<SYSTEM_TASK:> Calculate the distance grid for a single index position. <END_TASK> <USER_TASK:> Description: def _grid_distance(self, index): """ Calculate the distance grid for a single index position. This is pre-calculated for fast neighborhood calculations later on (see _calc_influence). """
# Take every dimension but the first in reverse
# then reverse that list again.
dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]

coord = []
for idx, dim in enumerate(dimensions):
    if idx != 0:
        value = (index % dimensions[idx-1]) // dim
    else:
        value = index // dim
    coord.append(value)

coord.append(index % self.map_dimensions[-1])

for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
    x = np.abs(np.arange(width) - row) ** 2
    dims = self.map_dimensions[::-1]
    if idx:
        dims = dims[:-idx]
    x = np.broadcast_to(x, dims).T
    if idx == 0:
        distance = np.copy(x)
    else:
        distance += x.T

return distance
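For intuition, the squared-euclidean distance on grid coordinates can also be computed directly from unraveled indices. A minimal sketch for a hypothetical 3x3 map, measured from the center unit; this re-derivation is for illustration and is not taken from the package:

import numpy as np

map_dimensions = (3, 3)
index = 4  # the center unit of the 3x3 grid

coords = np.stack(np.unravel_index(np.arange(np.prod(map_dimensions)),
                                   map_dimensions), 1)
distance = ((coords - coords[index]) ** 2).sum(1).reshape(map_dimensions)
print(distance)
# [[2 1 2]
#  [1 0 1]
#  [2 1 2]]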
<SYSTEM_TASK:> Calculate the topographic error. <END_TASK> <USER_TASK:> Description: def topographic_error(self, X, batch_size=1):
    """
    Calculate the topographic error.

    The topographic error is a measure of the spatial organization of
    the map. Maps in which the most similar neurons are also close on
    the grid have low topographic error and indicate that a problem has
    been learned correctly.

    Formally, the topographic error is the proportion of units for
    which the two most similar neurons are not direct neighbors on the
    map.

    Parameters
    ----------
    X : numpy array.
        The input data.
    batch_size : int
        The batch size to use when calculating the topographic error.

    Returns
    -------
    error : numpy array
        A vector of numbers, representing the topographic error
        for each data point.
    """
dist = self.transform(X, batch_size)
# Sort the distances and get the indices of the two smallest distances
# for each datapoint.
res = dist.argsort(1)[:, :2]
# Lookup the euclidean distance between these points in the distance
# grid
dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
res = np.asarray([dgrid[x, y] for x, y in res])
# 1.0 is the smallest distance between neighbors, so any BMU pair
# farther apart than that counts as a topographic error.
return np.sum(res > 1.0) / len(res)
<SYSTEM_TASK:> Get all neighbors for all neurons. <END_TASK> <USER_TASK:> Description: def neighbors(self, distance=2.0): """Get all neighbors for all neurons."""
dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
for x, y in zip(*np.nonzero(dgrid <= distance)):
    if x != y:
        yield x, y
<SYSTEM_TASK:> Get the euclidean distance between a node and its neighbors. <END_TASK> <USER_TASK:> Description: def neighbor_difference(self): """Get the euclidean distance between a node and its neighbors."""
differences = np.zeros(self.num_neurons)
num_neighbors = np.zeros(self.num_neurons)

distance, _ = self.distance_function(self.weights, self.weights)
for x, y in self.neighbors():
    differences[x] += distance[x, y]
    num_neighbors[x] += 1

return differences / num_neighbors
<SYSTEM_TASK:> Calculate the average spread for each node. <END_TASK> <USER_TASK:> Description: def spread(self, X):
    """
    Calculate the average spread for each node.

    The average spread is a measure of how far each neuron is from the
    data points which cluster to it.

    Parameters
    ----------
    X : numpy array
        The input data.

    Returns
    -------
    spread : numpy array
        The average distance from each neuron to each data point.
    """
distance, _ = self.distance_function(X, self.weights)
dists_per_neuron = defaultdict(list)
for x, y in zip(np.argmin(distance, 1), distance):
    dists_per_neuron[x].append(y[x])

out = np.zeros(self.num_neurons)
average_spread = {k: np.mean(v) for k, v in dists_per_neuron.items()}

for x, y in average_spread.items():
    out[x] = y

return out
<SYSTEM_TASK:> Calculate the receptive field of the SOM on some data. <END_TASK> <USER_TASK:> Description: def receptive_field(self, X, identities, max_len=10, threshold=0.9, batch_size=1):
    """
    Calculate the receptive field of the SOM on some data.

    The receptive field is the common ending of all sequences which
    lead to the activation of a given BMU. If a SOM is well-tuned to
    specific sequences, it will have longer receptive fields, and
    therefore gives a better description of the dynamics of a given
    system.

    Parameters
    ----------
    X : numpy array
        Input data.
    identities : list
        A list of symbolic identities associated with each input.
        We expect this list to be as long as the input data.
    max_len : int, optional, default 10
        The maximum length to attempt to find. Raising this increases
        memory use.
    threshold : float, optional, default .9
        The threshold at which we consider a receptive field valid.
        If at least this proportion of the sequences of a neuron have
        the same suffix, that suffix is counted as acquired by the SOM.
    batch_size : int, optional, default 1
        The batch size to use in prediction

    Returns
    -------
    receptive_fields : dict
        A dictionary mapping from the neuron id to the found sequences
        for that neuron. The sequences are represented as lists of
        symbols from identities.
    """
receptive_fields = defaultdict(list)

predictions = self.predict(X, batch_size)

if len(predictions) != len(identities):
    raise ValueError("X and identities are not the same length: "
                     "{0} and {1}".format(len(X), len(identities)))

for idx, p in enumerate(predictions.tolist()):
    receptive_fields[p].append(identities[idx+1 - max_len:idx+1])

rec = {}

for k, v in receptive_fields.items():
    # if there's only one sequence, we don't know
    # anything about how salient it is.
    seq = []
    if len(v) <= 1:
        continue
    else:
        for x in reversed(list(zip(*v))):
            x = Counter(x)
            if x.most_common(1)[0][1] / sum(x.values()) > threshold:
                seq.append(x.most_common(1)[0][0])
            else:
                rec[k] = seq
                break

return rec
<SYSTEM_TASK:> Calculate the inverted projection. <END_TASK> <USER_TASK:> Description: def invert_projection(self, X, identities):
    """
    Calculate the inverted projection.

    The inverted projection of a SOM is created by associating each
    weight with the input item that matches it most closely, thus
    giving a good approximation of the "influence" of each input item.
    Works best for symbolic (instead of continuous) input data.

    Parameters
    ----------
    X : numpy array
        Input data
    identities : list
        A list of names for each of the input data. Must be the same
        length as X.

    Returns
    -------
    m : numpy array
        An array with the same shape as the map
    """
distances = self.transform(X)

if len(distances) != len(identities):
    raise ValueError("X and identities are not the same length: "
                     "{0} and {1}".format(len(X), len(identities)))

node_match = []

for d in distances.__getattribute__(self.argfunc)(0):
    node_match.append(identities[d])

return np.array(node_match)
<SYSTEM_TASK:> Reshaped weights for visualization. <END_TASK> <USER_TASK:> Description: def map_weights(self):
    """
    Reshaped weights for visualization.

    The weights are reshaped as
    (W.shape[0], prod(W.shape[1:-1]), W.shape[2]).
    This allows one to easily see patterns, even for hyper-dimensional
    soms.

    For one-dimensional SOMs, the returned array is of shape
    (W.shape[0], 1, W.shape[2])

    Returns
    -------
    w : numpy array
        A three-dimensional array containing the weights in a 2D array
        for easy visualization.
    """
first_dim = self.map_dimensions[0]
if len(self.map_dimensions) != 1:
    second_dim = np.prod(self.map_dimensions[1:])
else:
    second_dim = 1

# Reshape to appropriate dimensions
return self.weights.reshape((first_dim,
                             second_dim,
                             self.data_dimensionality))
<SYSTEM_TASK:> Load a SOM from a JSON file saved with this package. <END_TASK> <USER_TASK:> Description: def load(cls, path):
    """
    Load a SOM from a JSON file saved with this package.

    Parameters
    ----------
    path : str
        The path to the JSON file.

    Returns
    -------
    s : cls
        A som of the specified class.
    """
data = json.load(open(path))

weights = data['weights']
weights = np.asarray(weights, dtype=np.float64)

s = cls(data['map_dimensions'],
        data['params']['lr']['orig'],
        data['data_dimensionality'],
        influence=data['params']['infl']['orig'],
        lr_lambda=data['params']['lr']['factor'],
        infl_lambda=data['params']['infl']['factor'])

s.weights = weights
s.trained = True

return s
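Judging from the keys read above, the JSON file is shaped roughly like the following Python literal; all values here are made up for illustration:

example = {
    'weights': [[0.1, 0.2], [0.3, 0.4]],   # num_neurons x data_dimensionality
    'map_dimensions': [2, 1],
    'data_dimensionality': 2,
    'params': {
        'lr':   {'orig': 0.3, 'factor': 0.997},
        'infl': {'orig': 1.0, 'factor': 0.997},
    },
}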
<SYSTEM_TASK:> Delete a directory recursively. <END_TASK> <USER_TASK:> Description: def remove_dirs(self, directory):
    """Delete a directory recursively.

    :param directory: $PATH to directory.
    :type directory: ``str``
    """
LOG.info('Removing directory [ %s ]', directory)
local_files = self._drectory_local_files(directory=directory)
for file_name in local_files:
    try:
        os.remove(file_name['local_object'])
    except OSError as exp:
        LOG.error(str(exp))

# Build a list of all local directories
directories = sorted(
    [i for i, _, _ in os.walk(directory)],
    reverse=True
)

# Remove directories
for directory_path in directories:
    try:
        os.removedirs(directory_path)
    except OSError as exp:
        if exp.errno != 2:
            LOG.error(str(exp))
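A self-contained sketch of the same delete-files-then-prune-directories approach, runnable against a throwaway temp tree and independent of the class above:

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'a', 'b'))
open(os.path.join(root, 'a', 'b', 'f.txt'), 'w').close()

# Remove files first, deepest directories first.
for dirpath, _, filenames in os.walk(root, topdown=False):
    for name in filenames:
        os.remove(os.path.join(dirpath, name))

# Then prune the directory tree bottom-up, as remove_dirs does.
for dirpath in sorted([i for i, _, _ in os.walk(root)], reverse=True):
    try:
        os.removedirs(dirpath)
    except OSError:
        pass

print(os.path.exists(root))  # -> False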