code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def members(self):
    """:class:`Feed <pypump.models.feed.Feed>` of collection members.

    Created lazily on first access and cached on the instance.
    """
    if self._members is None:
        self._members = Feed(self.links["members"], pypump=self._pump)
    return self._members
:class:`Feed <pypump.models.feed.Feed>` of collection members.
def add(self, obj):
    """Adds a member to the collection.

    :param obj: Object to add.

    Example:
        >>> mycollection.add(pump.Person('[email protected]'))
    """
    self._post_activity({
        "verb": "add",
        "object": {
            "objectType": obj.object_type,
            "id": obj.id,
        },
        "target": {
            "objectType": self.object_type,
            "id": self.id,
        },
    })
    # Drop the cached member feed so it's re-generated next time it's needed.
    self._members = None
Adds a member to the collection. :param obj: Object to add. Example: >>> mycollection.add(pump.Person('[email protected]'))
def remove(self, obj):
    """Removes a member from the collection.

    :param obj: Object to remove.

    Example:
        >>> mycollection.remove(pump.Person('[email protected]'))
    """
    self._post_activity({
        "verb": "remove",
        "object": {
            "objectType": obj.object_type,
            "id": obj.id,
        },
        "target": {
            "objectType": self.object_type,
            "id": self.id,
        },
    })
    # Drop the cached member feed so it's re-generated next time it's needed.
    self._members = None
Removes a member from the collection. :param obj: Object to remove. Example: >>> mycollection.remove(pump.Person('[email protected]'))
def outbox(self):
    """:class:`Outbox feed <pypump.models.feed.Outbox>` of activities sent
    by the person. Created lazily and cached."""
    if self._outbox is None:
        self._outbox = Outbox(self.links['activity-outbox'], pypump=self._pump)
    return self._outbox
:class:`Outbox feed <pypump.models.feed.Outbox>` with all :class:`activities <pypump.models.activity.Activity>` sent by the person. Example: >>> for activity in pump.me.outbox[:2]: ... print(activity) ... pypumptest2 unliked a comment in reply to a note pypumptest2 deleted a note
def followers(self):
    """:class:`Feed <pypump.models.feed.Feed>` of the people following this
    person. Created lazily and cached."""
    if self._followers is None:
        self._followers = Followers(self.links['followers'], pypump=self._pump)
    return self._followers
:class:`Feed <pypump.models.feed.Feed>` with all :class:`Person <pypump.models.person.Person>` objects following the person. Example: >>> alice = pump.Person('[email protected]') >>> for follower in alice.followers[:2]: ... print(follower.id) ... acct:[email protected] acct:[email protected]
def following(self):
    """:class:`Feed <pypump.models.feed.Feed>` of the people this person
    follows. Created lazily and cached."""
    if self._following is None:
        self._following = Following(self.links['following'], pypump=self._pump)
    return self._following
:class:`Feed <pypump.models.feed.Feed>` with all :class:`Person <pypump.models.person.Person>` objects followed by the person. Example: >>> bob = pump.Person('[email protected]') >>> for followee in bob.following[:3]: ... print(followee.id) ... acct:[email protected] acct:[email protected]
def favorites(self):
    """:class:`Feed <pypump.models.feed.Feed>` of all objects liked/favorited
    by the person. Created lazily and cached."""
    if self._favorites is None:
        self._favorites = Favorites(self.links['favorites'], pypump=self._pump)
    return self._favorites
:class:`Feed <pypump.models.feed.Feed>` with all objects liked/favorited by the person. Example: >>> for like in pump.me.favorites[:3]: ... print(like) ... note by [email protected] image by [email protected] comment by [email protected]
def lists(self):
    """:class:`Lists feed <pypump.models.feed.Lists>` of all lists owned by
    the person. Created lazily and cached."""
    if self._lists is None:
        self._lists = Lists(self.links['lists'], pypump=self._pump)
    return self._lists
:class:`Lists feed <pypump.models.feed.Lists>` with all lists owned by the person. Example: >>> for list in pump.me.lists: ... print(list) ... Acquaintances Family Coworkers Friends
def inbox(self):
    """:class:`Inbox feed <pypump.models.feed.Inbox>` of activities received
    by the person.

    Only readable when logged in as the owner; otherwise raises
    PyPumpException.
    """
    if not self.isme:
        raise PyPumpException("You can't read other people's inboxes")
    if self._inbox is None:
        self._inbox = Inbox(self.links['activity-inbox'], pypump=self._pump)
    return self._inbox
:class:`Inbox feed <pypump.models.feed.Inbox>` with all :class:`activities <pypump.models.activity.Activity>` received by the person, can only be read if logged in as the owner. Example: >>> for activity in pump.me.inbox[:2]: ... print(activity.id) ... https://microca.st/api/activity/BvqXQOwXShSey1HxYuJQBQ https://pumpyourself.com/api/activity/iQGdnz5-T-auXnbUUdXh-A
def _verb(self, verb):
    """Post a minimal activity: *verb* applied to this bare object.

    :param verb: verb to be used.
    """
    self._post_activity({
        "verb": verb,
        "object": {
            "id": self.id,
            "objectType": self.object_type,
        },
    })
Posts minimal activity with verb and bare self object. :param verb: verb to be used.
def _post_activity(self, activity, unserialize=True):
    """Post *activity* to the logged-in user's feed.

    :param activity: activity dict to send.
    :param unserialize: when True, update this object from the response.
    :returns: True on success, False when the server returned no data.
    :raises PumpException: when the server reports an error.
    """
    # I think we always want to post to feed
    feed_url = "{proto}://{server}/api/user/{username}/feed".format(
        proto=self._pump.protocol,
        server=self._pump.client.server,
        username=self._pump.client.nickname,
    )

    data = self._pump.request(feed_url, method="POST", data=activity)

    if not data:
        return False

    if "error" in data:
        raise PumpException(data["error"])

    if unserialize:
        if "target" in data:
            # We probably want to unserialize the target when present --
            # true for collection.{add,remove}.
            self.unserialize(data["target"])
        else:
            # Copy activity-level attributes down onto the object.
            if "author" not in data["object"]:
                data["object"]["author"] = data["actor"]
            for key in ("to", "cc", "bto", "bcc"):
                if key not in data["object"] and key in data:
                    data["object"][key] = data[key]
            self.unserialize(data["object"])

    return True
Posts an activity to the feed.
def _add_links(self, links, key="href", proxy_key="proxyURL", endpoints=None):
    """Parse a block of link data and register each link on the object.

    :param links: dict of link data (may contain a nested "links" dict).
    :param key: dict key that holds the plain URL.
    :param proxy_key: dict key holding the proxy URL under "pump_io".
    :param endpoints: endpoint names to scan; defaults to the standard set.
    :returns: the object's accumulated links.
    """
    if endpoints is None:
        endpoints = ["likes", "replies", "shares", "self", "followers",
                     "following", "lists", "favorites", "members"]

    nested = links.get("links")
    if nested:
        for name in nested:
            # Occasionally links["links"][name] is just a string (what would
            # be the href value) rather than a dict. Likely a pump.io bug,
            # but we support both forms.
            value = nested[name]
            if isinstance(value, dict):
                self._add_link(name, value["href"])
            else:
                self._add_link(name, value)

    for name in endpoints:
        if links.get(name, None) is None:
            continue
        if "pump_io" in links[name]:
            self._add_link(name, links[name]["pump_io"][proxy_key])
        elif "url" in links[name]:
            self._add_link(name, links[name]["url"])
        else:
            self._add_link(name, links[name][key])

    return self.links
Parses and adds block of links
def parse_map(self, obj, mapping=None, *args, **kwargs):
    """Parse a dict of (model_attr, json_attr) items into attributes on *obj*.

    When a "data" kwarg is given, values are pulled from that JSON dict;
    otherwise they are taken directly from matching keyword arguments.
    """
    mapping = mapping or obj._mapping
    if "data" in kwargs:
        data = kwargs["data"]
        for attr, json_key in mapping.items():
            value = data[json_key] if data.get(json_key, None) is not None else None
            self.add_attr(obj, attr, value, from_json=True)
    else:
        for attr, json_key in mapping.items():
            if attr in kwargs:
                self.add_attr(obj, attr, kwargs[attr])
Parses a dictionary of (model_attr, json_attr) items
def likes(self):
    """:class:`Feed <pypump.models.feed.Feed>` of the people who've liked
    the object. Created lazily and cached."""
    # Always resolve the link first so a missing "likes" link fails loudly.
    endpoint = self.links["likes"]
    if self._likes is None:
        self._likes = Feed(endpoint, pypump=self._pump)
    return self._likes
A :class:`Feed <pypump.models.feed.Feed>` of the people who've liked the object. Example: >>> for person in mynote.likes: ... print(person.webfinger) ... [email protected] [email protected]
def comments(self):
    """:class:`Feed <pypump.models.feed.Feed>` of the comments on the
    object. Created lazily and cached."""
    # Always resolve the link first so a missing "replies" link fails loudly.
    endpoint = self.links["replies"]
    if self._comments is None:
        self._comments = Feed(endpoint, pypump=self._pump)
    return self._comments
A :class:`Feed <pypump.models.feed.Feed>` of the comments for the object. Example: >>> for comment in mynote.comments: ... print(comment) ... comment by [email protected]
def comment(self, comment):
    """Add a :class:`Comment <pypump.models.comment.Comment>` to the object.

    :param comment: a Comment instance, or plain text which is wrapped in one.

    Example:
        >>> anote.comment(pump.Comment('I agree!'))
    """
    if isinstance(comment, six.string_types):
        comment = self._pump.Comment(comment)
    comment.in_reply_to = self
    comment.send()
Add a :class:`Comment <pypump.models.comment.Comment>` to the object. :param comment: A :class:`Comment <pypump.models.comment.Comment>` instance, text content is also accepted. Example: >>> anote.comment(pump.Comment('I agree!'))
def shares(self):
    """:class:`Feed <pypump.models.feed.Feed>` of the people who've shared
    the object. Created lazily and cached."""
    # Always resolve the link first so a missing "shares" link fails loudly.
    endpoint = self.links["shares"]
    if self._shares is None:
        self._shares = Feed(endpoint, pypump=self._pump)
    return self._shares
A :class:`Feed <pypump.models.feed.Feed>` of the people who've shared the object. Example: >>> for person in mynote.shares: ... print(person.webfinger) ... [email protected] [email protected]
def _set_people(self, people):
    """Normalize the recipient(s) the object is sent to into a list.

    A single addressable object (anything with ``object_type``) is wrapped
    in a list; any other iterable is materialized; everything else is
    returned unchanged.
    """
    if hasattr(people, "object_type"):
        return [people]
    if hasattr(people, "__iter__"):
        return list(people)
    return people
Sets who the object is sent to
def from_file(self, filename):
    """Upload a file from a path on your system and post it to the feed.

    :param filename: Path to file on your system.
    :returns: self, updated from the server response.

    Example:
        >>> myimage.from_file('/path/to/dinner.png')
    """
    # Bug fix: the generic MIME fallback is "octet-stream", not "octal-stream".
    mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
    headers = {
        "Content-Type": mimetype,
        "Content-Length": str(os.path.getsize(filename)),
    }

    # Upload the raw file; use a context manager so the handle is always closed
    # (the original leaked the file object).
    with open(filename, "rb") as fin:
        file_data = self._pump.request(
            "/api/user/{0}/uploads".format(self._pump.client.nickname),
            method="POST",
            data=fin.read(),
            headers=headers,
        )

    # Now post the uploaded object to the feed.
    data = {
        "verb": "post",
        "object": file_data,
    }
    data.update(self.serialize())

    if not self.content and not self.display_name and not self.license:
        self._post_activity(data)
    else:
        self._post_activity(data, unserialize=False)

        # Issue a follow-up "update" activity carrying the metadata.
        if self.content:
            file_data['content'] = self.content
        if self.display_name:
            file_data['displayName'] = self.display_name
        if self.license:
            file_data['license'] = self.license

        self._post_activity({
            "verb": "update",
            "object": file_data,
        })

    return self
Uploads a file from a filename on your system. :param filename: Path to file on your system. Example: >>> myimage.from_file('/path/to/dinner.png')
def unserialize(self, data):
    """Build this Activity object from its JSON representation."""
    obj = data["object"]
    # Copy activity-level attributes down onto the object.
    if "author" not in obj:
        obj["author"] = data["actor"]
    for key in ("to", "cc", "bto", "bcc"):
        if key not in obj and key in data:
            obj[key] = data[key]
    Mapper(pypump=self._pump).parse_map(self, data=data)
    self._add_links(data)
    return self
From JSON -> Activity object
def me(self):
    """:class:`Person <pypump.models.person.Person>` for the logged-in user.

    Built once from the client's nickname and server, then cached.

    Example:
        >>> pump.me
        <Person: [email protected]>
    """
    if self._me is None:
        self._me = self.Person("{username}@{server}".format(
            username=self.client.nickname,
            server=self.client.server,
        ))
    return self._me
Returns :class:`Person <pypump.models.person.Person>` instance of the logged in user. Example: >>> pump.me <Person: [email protected]>
def create_store(self):
    """Create the persistent store object for this PyPump instance.

    :raises NotImplementedError: when no store_class is configured.
    """
    if self.store_class is None:
        raise NotImplementedError("You need to specify PyPump.store_class or override PyPump.create_store method.")
    return self.store_class.load(self.client.webfinger, self)
Creates store object
def _build_url(self, endpoint):
    """Return a fully qualified URL for *endpoint*.

    *endpoint* may already be a full URL, in which case its server is used
    instead of the configured client server.
    """
    server = None
    if "://" in endpoint:
        # Looks like a full URL -- break it down into server and endpoint.
        server, endpoint = self._deconstruct_url(endpoint)
    return "{proto}://{server}/{endpoint}".format(
        proto=self.protocol,
        server=self.client.server if server is None else server,
        endpoint=endpoint.lstrip("/"),
    )
Returns a fully qualified URL
def _deconstruct_url(self, url):
    """Split *url* into ``(server, endpoint)``, dropping any scheme."""
    without_scheme = url.split("://", 1)[-1]
    server, endpoint = without_scheme.split("/", 1)
    return (server, endpoint)
Breaks down URL and returns server and endpoint
def _add_client(self, url, key=None, secret=None):
    """Create a Client for *url*'s server and cache it in _server_cache.

    When key and secret are supplied they are used directly; otherwise a
    fresh client is registered with the server. Does nothing if the server
    is already cached.
    """
    if "://" in url:
        server, endpoint = self._deconstruct_url(url)
    else:
        server = url

    if server in self._server_cache:
        return

    if key and secret:
        client = Client(
            webfinger=self.client.webfinger,
            key=key,
            secret=secret,
            type=self.client.type,
            name=self.client.name,
        )
        client.set_pump(self)
    else:
        client = Client(
            webfinger=self.client.webfinger,
            name=self.client.name,
            type=self.client.type,
        )
        client.set_pump(self)
        client.register(server)

    self._server_cache[server] = client
Creates a Client object with key and secret for the server and adds it to _server_cache if it doesn't already exist.
def oauth_request(self):
    """Perform the OAuth request-token step and ask the user to authorize us."""
    # Fetch request tokens from the server and remember them.
    self._server_tokens = self.request_token()
    self.store["oauth-request-token"] = self._server_tokens["token"]
    self.store["oauth-request-secret"] = self._server_tokens["token_secret"]
    # Now the user must authorize us to use their pump.io account.
    result = self.verifier_callback(self.construct_oauth_url())
    if result is not None:
        self.verifier(result)
Makes an OAuth connection.
def construct_oauth_url(self):
    """Construct the OAuth verifier (authorization) URL for the server."""
    response = self._requester(
        requests.head,
        "{0}://{1}/".format(self.protocol, self.client.server),
        allow_redirects=False,
    )
    # Honour a single redirect if the server issues one.
    server = response.headers['location'] if response.is_redirect else response.url
    path = "oauth/authorize?oauth_token={token}".format(
        token=self.store["oauth-request-token"]
    )
    return "{server}{path}".format(server=server, path=path)
Constructs verifier OAuth URL
def setup_oauth_client(self, url=None):
    """Set up the OAuth1 client used for requests to the pump server.

    For the home server this builds (and remembers) a fully authorized
    client; for other servers a two-legged client is returned.
    """
    if url and "://" in url:
        server, endpoint = self._deconstruct_url(url)
    else:
        server = self.client.server

    if server not in self._server_cache:
        self._add_client(server)

    if server != self.client.server:
        # Foreign server: key/secret only, no resource-owner tokens.
        return OAuth1(
            client_key=self._server_cache[server].key,
            client_secret=self._server_cache[server].secret,
        )

    self.oauth = OAuth1(
        client_key=self.store["client-key"],
        client_secret=self.store["client-secret"],
        resource_owner_key=self.store["oauth-access-token"],
        resource_owner_secret=self.store["oauth-access-secret"],
    )
    return self.oauth
Sets up client for requests to pump
def request_token(self):
    """Fetch an OAuth request token.

    :returns: dict with 'token' and 'token_secret'.
    """
    auth = OAuth1(
        client_key=self._server_cache[self.client.server].key,
        client_secret=self._server_cache[self.client.server].secret,
        callback_uri=self.callback,
    )
    response = self._requester(
        requests.post,
        "oauth/request_token",
        auth=auth,
    )
    parsed = parse.parse_qs(response.text)
    return {
        'token': parsed[self.PARAM_TOKEN][0],
        'token_secret': parsed[self.PARAM_TOKEN_SECRET][0],
    }
Gets OAuth request token
def request_access(self, verifier):
    """Exchange the request token plus *verifier* for an OAuth access token.

    Stores the access token/secret and clears the temporary request tokens.
    """
    auth = OAuth1(
        client_key=self._server_cache[self.client.server].key,
        client_secret=self._server_cache[self.client.server].secret,
        resource_owner_key=self.store["oauth-request-token"],
        resource_owner_secret=self.store["oauth-request-secret"],
        verifier=verifier,
    )
    response = self._requester(
        requests.post,
        "oauth/access_token",
        auth=auth,
    )
    parsed = parse.parse_qs(response.text)
    self.store["oauth-access-token"] = parsed[self.PARAM_TOKEN][0]
    self.store["oauth-access-secret"] = parsed[self.PARAM_TOKEN_SECRET][0]
    self._server_tokens = {}
Get OAuth access token so we can make requests
def logged_in(self):
    """Return True when we hold a working access token for the current user."""
    if "oauth-access-token" not in self.store:
        return False

    response = self.request("/api/whoami", allow_redirects=False)

    # A logged-in whoami responds with a redirect (302) to our own profile.
    if response.status_code != 302:
        return False
    # The redirect location should match the profile we believe we have.
    if response.headers["location"] != self.me.links["self"]:
        return False
    return True
Return boolean if is logged in
def cudnnCreate():
    """Initialize cuDNN.

    Returns
    -------
    handle : cudnnHandle
        Handle to a newly created cuDNN context.
    """
    handle = ctypes.c_void_p()
    cudnnCheckStatus(_libcudnn.cudnnCreate(ctypes.byref(handle)))
    return handle.value
Initialize cuDNN. Initializes cuDNN and returns a handle to the cuDNN context. Returns ------- handle : cudnnHandle cuDNN context
def cudnnDestroy(handle):
    """Release hardware resources used by the given cuDNN context.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    """
    cudnnCheckStatus(_libcudnn.cudnnDestroy(ctypes.c_void_p(handle)))
Release cuDNN resources. Release hardware resources used by cuDNN. Parameters ---------- handle : cudnnHandle cuDNN context.
def cudnnSetStream(handle, id):
    """Set the current cuDNN library stream.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    id : cudaStream
        Stream Id.
    """
    cudnnCheckStatus(_libcudnn.cudnnSetStream(handle, id))
Set current cuDNN library stream. Parameters ---------- handle : cudnnHandle cuDNN context. id : cudaStream Stream Id.
def cudnnGetStream(handle):
    """Return the ID of the current cuDNN library stream.

    Parameters
    ----------
    handle : int
        cuDNN context.
    """
    stream_id = ctypes.c_void_p()
    cudnnCheckStatus(_libcudnn.cudnnGetStream(handle, ctypes.byref(stream_id)))
    return stream_id.value
Get current cuDNN library stream. Parameters ---------- handle : int cuDNN context. Returns ------- id : int Stream ID.
def cudnnCreateTensorDescriptor():
    """Allocate a cudnnTensorDescriptor_t structure and return a pointer to it.

    Returns
    -------
    tensor_descriptor : int
        Tensor descriptor.
    """
    descriptor = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(descriptor)))
    return descriptor.value
Create a Tensor descriptor object. Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it. Returns ------- tensor_descriptor : int Tensor descriptor.
def cudnnSetTensor4dDescriptor(tensorDesc, format, dataType, n, c, h, w):
    """Initialize a previously created 4D tensor descriptor.

    Strides are inferred from *format* so the data is contiguous in memory
    with no padding between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor
        Previously created tensor descriptor.
    format : cudnnTensorFormat
        Type of format.
    dataType : cudnnDataType
        Data type.
    n, c, h, w : int
        Number of images, feature maps per image, height and width of each
        feature map.
    """
    status = _libcudnn.cudnnSetTensor4dDescriptor(
        tensorDesc, format, dataType, n, c, h, w)
    cudnnCheckStatus(status)
Initialize a previously created Tensor 4D object. This function initializes a previously created Tensor4D descriptor object. The strides of the four dimensions are inferred from the format parameter and set in such a way that the data is contiguous in memory with no padding between dimensions. Parameters ---------- tensorDesc : cudnnTensorDescriptor Handle to a previously created tensor descriptor. format : cudnnTensorFormat Type of format. dataType : cudnnDataType Data type. n : int Number of images. c : int Number of feature maps per image. h : int Height of each feature map. w : int Width of each feature map.
def cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w,
                                 nStride, cStride, hStride, wStride):
    """Initialize a Tensor descriptor object with explicit strides.

    Similar to cudnnSetTensor4dDescriptor, but the strides are passed as
    parameters, allowing any layout or gaps between dimensions.

    Parameters
    ----------
    tensorDesc : cudnnTensorDescriptor_t
        Previously created tensor descriptor.
    dataType : cudnnDataType
        Data type.
    n, c, h, w : int
        Number of images, feature maps per image, height and width of each
        feature map.
    nStride, cStride, hStride, wStride : int
        Strides between consecutive images, feature maps, rows and columns.
    """
    status = _libcudnn.cudnnSetTensor4dDescriptorEx(
        tensorDesc, dataType, n, c, h, w,
        nStride, cStride, hStride, wStride)
    cudnnCheckStatus(status)
Initialize a Tensor descriptor object with strides. This function initializes a previously created generic Tensor descriptor object into a 4D tensor, similarly to cudnnSetTensor4dDescriptor but with the strides explicitly passed as parameters. This can be used to lay out the 4D tensor in any order or simply to define gaps between dimensions. Parameters ---------- tensorDesc : cudnnTensorDescriptor_t Handle to a previously created tensor descriptor. dataType : cudnnDataType Data type. n : int Number of images. c : int Number of feature maps per image. h : int Height of each feature map. w : int Width of each feature map. nStride : int Stride between two consecutive images. cStride : int Stride between two consecutive feature maps. hStride : int Stride between two consecutive rows. wStride : int Stride between two consecutive columns.
def cudnnSetTensor(handle, srcDesc, srcData, value):
    """Set all data points of a tensor to a given value: srcDest = value.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by srcDesc descriptor.
    value : float
        Value that all elements of the tensor will be set to.
    """
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    # Bug fix: the original referenced an undefined name `alpha`; the
    # parameter is called `value`, so the call always raised NameError.
    if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
        valueRef = ctypes.byref(ctypes.c_double(value))
    else:
        valueRef = ctypes.byref(ctypes.c_float(value))

    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, valueRef)
    cudnnCheckStatus(status)
Set all data points of a tensor to a given value : srcDest = value. Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. srcDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. srcData : void_p Pointer to data of the tensor described by srcDesc descriptor. value : float Value that all elements of the tensor will be set to.
def cudnnCreateFilterDescriptor():
    """Allocate and return a handle to a new filter descriptor.

    Returns
    -------
    wDesc : cudnnFilterDescriptor
        Handle to a newly allocated filter descriptor.
    """
    wDesc = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreateFilterDescriptor(ctypes.byref(wDesc)))
    return wDesc.value
Create a filter descriptor. This function creates a filter descriptor object by allocating the memory needed to hold its opaque structure. Parameters ---------- Returns ------- wDesc : cudnnFilterDescriptor Handle to a newly allocated filter descriptor.
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
    """Initialize a previously created descriptor as a 4D filter.

    Filter layout must be contiguous in memory.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Previously created filter descriptor.
    dataType : cudnnDataType
        Data type.
    format : cudnnTensorFormat
        Tensor format.
    k, c, h, w : int
        Number of output feature maps, input feature maps, filter height
        and filter width.
    """
    status = _libcudnn.cudnnSetFilter4dDescriptor(
        wDesc, dataType, format, k, c, h, w)
    cudnnCheckStatus(status)
Initialize a filter descriptor. This function initializes a previously created filter descriptor object into a 4D filter. Filters layout must be contiguous in memory. Parameters ---------- wDesc : cudnnFilterDescriptor Handle to a previously created filter descriptor. dataType : cudnnDataType Data type. format: cudnnTensorFormat Tensor format k : int Number of output feature maps. c : int Number of input feature maps. h : int Height of each filter. w : int Width of each filter.
def cudnnGetFilter4dDescriptor(wDesc):
    """Query a previously initialized 4D filter descriptor.

    Parameters
    ----------
    wDesc : cudnnFilterDescriptor
        Previously created filter descriptor.

    Returns
    -------
    (dataType, format, k, c, h, w) : tuple of int
        Data type, tensor format, output feature maps, input feature maps,
        filter height and filter width.
    """
    dataType = ctypes.c_int()
    format = ctypes.c_int()
    k = ctypes.c_int()
    c = ctypes.c_int()
    h = ctypes.c_int()
    w = ctypes.c_int()

    status = _libcudnn.cudnnGetFilter4dDescriptor(
        wDesc, ctypes.byref(dataType), ctypes.byref(format),
        ctypes.byref(k), ctypes.byref(c), ctypes.byref(h), ctypes.byref(w))
    cudnnCheckStatus(status)

    return (dataType.value, format.value,
            k.value, c.value, h.value, w.value)
Get parameters of filter descriptor. This function queries the parameters of the previously initialized filter descriptor object. Parameters ---------- wDesc : cudnnFilterDescriptor Handle to a previously created filter descriptor. Returns ------- dataType : cudnnDataType Data type. format: cudnnTensorFormat Tensor format k : int Number of output feature maps. c : int Number of input feature maps. h : int Height of each filter. w : int Width of each filter.
def cudnnCreateConvolutionDescriptor():
    """Allocate and return a handle to a new convolution descriptor.

    Returns
    -------
    convDesc : cudnnConvolutionDescriptor
        Handle to newly allocated convolution descriptor.
    """
    convDesc = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(convDesc)))
    return convDesc.value
Create a convolution descriptor. This function creates a convolution descriptor object by allocating the memory needed to hold its opaque structure. Returns ------- convDesc : cudnnConvolutionDescriptor Handle to newly allocated convolution descriptor.
def cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc, convDesc,
                                            destDesc, algo):
    """Return the GPU workspace size needed by cudnnConvolutionForward.

    Parameters
    ----------
    handle : cudnnHandle
        Previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Previously initialized tensor descriptor (input).
    wDesc : cudnnFilterDescriptor
        Previously initialized filter descriptor.
    convDesc : cudnnConvolutionDescriptor
        Previously initialized convolution descriptor.
    destDesc : cudnnTensorDescriptor
        Previously initialized tensor descriptor (output).
    algo : cudnnConvolutionFwdAlgo
        Chosen convolution algorithm.

    Returns
    -------
    sizeInBytes : c_size_t
        Workspace bytes needed to run a forward convolution with *algo*.
    """
    sizeInBytes = ctypes.c_size_t()
    status = _libcudnn.cudnnGetConvolutionForwardWorkspaceSize(
        handle, srcDesc, wDesc, convDesc, destDesc, algo,
        ctypes.byref(sizeInBytes))
    cudnnCheckStatus(status)
    # Note: callers receive the ctypes object itself, not .value.
    return sizeInBytes
This function returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnConvolutionForward with the specified algorithm. Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. srcDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. wDesc : cudnnFilterDescriptor Handle to a previously initialized filter descriptor. convDesc : cudnnConvolutionDescriptor Previously initialized convolution descriptor. destDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. algo : cudnnConvolutionFwdAlgo Enumerant that specifies the chosen convolution algorithm. Returns ------- sizeInBytes: c_size_t Amount of GPU memory needed as workspace to be able to execute a forward convolution with the specified algo.
def cudnnCreatePoolingDescriptor():
    """Allocate and return a handle to a new pooling descriptor.

    Returns
    -------
    poolingDesc : cudnnPoolingDescriptor
        Newly allocated pooling descriptor.
    """
    poolingDesc = ctypes.c_void_p()
    cudnnCheckStatus(
        _libcudnn.cudnnCreatePoolingDescriptor(ctypes.byref(poolingDesc)))
    return poolingDesc.value
Create pooling descriptor. This function creates a pooling descriptor object by allocating the memory needed to hold its opaque structure. Returns ------- poolingDesc : cudnnPoolingDescriptor Newly allocated pooling descriptor.
def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth,
                                verticalPadding, horizontalPadding,
                                verticalStride, horizontalStride):
    """Initialize a previously created 2D pooling descriptor.

    Parameters
    ----------
    poolingDesc : cudnnPoolingDescriptor
        Previously created pooling descriptor.
    mode : cudnnPoolingMode
        Pooling mode enumerant.
    windowHeight, windowWidth : int
        Pooling window size.
    verticalPadding, horizontalPadding : int
        Padding sizes.
    verticalStride, horizontalStride : int
        Pooling strides.
    """
    status = _libcudnn.cudnnSetPooling2dDescriptor(
        poolingDesc, mode, windowHeight, windowWidth,
        verticalPadding, horizontalPadding, verticalStride, horizontalStride)
    cudnnCheckStatus(status)
Initialize a 2D pooling descriptor. This function initializes a previously created pooling descriptor object. Parameters ---------- poolingDesc : cudnnPoolingDescriptor Handle to a previously created pooling descriptor. mode : cudnnPoolingMode Enumerant to specify the pooling mode. windowHeight : int Height of the pooling window. windowWidth : int Width of the pooling window. verticalPadding: int Size of vertical padding. horizontalPadding: int Size of horizontal padding. verticalStride : int Pooling vertical stride. horizontalStride : int Pooling horizontal stride.
def cudnnGetPooling2dDescriptor(poolingDesc):
    """Query a previously created 2D pooling descriptor.

    Returns
    -------
    (mode, windowHeight, windowWidth, verticalStride, horizontalStride)

    NOTE(review): the padding values are fetched from the library but NOT
    returned, which disagrees with the accompanying documentation. Confirm
    whether callers rely on the current 5-tuple before widening the return.
    """
    mode = ctypes.c_int()
    windowHeight = ctypes.c_int()
    windowWidth = ctypes.c_int()
    verticalPadding = ctypes.c_int()
    horizontalPadding = ctypes.c_int()
    verticalStride = ctypes.c_int()
    horizontalStride = ctypes.c_int()

    status = _libcudnn.cudnnGetPooling2dDescriptor(
        poolingDesc, ctypes.byref(mode), ctypes.byref(windowHeight),
        ctypes.byref(windowWidth), ctypes.byref(verticalPadding),
        ctypes.byref(horizontalPadding), ctypes.byref(verticalStride),
        ctypes.byref(horizontalStride))
    cudnnCheckStatus(status)

    return (mode.value, windowHeight.value, windowWidth.value,
            verticalStride.value, horizontalStride.value)
This function queries a previously created pooling descriptor object. Parameters ---------- poolingDesc : cudnnPoolingDescriptor Handle to a previously created 2D pooling descriptor. Returns ------- mode : cudnnPoolingMode Enumerant to specify the pooling mode. windowHeight : int Height of the pooling window. windowWidth : int Width of the pooling window. verticalPadding: int Size of vertical padding. horizontalPadding: int Size of horizontal padding. verticalStride : int Pooling vertical stride. horizontalStride : int Pooling horizontal stride.
def __prefix_key(self, key):
    """Return *key* with the store's prefix applied.

    Left untouched when no prefix is configured or the key already carries
    the prefix.
    """
    if self.prefix is None or key.startswith(self.prefix + "-"):
        return key
    return "{0}-{1}".format(self.prefix, key)
This will add the prefix to the key if one exists on the store
def export(self):
    """Export the store's contents as a plain dictionary."""
    return {key: value for key, value in self.items()}
Exports as dictionary
def save(self):
    """Atomically save the store to disk as JSON.

    :raises StoreException: when no filename is configured.
    """
    if self.filename is None:
        raise StoreException("Filename must be set to write store to disk")

    # We need an atomic way of re-writing the settings and must never leave
    # a partially written settings file behind (see bug #116): write to a
    # temp file first, then rename it over the real config.
    # Bug fix: the temp-name template previously didn't interpolate the
    # configured filename, so the temp file landed in the CWD with a
    # hard-coded name.
    tmp_filename = "{filename}.{date}.tmp".format(
        filename=self.filename,
        date=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H_%M_%S.%f')
    )

    # The `open` built-in doesn't allow us to set the mode
    mode = stat.S_IRUSR | stat.S_IWUSR  # 0600
    fd = os.open(tmp_filename, os.O_WRONLY | os.O_CREAT, mode)
    with os.fdopen(fd, "w") as fout:
        fout.write(json.dumps(self.export()))

    # Remove the old config, then move the temp file into place.
    if os.path.isfile(self.filename):
        os.remove(self.filename)
    os.rename(tmp_filename, self.filename)
Saves dictionary to disk in JSON format.
def get_filename(cls):
    """Return the on-disk store path, creating the config dir if needed.

    Respects XDG_CONFIG_HOME, falling back to ~/.config.
    """
    config_home = os.path.expanduser(
        os.environ.get("XDG_CONFIG_HOME", "~/.config"))
    base_path = os.path.join(config_home, "PyPump")
    if not os.path.isdir(base_path):
        os.makedirs(base_path)
    return os.path.join(base_path, "credentials.json")
Gets filename of store on disk
def load(cls, webfinger, pypump):
    """Load the JSON store from disk (if present) into a store object.

    :param webfinger: webfinger used as the store's key prefix.
    :param pypump: owning PyPump instance (unused here, kept for the API).
    """
    filename = cls.get_filename()
    if os.path.isfile(filename):
        # Use a context manager so the file handle is closed deterministically
        # (the original left it to the garbage collector).
        with open(filename) as fin:
            data = json.load(fin)
        store = cls(data, filename=filename)
    else:
        store = cls(filename=filename)
    store.prefix = webfinger
    return store
Load JSON from disk into store object
def pause(message='Press any key to continue . . . '):
    """Print *message* (unless None) and wait for a single keypress."""
    if message is not None:
        print(message, end='')
        sys.stdout.flush()
    getch()
    print()
Prints the specified message if it's not None and waits for a keypress.
def covalent_bonds(atoms, threshold=1.1):
    """Return all the covalent bonds in a list of `Atom` pairs.

    Parameters
    ----------
    atoms : [(`Atom`, `Atom`)]
        List of pairs of `Atoms`.
    threshold : float, optional
        Allows deviation from ideal covalent bond distance to be included,
        e.g. 1.1 accepts interactions up to 10% beyond the ideal distance.
    """
    found = []
    for atom_a, atom_b in atoms:
        # Ideal bond length: sum of the two atomic radii, divided by 100
        # (presumably converting picometres to angstroms -- confirm units).
        ideal = (element_data[atom_a.element.title()]['atomic radius'] +
                 element_data[atom_b.element.title()]['atomic radius']) / 100
        separation = distance(atom_a._vector, atom_b._vector)
        if separation <= ideal * threshold:
            found.append(CovalentBond(atom_a, atom_b, separation))
    return found
Returns all the covalent bonds in a list of `Atom` pairs. Notes ----- Uses information `element_data`, which can be accessed directly through this module i.e. `isambard.ampal.interactions.element_data`. Parameters ---------- atoms : [(`Atom`, `Atom`)] List of pairs of `Atoms`. threshold : float, optional Allows deviation from ideal covalent bond distance to be included. For example, a value of 1.1 would allow interactions up to 10% further from the ideal distance to be included.
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
    """Finds all covalent bonds in the AMPAL object.

    Parameters
    ----------
    ampal : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    max_range : float, optional
        Used to define the sector size, so interactions at longer
        ranges will not be found.
    threshold : float, optional
        Allows deviation from ideal covalent bond distance to be
        included. For example, a value of 1.1 would allow interactions
        up to 10% further from the ideal distance to be included.
    tag : bool, optional
        If `True`, will add the covalent bond to the tags dictionary of
        each `Atom` involved in the interaction under the
        `covalent_bonds` key.
    """
    sectors = gen_sectors(ampal.get_atoms(), max_range * 1.1)
    bonds = []
    for sector in sectors.values():
        # Every unordered pair of atoms in a sector is a bond candidate.
        bonds.extend(covalent_bonds(
            itertools.combinations(sector, 2), threshold=threshold))
    # Sectors overlap, so the same bond can be found more than once.
    bond_set = list(set(bonds))
    if tag:
        for bond in bond_set:
            # setdefault replaces the original duplicated if/else
            # initialisation of the 'covalent_bonds' tag list.
            bond.a.tags.setdefault('covalent_bonds', []).append(bond.b)
            bond.b.tags.setdefault('covalent_bonds', []).append(bond.a)
    return bond_set
Finds all covalent bonds in the AMPAL object. Parameters ---------- ampal : AMPAL Object Any AMPAL object with a `get_atoms` method. max_range : float, optional Used to define the sector size, so interactions at longer ranges will not be found. threshold : float, optional Allows deviation from ideal covalent bond distance to be included. For example, a value of 1.1 would allow interactions up to 10% further from the ideal distance to be included. tag : bool, optional If `True`, will add the covalent bond to the tags dictionary of each `Atom` involved in the interaction under the `covalent_bonds` key.
def generate_covalent_bond_graph(covalent_bonds):
    """Generates a graph of the covalent bond network described by the
    interactions.

    Parameters
    ----------
    covalent_bonds: [CovalentBond]
        List of `CovalentBond`.

    Returns
    -------
    bond_graph: networkx.Graph
        A graph of the covalent bond network.
    """
    bond_graph = networkx.Graph()
    bond_graph.add_edges_from(
        (bond.a, bond.b) for bond in covalent_bonds)
    return bond_graph
Generates a graph of the covalent bond network described by the interactions. Parameters ---------- covalent_bonds: [CovalentBond] List of `CovalentBond`. Returns ------- bond_graph: networkx.Graph A graph of the covalent bond network.
def generate_bond_subgraphs_from_break(bond_graph, atom1, atom2):
    """Splits the bond graph between two atoms producing subgraphs.

    Notes
    -----
    This will not work if there are cycles in the bond graph.

    Parameters
    ----------
    bond_graph: networkx.Graph
        Graph of covalent bond network
    atom1: isambard.ampal.Atom
        First atom in the bond.
    atom2: isambard.ampal.Atom
        Second atom in the bond.

    Returns
    -------
    subgraphs: [networkx.Graph]
        A list of subgraphs generated when a bond is broken in the
        covalent bond network.
    """
    bond_graph.remove_edge(atom1, atom2)
    try:
        # `connected_component_subgraphs` was removed in NetworkX 2.4;
        # building subgraph views from `connected_components` is the
        # supported equivalent of the old `copy=False` behaviour.
        subgraphs = [bond_graph.subgraph(component)
                     for component in networkx.connected_components(bond_graph)]
    finally:
        # Always restore the broken bond, even if subgraph creation fails.
        bond_graph.add_edge(atom1, atom2)
    return subgraphs
Splits the bond graph between two atoms to producing subgraphs. Notes ----- This will not work if there are cycles in the bond graph. Parameters ---------- bond_graph: networkx.Graph Graph of covalent bond network atom1: isambard.ampal.Atom First atom in the bond. atom2: isambard.ampal.Atom Second atom in the bond. Returns ------- subgraphs: [networkx.Graph] A list of subgraphs generated when a bond is broken in the covalent bond network.
def cap(v, l):
    """Converts `v` to a string, keeping only the last `l` characters
    if it is longer than `l`."""
    text = str(v)
    if len(text) <= l:
        return text
    return text[-l:]
Shortens the string if it is above a certain length, keeping the trailing characters.
def find_atoms_within_distance(atoms, cutoff_distance, point):
    """Returns atoms within the distance from the point.

    Parameters
    ----------
    atoms : [ampal.atom]
        A list of `ampal.atoms`.
    cutoff_distance : float
        Maximum distance from point.
    point : (float, float, float)
        Reference point, 3D coordinate.

    Returns
    -------
    filtered_atoms : [ampal.atoms]
        `atoms` list filtered by distance.
    """
    return list(filter(
        lambda atom: distance(atom, point) <= cutoff_distance, atoms))
Returns atoms within the distance from the point. Parameters ---------- atoms : [ampal.atom] A list of `ampal.atoms`. cutoff_distance : float Maximum distance from point. point : (float, float, float) Reference point, 3D coordinate. Returns ------- filtered_atoms : [ampal.atoms] `atoms` list filtered by distance.
def centre_of_atoms(atoms, mass_weighted=True):
    """Returns centre point of any list of atoms.

    Parameters
    ----------
    atoms : list
        List of AMPAL atom objects.
    mass_weighted : bool, optional
        If True returns centre of mass, otherwise just geometric centre
        of points.

    Returns
    -------
    centre_of_mass : numpy.array
        3D coordinate for the centre of mass.
    """
    coordinates = [atom._vector for atom in atoms]
    # An empty mass list tells centre_of_mass to use the plain centroid.
    weights = [atom.mass for atom in atoms] if mass_weighted else []
    return centre_of_mass(points=coordinates, masses=weights)
Returns centre point of any list of atoms. Parameters ---------- atoms : list List of AMPAL atom objects. mass_weighted : bool, optional If True returns centre of mass, otherwise just geometric centre of points. Returns ------- centre_of_mass : numpy.array 3D coordinate for the centre of mass.
def update_ff(self, ff, mol2=False, force_ff_assign=False):
    """Manages assigning the force field parameters.

    The aim of this method is to avoid unnecessary assignment of the
    force field.

    Parameters
    ----------
    ff: BuffForceField
        The force field to be used for scoring.
    mol2: bool, optional
        If true, mol2 style labels will also be used.
    force_ff_assign: bool, optional
        If true, the force field will be completely reassigned,
        ignoring the cached parameters.
    """
    # Original three-branch flag collapsed to one condition: reassign
    # when forced, or when the cached 'assigned_ff' tag is missing/falsy.
    if force_ff_assign or not self.tags.get('assigned_ff'):
        self.assign_force_field(ff, mol2=mol2)
    return
Manages assigning the force field parameters. The aim of this method is to avoid unnecessary assignment of the force field. Parameters ---------- ff: BuffForceField The force field to be used for scoring. mol2: bool, optional If true, mol2 style labels will also be used. force_ff_assign: bool, optional If true, the force field will be completely reassigned, ignoring the cached parameters.
def get_internal_energy(self, assign_ff=True, ff=None, mol2=False,
                        force_ff_assign=False):
    """Calculates the internal energy of the AMPAL object.

    This method is assigned to the buff_internal_energy property,
    using the default arguments.

    Parameters
    ----------
    assign_ff: bool, optional
        If true the force field will be updated if required.
    ff: BuffForceField, optional
        The force field to be used for scoring.
    mol2: bool, optional
        If true, mol2 style labels will also be used.
    force_ff_assign: bool, optional
        If true, the force field will be completely reassigned,
        ignoring the cached parameters.

    Returns
    -------
    BUFF_score: BUFFScore
        A BUFFScore object with information about each of the
        interactions and the atoms involved.
    """
    if not ff:
        ff = global_settings['buff']['force_field']
    if assign_ff:
        self.update_ff(ff, mol2=mol2, force_ff_assign=force_ff_assign)
    interactions = find_intra_ampal(self, ff.distance_cutoff)
    return score_interactions(interactions, ff)
Calculates the internal energy of the AMPAL object. This method is assigned to the buff_internal_energy property, using the default arguments. Parameters ---------- assign_ff: bool, optional If true the force field will be updated if required. ff: BuffForceField, optional The force field to be used for scoring. mol2: bool, optional If true, mol2 style labels will also be used. force_ff_assign: bool, optional If true, the force field will be completely reassigned, ignoring the cached parameters. Returns ------- BUFF_score: BUFFScore A BUFFScore object with information about each of the interactions and the atoms involved.
def rotate(self, angle, axis, point=None, radians=False,
           inc_alt_states=True):
    """Rotates every atom in the AMPAL object.

    Parameters
    ----------
    angle : float
        Angle that AMPAL object will be rotated.
    axis : 3D Vector (tuple, list, numpy.array)
        Axis about which the AMPAL object will be rotated.
    point : 3D Vector (tuple, list, numpy.array), optional
        Point that the axis lies upon. If `None` then the origin is
        used.
    radians : bool, optional
        True if `angle` is defined in radians, False if degrees.
    inc_alt_states : bool, optional
        If true, will rotate atoms in all states i.e. includes
        alternate conformations for sidechains.
    """
    rotation = Quaternion.angle_and_axis(
        angle=angle, axis=axis, radians=radians)
    for atom in self.get_atoms(inc_alt_states=inc_alt_states):
        atom._vector = rotation.rotate_vector(v=atom._vector, point=point)
    return
Rotates every atom in the AMPAL object. Parameters ---------- angle : float Angle that AMPAL object will be rotated. axis : 3D Vector (tuple, list, numpy.array) Axis about which the AMPAL object will be rotated. point : 3D Vector (tuple, list, numpy.array), optional Point that the axis lies upon. If `None` then the origin is used. radians : bool, optional True if `angle` is defined in radians, False if degrees. inc_alt_states : bool, optional If true, will rotate atoms in all states i.e. includes alternate conformations for sidechains.
def translate(self, vector, inc_alt_states=True):
    """Translates every atom in the AMPAL object.

    Parameters
    ----------
    vector : 3D Vector (tuple, list, numpy.array)
        Vector used for translation.
    inc_alt_states : bool, optional
        If true, will translate atoms in all states i.e. includes
        alternate conformations for sidechains.
    """
    shift = numpy.array(vector)
    for atom in self.get_atoms(inc_alt_states=inc_alt_states):
        atom._vector += shift
    return
Translates every atom in the AMPAL object. Parameters ---------- vector : 3D Vector (tuple, list, numpy.array) Vector used for translation. inc_alt_states : bool, optional If true, will rotate atoms in all states i.e. includes alternate conformations for sidechains.
def rmsd(self, other, backbone=False):
    """Calculates the RMSD between two AMPAL objects.

    Notes
    -----
    No fitting operation is performed; both AMPAL objects must have the
    same number of atoms.

    Parameters
    ----------
    other : AMPAL Object
        Any AMPAL object with `get_atoms` method.
    backbone : bool, optional
        Calculates RMSD of backbone only.
    """
    assert type(self) == type(other)
    # Only restrict to the backbone when this object actually has one.
    use_backbone = backbone and hasattr(self, 'backbone')
    source = self.backbone if use_backbone else self
    target = other.backbone if use_backbone else other
    points1 = [atom._vector for atom in source.get_atoms()]
    points2 = [atom._vector for atom in target.get_atoms()]
    return rmsd(points1=points1, points2=points2)
Calculates the RMSD between two AMPAL objects. Notes ----- No fitting operation is performed and both AMPAL objects must have the same number of atoms. Parameters ---------- other : AMPAL Object Any AMPAL object with `get_atoms` method. backbone : bool, optional Calculates RMSD of backbone only.
def append(self, item):
    """Appends a `Monomer` to the `Polymer`.

    Notes
    -----
    Does not update labelling.

    Raises
    ------
    TypeError
        If `item` is not a `Monomer`.
    """
    if not isinstance(item, Monomer):
        raise TypeError(
            'Only Monomer objects can be appended to an Polymer.')
    self._monomers.append(item)
    return
Appends a `Monomer to the `Polymer`. Notes ----- Does not update labelling.
def extend(self, polymer):
    """Extends the `Polymer` with the contents of another `Polymer`.

    Notes
    -----
    Does not update labelling.

    Raises
    ------
    TypeError
        If `polymer` is not a `Polymer`.
    """
    if not isinstance(polymer, Polymer):
        raise TypeError(
            'Only Polymer objects may be merged with a Polymer using unary operator "+".')
    self._monomers.extend(polymer)
    return
Extends the `Polymer` with the contents of another `Polymer`. Notes ----- Does not update labelling.
def get_monomers(self, ligands=True):
    """Retrieves all the `Monomers` from the AMPAL object.

    Parameters
    ----------
    ligands : bool, optional
        If true, will include ligand `Monomers`.
    """
    if ligands and self.ligands:
        return iter(self._monomers + self.ligands._monomers)
    return iter(self._monomers)
Retrieves all the `Monomers` from the AMPAL object. Parameters ---------- ligands : bool, optional If true, will include ligand `Monomers`.
def get_atoms(self, ligands=True, inc_alt_states=False):
    """Flat list of all the Atoms in the Polymer.

    Parameters
    ----------
    ligands : bool, optional
        If true, atoms from ligand `Monomers` are included.
    inc_alt_states : bool
        If true atoms from alternate conformations are included rather
        than only the "active" states.

    Returns
    -------
    atoms : itertools.chain
        Returns an iterator of all the atoms. Convert to list if you
        require indexing.
    """
    monomers = list(self._monomers)
    if ligands and self.ligands:
        monomers += self.ligands._monomers
    return itertools.chain.from_iterable(
        list(monomer.get_atoms(inc_alt_states=inc_alt_states))
        for monomer in monomers)
Flat list of all the Atoms in the Polymer. Parameters ---------- inc_alt_states : bool If true atoms from alternate conformations are included rather than only the "active" states. Returns ------- atoms : itertools.chain Returns an iterator of all the atoms. Convert to list if you require indexing.
def relabel_monomers(self, labels=None):
    """Relabels the component Monomers either numerically or using a
    list of labels.

    Parameters
    ----------
    labels : list, optional
        A list of new labels.

    Raises
    ------
    ValueError
        Raised if the number of labels does not match the number of
        component Monomer objects.
    """
    if not labels:
        # Default: number the monomers sequentially from 1.
        for index, monomer in enumerate(self._monomers, start=1):
            monomer.id = str(index)
        return
    if len(self._monomers) != len(labels):
        error_string = (
            'Number of Monomers ({}) and number of labels '
            '({}) must be equal.')
        raise ValueError(error_string.format(
            len(self._monomers), len(labels)))
    for monomer, label in zip(self._monomers, labels):
        monomer.id = str(label)
    return
Relabels the component Monomers either numerically or using a list of labels. Parameters ---------- labels : list, optional A list of new labels. Raises ------ ValueError Raised if the number of labels does not match the number of component Monomer objects.
def relabel_atoms(self, start=1):
    """Relabels all `Atoms` in numerical order.

    Parameters
    ----------
    start : int, optional
        Offset the labelling by `start` residues.
    """
    for label, atom in enumerate(self.get_atoms(), start=start):
        atom.id = label
    return
Relabels all `Atoms` in numerical order. Parameters ---------- start : int, optional Offset the labelling by `start` residues.
def make_pdb(self, alt_states=False, inc_ligands=True):
    """Generates a PDB string for the `Polymer`.

    Parameters
    ----------
    alt_states : bool, optional
        Include alternate conformations for `Monomers` in PDB.
    inc_ligands : bool, optional
        Includes `Ligands` in PDB.

    Returns
    -------
    pdb_str : str
        String of the pdb for the `Polymer`. Generated using
        information from the component `Monomers`.
    """
    # Ensure every monomer has an id before writing records; the
    # original `any([False if x.id else True ...])` said the same thing
    # in a much more convoluted way.
    if any(not x.id for x in self._monomers):
        self.relabel_monomers()
    if self.ligands and inc_ligands:
        monomers = self._monomers + self.ligands._monomers
    else:
        monomers = self._monomers
    pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
    return pdb_str
Generates a PDB string for the `Polymer`. Parameters ---------- alt_states : bool, optional Include alternate conformations for `Monomers` in PDB. inc_ligands : bool, optional Includes `Ligands` in PDB. Returns ------- pdb_str : str String of the pdb for the `Polymer`. Generated using information from the component `Monomers`.
def make_pdb(self):
    """Generates a PDB string for the `Monomer`."""
    # Use a blank chain id when the monomer has no parent Polymer.
    chain_id = self.ampal_parent.id if self.ampal_parent else ' '
    return write_pdb([self], chain_id)
Generates a PDB string for the `Monomer`.
def unique_id(self):
    """Creates a unique ID for the `Atom` based on its parents.

    Returns
    -------
    unique_id : (str, str, str)
        (polymer.id, residue.id, atom.id)
    """
    residue = self.ampal_parent
    chain = residue.ampal_parent
    return chain.id, residue.id, self.id
Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id)
def rotate(self, angle, axis, point=None, radians=False):
    """Rotates `Atom` by `angle`.

    Parameters
    ----------
    angle : float
        Angle that `Atom` will be rotated.
    axis : 3D Vector (tuple, list, numpy.array)
        Axis about which the `Atom` will be rotated.
    point : 3D Vector (tuple, list, numpy.array), optional
        Point that the `axis` lies upon. If `None` then the origin is
        used.
    radians : bool, optional
        True if `angle` is defined in radians, False if degrees.
    """
    rotation = Quaternion.angle_and_axis(
        angle=angle, axis=axis, radians=radians)
    self._vector = rotation.rotate_vector(v=self._vector, point=point)
    return
Rotates `Atom` by `angle`. Parameters ---------- angle : float Angle that `Atom` will be rotated. axis : 3D Vector (tuple, list, numpy.array) Axis about which the `Atom` will be rotated. point : 3D Vector (tuple, list, numpy.array), optional Point that the `axis` lies upon. If `None` then the origin is used. radians : bool, optional True if `angle` is defined in radians, False if degrees.
def translate(self, vector):
    """Translates `Atom` by `vector`.

    Parameters
    ----------
    vector : 3D Vector (tuple, list, numpy.array)
        Vector used for translation.
    """
    # The original body contained leftover Polymer.translate code that
    # referenced an undefined `inc_alt_states` name, iterated
    # `self.get_atoms()` on a single Atom and returned before the
    # trailing (unreachable) per-atom shift. Only the atom's own vector
    # should be moved.
    self._vector += numpy.array(vector)
    return
Translates `Atom`. Parameters ---------- vector : 3D Vector (tuple, list, numpy.array) Vector used for translation.
def parse_PISCES_output(pisces_output, path=False):
    """Takes the output list of a PISCES cull and returns in a usable
    dictionary.

    Notes
    -----
    Designed for outputs of protein sequence redundancy culls conducted
    using the PISCES server.
    http://dunbrack.fccc.edu/PISCES.php
    G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling
    server. Bioinformatics, 19:1589-1591, 2003.

    Parameters
    ----------
    pisces_output : str or path
        Output list of non-redundant protein chains from PISCES, or
        path to text file.
    path : bool
        True if path given rather than string.

    Returns
    -------
    pisces_dict : dict
        Data output by PISCES in dictionary form.
    """
    pisces_dict = {}
    if path:
        pisces_content = Path(pisces_output).read_text().splitlines()[1:]
    else:
        pisces_content = pisces_output.splitlines()[1:]
    for line in pisces_content:
        # Split once per line instead of once per field.
        fields = line.split()
        pdb = fields[0][:4].lower()
        chain = fields[0][-1]
        if pdb in pisces_dict:
            pisces_dict[pdb]['chains'].append(chain)
        else:
            pisces_dict[pdb] = {'length': fields[1],
                                'method': fields[2],
                                'resolution': fields[3],
                                'R-factor': fields[4],
                                'R-free': fields[5],
                                'chains': [chain]}
    return pisces_dict
Takes the output list of a PISCES cull and returns in a usable dictionary. Notes ----- Designed for outputs of protein sequence redundancy culls conducted using the PISCES server. http://dunbrack.fccc.edu/PISCES.php G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling server. Bioinformatics, 19:1589-1591, 2003. Parameters ---------- pisces_output : str or path Output list of non-redundant protein chains from PISCES, or path to text file. path : bool True if path given rather than string. Returns ------- pisces_dict : dict Data output by PISCES in dictionary form.
def download_decode(URL, encoding='utf-8', verbose=True):
    """Downloads data from URL and returns decoded contents.

    Returns `None` (after printing diagnostics) if the request fails.
    """
    if verbose:
        print("Downloading data from " + URL)
    try:
        with urlopen(Request(URL)) as response:
            return response.read().decode(encoding)
    except URLError as e:
        # URLError carries `reason` for connection failures and `code`
        # for HTTP-level errors.
        if hasattr(e, 'reason'):
            print('Server could not be reached.')
            print('Reason: ', e.reason)
        elif hasattr(e, 'code'):
            print('The server couldn\'t fulfill the request.')
            print('Error code: ', e.code)
        return None
Downloads data from URL and returns decoded contents.
def olderado_best_model(pdb_id):
    """Checks the Olderado web server and returns the most
    representative conformation for PDB NMR structures.

    Notes
    -----
    Uses OLDERADO from the EBI.
    See http://www.ebi.ac.uk/pdbe/nmr/olderado/ and citations therein.

    Parameters
    ----------
    pdb_id : str
        The 4-character PDB code for the NMR structure of interest.

    Returns
    -------
    model_no : int
        The conformation number of the most-representative
        conformation.

    Raises
    ------
    ValueError
        If the model number it finds is not an integer. This might
        indicate that the website format has changed.
    """
    pdb_code = pdb_id[:4].lower()
    olderado_page = download_decode(
        'http://www.ebi.ac.uk/pdbe/nmr/olderado/searchEntry?pdbCode=' + pdb_code,
        verbose=False)
    if not olderado_page:
        return None
    parsed_page = BeautifulSoup(olderado_page, 'html.parser')
    try:
        # The best model number lives in the second table cell.
        best_model = parsed_page.find_all('td')[1]
    except IndexError:
        print("No model info could be found for {0} - ensure that it's an NMR structure.".format(pdb_id))
        return None
    try:
        model_no = int(best_model.string)
    except ValueError as v:
        print("Did not find a number for best model.")
        raise v
    return model_no
Checks the Olderado web server and returns the most representative conformation for PDB NMR structures. Notes ----- Uses OLDERADO from the EBI. See http://www.ebi.ac.uk/pdbe/nmr/olderado/ and citations therein. Parameters ---------- pdb_id : str The 4-character PDB code for the NMR structure of interest. Returns ------- model_no : int The conformation number of the most-representative conformation. Raises ------ ValueError If the model number it finds is not an integer. This might indicate that the website format has changed.
def buff_eval(params):
    """Builds and evaluates BUFF energy of model in parallelization

    Parameters
    ----------
    params: list
        Tuple containing the specification to be built, the sequence,
        and the parameters for model building.

    Returns
    -------
    model.bude_score: float
        BUFF score for model to be assigned to particle fitness value.
    """
    spec, seq, build_params = params
    model = spec(*build_params)
    model.build()
    model.pack_new_sequences(seq)
    return model.buff_interaction_energy.total_energy
Builds and evaluates BUFF energy of model in parallelization Parameters ---------- params: list Tuple containing the specification to be built, the sequence, and the parameters for model building. Returns ------- model.bude_score: float BUFF score for model to be assigned to particle fitness value.
def buff_internal_eval(params):
    """Builds and evaluates BUFF internal energy of a model in
    parallelization

    Parameters
    ----------
    params: list
        Tuple containing the specification to be built, the sequence
        and the parameters for model building.

    Returns
    -------
    model.bude_score: float
        BUFF internal energy score to be assigned to particle fitness
        value.
    """
    spec, seq, build_params = params
    model = spec(*build_params)
    model.build()
    model.pack_new_sequences(seq)
    return model.buff_internal_energy.total_energy
Builds and evaluates BUFF internal energy of a model in parallelization Parameters ---------- params: list Tuple containing the specification to be built, the sequence and the parameters for model building. Returns ------- model.bude_score: float BUFF internal energy score to be assigned to particle fitness value.
def rmsd_eval(rmsd_params):
    """Builds a model and runs profit against a reference model.

    Parameters
    ----------
    rmsd_params : (specification, str, list, str)
        The specification to be built, the sequence, the model-building
        parameters and the reference pdb.

    Returns
    -------
    rmsd: float
        rmsd against reference model as calculated by profit.
    """
    spec, seq, build_params, reference_pdb = rmsd_params
    model = spec(*build_params)
    # NOTE(review): unlike buff_eval, the model is not `.build()` before
    # packing - confirm this is intentional.
    model.pack_new_sequences(seq)
    _, backbone_rmsd, _ = run_profit(
        model.pdb, reference_pdb, path1=False, path2=False)
    return backbone_rmsd
Builds a model and runs profit against a reference model. Parameters ---------- rmsd_params : (specification, str, list, str) Tuple containing the specification to be built, the sequence, the parameters for model building and the reference pdb. Returns ------- rmsd: float rmsd against reference model as calculated by profit.
def comparator_eval(comparator_params):
    """Gets BUFF score for interaction between two AMPAL objects."""
    top1, top2, params1, params2, seq1, seq2, movements = comparator_params
    xrot, yrot, zrot, xtrans, ytrans, ztrans = movements
    obj1 = top1(*params1)
    obj2 = top2(*params2)
    # Orient the second object relative to the first: rotate about each
    # axis in turn, then translate.
    for rotation, axis in ((xrot, [1, 0, 0]),
                           (yrot, [0, 1, 0]),
                           (zrot, [0, 0, 1])):
        obj2.rotate(rotation, axis)
    obj2.translate([xtrans, ytrans, ztrans])
    model = obj1 + obj2
    model.relabel_all()
    model.pack_new_sequences(seq1 + seq2)
    return model.buff_interaction_energy.total_energy
Gets BUFF score for interaction between two AMPAL objects
def parse_individual(self, individual):
    """Converts a deap individual into a full list of parameters.

    Parameters
    ----------
    individual: deap individual from optimization
        Details vary according to type of optimization, but parameters
        within deap individual are always between -1 and 1. This
        function converts them into the values used to actually build
        the model

    Returns
    -------
    fullpars: list
        Full parameter list for model building.
    """
    means = self._params['value_means']
    ranges = self._params['value_ranges']
    # Rescale each [-1, 1] gene to mean + gene * range.
    scaled = [mean + gene * value_range
              for mean, gene, value_range in zip(means, individual, ranges)]
    # Substitute every 'varN' placeholder in the arrangement with its
    # scaled value; fixed parameters pass through untouched.
    substitutions = dict(zip(self._params['variable_parameters'], scaled))
    return [substitutions.get(par, par)
            for par in self._params['arrangement']]
Converts a deap individual into a full list of parameters. Parameters ---------- individual: deap individual from optimization Details vary according to type of optimization, but parameters within deap individual are always between -1 and 1. This function converts them into the values used to actually build the model Returns ------- fullpars: list Full parameter list for model building.
def parameters(self, sequence, value_means, value_ranges, arrangement):
    """Relates the individual to be evolved to the full parameter
    string.

    Parameters
    ----------
    sequence: str
        Full amino acid sequence for specification object to be
        optimized. Must be equal to the number of residues in the
        model.
    value_means: list
        List containing mean values for parameters to be optimized.
    value_ranges: list
        List containing ranges for parameters to be optimized. Values
        must be positive.
    arrangement: list
        Full list of fixed and variable parameters for model building.
        Fixed values are the appropriate value. Values to be varied
        should be listed as 'var0', 'var1' etc, and must be in
        ascending numerical order. Variables can be repeated if
        required.

    Raises
    ------
    ValueError
        If any range is not positive, if the number of ranges does not
        match the number of means, or if the arrangement does not
        contain every expected 'varN' placeholder.
    """
    # Validate before mutating self._params so a bad call cannot leave
    # the optimiser in a partially-updated state (the original assigned
    # first and raised afterwards).
    if any(x <= 0 for x in value_ranges):
        raise ValueError("range values must be greater than zero")
    if len(value_ranges) != len(value_means):
        raise ValueError("argument mismatch!")
    variable_parameters = ['var{}'.format(i)
                           for i in range(len(value_means))]
    if len(set(arrangement).intersection(
            variable_parameters)) != len(value_means):
        raise ValueError("argument mismatch!")
    self._params['sequence'] = sequence
    self._params['value_means'] = value_means
    self._params['value_ranges'] = value_ranges
    self._params['arrangement'] = arrangement
    self._params['variable_parameters'] = variable_parameters
Relates the individual to be evolved to the full parameter string. Parameters ---------- sequence: str Full amino acid sequence for specification object to be optimized. Must be equal to the number of residues in the model. value_means: list List containing mean values for parameters to be optimized. value_ranges: list List containing ranges for parameters to be optimized. Values must be positive. arrangement: list Full list of fixed and variable parameters for model building. Fixed values are the appropriate value. Values to be varied should be listed as 'var0', 'var1' etc, and must be in ascending numerical order. Variables can be repeated if required.
def log_results(self):
    """Saves files for the minimization.

    Notes
    -----
    Currently saves a logfile with best individual and a pdb of the
    best model.
    """
    best_ind = self.halloffame[0]
    model_params = self.parse_individual(
        best_ind)  # need to change name of 'params'
    # Append run statistics to '<output_path><run_id>_log.txt'.
    with open(
        '{0}{1}_log.txt'.format(
            self._params['output_path'],
            self._params['run_id']), 'a+') as log_file:
        log_file.write('\nEvaluated {0} models in total\n'.format(
            self._params['model_count']))
        log_file.write('Run ID is {0}\n'.format(self._params['run_id']))
        log_file.write('Best fitness is {0}\n'.format(
            self.halloffame[0].fitness))
        log_file.write(
            'Parameters of best model are {0}\n'.format(model_params))
        log_file.write(
            'Best individual is {0}\n'.format(self.halloffame[0]))
        # Genes live in [-1, 1]; values near the edge suggest the search
        # was constrained by the allowed parameter range.
        for i, entry in enumerate(self.halloffame[0]):
            if entry > 0.95:
                log_file.write(
                    "Warning! Parameter {0} is at or near maximum allowed "
                    "value\n".format(i + 1))
            elif entry < -0.95:
                log_file.write(
                    "Warning! Parameter {0} is at or near minimum allowed "
                    "value\n".format(i + 1))
        log_file.write('Minimization history: \n{0}'.format(self.logbook))
    # Rebuild the best-scoring model and write it out as a PDB file.
    with open('{0}{1}_bestmodel.pdb'.format(
            self._params['output_path'],
            self._params['run_id']), 'w') as output_file:
        model = self._params['specification'](*model_params)
        model.build()
        model.pack_new_sequences(self._params['sequence'])
        output_file.write(model.pdb)
Saves files for the minimization. Notes ----- Currently saves a logfile with best individual and a pdb of the best model.
def best_model(self):
    """Rebuilds the top scoring model from an optimisation.

    Returns
    -------
    model: AMPAL
        Returns an AMPAL model of the top scoring parameters.

    Raises
    ------
    NameError:
        Raises a name error if the optimiser has not been run.
    """
    if not hasattr(self, 'halloffame'):
        raise NameError('No best model found, have you ran the optimiser?')
    top_params = self.parse_individual(self.halloffame[0])
    model = self._params['specification'](*top_params)
    model.pack_new_sequences(self._params['sequence'])
    return model
Rebuilds the top scoring model from an optimisation. Returns ------- model: AMPAL Returns an AMPAL model of the top scoring parameters. Raises ------ NameError: Raises a name error if the optimiser has not been run.
def make_energy_funnel_data(self, cores=1):
    """Compares models created during the minimisation to the best
    model.

    Parameters
    ----------
    cores : int, optional
        With 1 (or on Windows, where process pools are restricted) the
        rebuilds run serially; otherwise a process pool is used.

    Returns
    -------
    energy_rmsd_gen: [(float, float, int)]
        A list of triples containing the BUFF score, RMSD to the top
        model and generation of a model generated during the
        minimisation.
    """
    if not self.parameter_log:
        raise AttributeError(
            'No parameter log data to make funnel, have you ran the '
            'optimiser?')
    model_cls = self._params['specification']
    # Flatten the per-generation log into (params, score, generation).
    gen_tagged = []
    for gen, models in enumerate(self.parameter_log):
        for model in models:
            gen_tagged.append((model[0], model[1], gen))
    # Sort by score so the best-scoring model comes first.
    sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
    top_result = sorted_pps[0]
    top_result_model = model_cls(*top_result[0])
    if (cores == 1) or (sys.platform == 'win32'):
        energy_rmsd_gen = map(
            self.funnel_rebuild,
            [(x, top_result_model, self._params['specification'])
             for x in sorted_pps[1:]])
    else:
        # NOTE(review): worker count comes from _params['processors'],
        # not the `cores` argument - confirm this is intentional.
        with futures.ProcessPoolExecutor(
                max_workers=self._params['processors']) as executor:
            energy_rmsd_gen = executor.map(
                self.funnel_rebuild,
                [(x, top_result_model, self._params['specification'])
                 for x in sorted_pps[1:]])
    return list(energy_rmsd_gen)
Compares models created during the minimisation to the best model. Returns ------- energy_rmsd_gen: [(float, float, int)] A list of triples containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation.
def funnel_rebuild(psg_trm_spec):
    """Rebuilds a model and compares it to a reference model.

    Parameters
    ----------
    psg_trm_spec: (([float], float, int), AMPAL, specification)
        A tuple containing the parameters, score and generation for a
        model as well as a model of the best scoring parameters.

    Returns
    -------
    energy_rmsd_gen: (float, float, int)
        A triple containing the BUFF score, RMSD to the top model and
        generation of a model generated during the minimisation.
    """
    (params, score, generation), reference_model, specification = psg_trm_spec
    rebuilt = specification(*params)
    return reference_model.rmsd(rebuilt), score, generation
Rebuilds a model and compares it to a reference model. Parameters ---------- psg_trm: (([float], float, int), AMPAL, specification) A tuple containing the parameters, score and generation for a model as well as a model of the best scoring parameters. Returns ------- energy_rmsd_gen: (float, float, int) A triple containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation.
def initialize_pop(self):
    """Creates the initial population and assigns indices (and
    optionally ring-topology neighbourhoods) to its individuals."""
    self.toolbox.register("individual", self.generate)
    self.toolbox.register("population", tools.initRepeat, list,
                          self.toolbox.individual)
    self.population = self.toolbox.population(n=self._params['popsize'])
    if self._params['neighbours']:
        # Ring topology: each individual sees `neighbours` individuals
        # on either side, with wrap-around indexing.
        for i in range(len(self.population)):
            self.population[i].ident = i
            self.population[i].neighbours = list(
                set(
                    [(i - x) % len(self.population)
                     for x in range(1, self._params['neighbours'] + 1)] +
                    [(i + x) % len(self.population)
                     for x in range(1, self._params['neighbours'] + 1)]
                ))
    self.assign_fitnesses(self.population)
Assigns indices to individuals in population.
def update_pop(self):
    """Updates the population according to crossover and fitness
    criteria."""
    candidates = [self.crossover(individual)
                  for individual in self.population]
    self._params['model_count'] += len(candidates)
    self.assign_fitnesses(candidates)
    # Greedy selection: a candidate replaces its parent only if fitter.
    for index, candidate in enumerate(candidates):
        if candidate.fitness > self.population[index].fitness:
            self.population[index] = candidate
Updates the population according to crossover and fitness criteria.
def initialize_pop(self):
    """Generates initial population with random positions and speeds."""
    self.population = self.toolbox.swarm(n=self._params['popsize'])
    if self._params['neighbours']:
        # Ring topology: each particle sees `neighbours` particles on
        # either side (plus itself), with wrap-around indexing.
        for i in range(len(self.population)):
            self.population[i].ident = i
            self.population[i].neighbours = list(
                set(
                    [(i - x) % len(self.population)
                     for x in range(1, self._params['neighbours'] + 1)] +
                    [i] +
                    [(i + x) % len(self.population)
                     for x in range(1, self._params['neighbours'] + 1)]
                ))
    else:
        # Fully-connected topology: every particle is a neighbour.
        for i in range(len(self.population)):
            self.population[i].ident = i
            self.population[i].neighbours = [
                x for x in range(len(self.population))]
    self.assign_fitnesses(self.population)
    # Seed each particle's personal best with its starting position.
    for part in self.population:
        part.best = creator.Particle(part)
        part.best.fitness.values = part.fitness.values
Generates initial population with random positions and speeds.
def generate(self):
    """Generates a particle using the creator function.

    Notes
    -----
    Position and speed are uniformly randomly seeded within allowed
    bounds. The particle also has speed limit settings taken from
    global values.

    Returns
    -------
    particle object
    """
    dimensions = len(self._params['value_means'])
    max_speed = self._params['max_speed']
    part = creator.Particle(
        [random.uniform(-1, 1) for _ in range(dimensions)])
    part.speed = [random.uniform(-max_speed, max_speed)
                  for _ in range(dimensions)]
    part.smin = -max_speed
    part.smax = max_speed
    part.ident = None
    part.neighbours = None
    return part
Generates a particle using the creator function. Notes ----- Position and speed are uniformly randomly seeded within allowed bounds. The particle also has speed limit settings taken from global values. Returns ------- particle object
def update_pop(self):
    """Assigns fitnesses to particles that are within bounds."""
    valid_particles = []
    invalid_particles = []
    # Partition: only particles inside the [-1, 1] box are evaluated.
    for part in self.population:
        if any(x > 1 or x < -1 for x in part):
            invalid_particles.append(part)
        else:
            valid_particles.append(part)
    self._params['model_count'] += len(valid_particles)
    for part in valid_particles:
        self.update_particle(part)
    self.assign_fitnesses(valid_particles)
    # Update personal bests for the particles that were scored.
    for part in valid_particles:
        if part.fitness > part.best.fitness:
            part.best = creator.Particle(part)
            part.best.fitness = part.fitness
    # Out-of-bounds particles still move, but are not scored.
    for part in invalid_particles:
        self.update_particle(part)
    self.population[:] = valid_particles + invalid_particles
    # Restore the original particle ordering.
    self.population.sort(key=lambda x: x.ident)
Assigns fitnesses to particles that are within bounds.
def initialize_pop(self):
    """Creates the initial population and assigns its fitnesses."""
    pop_size = self._params['popsize']
    self.toolbox.register("individual", self.generate)
    self.toolbox.register("population", tools.initRepeat, list,
                          self.toolbox.individual)
    self.population = self.toolbox.population(n=pop_size)
    self._params['model_count'] += len(self.population)
    self.assign_fitnesses(self.population)
Assigns initial fitnesses.
def initialize_pop(self):
    """Generates the initial population and assigns fitnesses."""
    # Set up the CMA-ES strategy state, centred at the origin of the
    # normalised [-1, 1] parameter space.
    self.initialize_cma_es(
        sigma=self._params['sigma'], weights=self._params['weights'],
        lambda_=self._params['popsize'],
        centroid=[0] * len(self._params['value_means']))
    self.toolbox.register("individual", self.make_individual)
    self.toolbox.register("generate", self.generate,
                          self.toolbox.individual)
    # NOTE(review): the initial population is seeded with
    # `initial_individual` (uniform random), not the registered CMA
    # "generate" - confirm this is intentional.
    self.toolbox.register("population", tools.initRepeat, list,
                          self.initial_individual)
    self.toolbox.register("update", self.update)
    self.population = self.toolbox.population(n=self._params['popsize'])
    self.assign_fitnesses(self.population)
    self._params['model_count'] += len(self.population)
Generates the initial population and assigns fitnesses.
def initial_individual(self):
    """Generates an individual with random parameters within bounds."""
    genes = [random.uniform(-1, 1)
             for _ in range(len(self._params['value_means']))]
    return creator.Individual(genes)
Generates an individual with random parameters within bounds.
def computeParams(self, params):
    r"""Computes the parameters depending on :math:`\lambda`.

    Notes
    -----
    It needs to be called again if :math:`\lambda` changes during
    evolution.

    Parameters
    ----------
    params: A dictionary of the manually set parameters.
    """
    self.mu = params.get("mu", int(self.lambda_ / 2))
    # Recombination weights for the mu best individuals.
    rweights = params.get("weights", "superlinear")
    if rweights == "superlinear":
        self.weights = numpy.log(self.mu + 0.5) - \
            numpy.log(numpy.arange(1, self.mu + 1))
    elif rweights == "linear":
        self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1)
    elif rweights == "equal":
        self.weights = numpy.ones(self.mu)
    else:
        raise RuntimeError("Unknown weights : %s" % rweights)
    self.weights /= sum(self.weights)
    # Variance-effective number of selected individuals.
    self.mueff = 1. / sum(self.weights ** 2)
    # Time constants for cumulation of the covariance matrix and sigma.
    self.cc = params.get("ccum", 4. / (self.dim + 4.))
    self.cs = params.get("cs", (self.mueff + 2.) /
                         (self.dim + self.mueff + 3.))
    # Learning rates for rank-one and rank-mu covariance updates.
    self.ccov1 = params.get(
        "ccov1", 2. / ((self.dim + 1.3) ** 2 + self.mueff))
    self.ccovmu = params.get("ccovmu", 2. * (
        self.mueff - 2. + 1. / self.mueff) / (
        (self.dim + 2.) ** 2 + self.mueff))
    self.ccovmu = min(1 - self.ccov1, self.ccovmu)
    # Damping for the step-size adaptation.
    self.damps = 1. + 2. * \
        max(0, numpy.sqrt((self.mueff - 1.) / (self.dim + 1.)) - 1.) + \
        self.cs
    self.damps = params.get("damps", self.damps)
    return
Computes the parameters depending on :math:`\lambda`. Notes ----- It needs to be called again if :math:`\lambda` changes during evolution. Parameters ---------- params: A dictionary of the manually set parameters.
def randomise_proposed_value(self):
    """Randomly generates a new proposed value.

    Raises
    ------
    TypeError
        Raised if this method is called on a static value, or if the
        parameter type is unknown.
    """
    ptype = self.parameter_type
    if ptype is MMCParameterType.UNIFORM_DIST:
        lower, upper = self.static_dist_or_list
        self.proposed_value = random.uniform(lower, upper)
    elif ptype is MMCParameterType.NORMAL_DIST:
        mu, sigma = self.static_dist_or_list
        self.proposed_value = random.normalvariate(mu, sigma)
    elif ptype is MMCParameterType.DISCRETE_RANGE:
        min_v, max_v, step = self.static_dist_or_list
        self.proposed_value = random.choice(
            numpy.arange(min_v, max_v, step))
    elif ptype is MMCParameterType.LIST:
        self.proposed_value = random.choice(self.static_dist_or_list)
    elif ptype is MMCParameterType.STATIC_VALUE:
        raise TypeError('This value is static, it cannot be mutated.')
    else:
        raise TypeError(
            'Cannot randomise this parameter, unknown parameter type.')
    return
Creates a randomly the proposed value. Raises ------ TypeError Raised if this method is called on a static value. TypeError Raised if the parameter type is unknown.
def accept_proposed_value(self):
    """Changes the current value to the proposed value.

    A `None` proposal is ignored; the proposal slot is cleared after
    it is accepted.
    """
    if self.proposed_value is None:
        return
    self.current_value = self.proposed_value
    self.proposed_value = None
Changes the current value to the proposed value.