text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Entry point for package and cli uses
<END_TASK>
<USER_TASK:>
Description:
def _run():
"""Entry point for package and cli uses""" |
args = parse_args()
# parse custom parameters
custom_meta = None
if args.custom_meta:
print "Adding custom parameters:"
custom_meta = {}
try:
for item in args.custom_meta.split(','):
key, value = item.split(':')
custom_meta[key] = value
print 'key: %s, value: %s' % (key, value)
except Exception as e:
sys.stderr.write("ERROR: Can not parse custom meta tags! %s\n" % (str(e)))
# we need to store some persistent info, so check if a config file
# exists (default location is ~/.centinel/config.ini). If the file
# does not exist, then create a new one at run time
configuration = centinel.config.Configuration()
if args.config:
configuration.parse_config(args.config)
else:
# if the file does not exist, then the default config file
# will be used
new_configuration = None
if os.path.exists(DEFAULT_CONFIG_FILE):
configuration.parse_config(DEFAULT_CONFIG_FILE)
else:
print 'Configuration file does not exist. Creating a new one.'
new_configuration = centinel.config.Configuration()
if not ('version' in configuration.params and
configuration.params['version']['version'] == centinel.__version__):
if not args.update_config:
print ('WARNING: configuration file is from '
'a different version (%s) of '
'Centinel. Run with --update-config to update '
'it.' % (configuration.params['version']['version']))
else:
new_configuration = centinel.config.Configuration()
backup_path = DEFAULT_CONFIG_FILE + ".old"
new_configuration.update(configuration, backup_path)
if new_configuration is not None:
configuration = new_configuration
configuration.write_out_config(DEFAULT_CONFIG_FILE)
print 'New configuration written to %s' % (DEFAULT_CONFIG_FILE)
if args.update_config:
sys.exit(0)
if args.verbose:
if 'log' not in configuration.params:
configuration.params['log'] = dict()
configuration.params['log']['log_level'] = logging.DEBUG
# add custom meta values from CLI
if custom_meta is not None:
if 'custom_meta' in configuration.params:
configuration.params['custom_meta'].update(custom_meta)
else:
configuration.params['custom_meta'] = custom_meta
centinel.conf = configuration.params
client = centinel.client.Client(configuration.params)
client.setup_logging()
# disable cert verification if the flag is set
if args.no_verify:
configuration.params['server']['verify'] = False
user = centinel.backend.User(configuration.params)
# Note: because we have mutually exclusive arguments, we don't
# have to worry about multiple arguments being called
if args.sync:
centinel.backend.sync(configuration.params)
elif args.consent:
user.informed_consent()
elif args.daemonize:
# if we don't have a valid binary location, then exit
if not os.path.exists(args.binary):
print "Error: no binary found to daemonize"
exit(1)
centinel.daemonize.daemonize(args.auto_update, args.binary,
args.user)
else:
client.run() |
<SYSTEM_TASK:>
This is a parallel version of the TLS fingerprint primitive.
<END_TASK>
<USER_TASK:>
Description:
def get_fingerprint_batch(input_list, results={}, default_port=443,
delay_time=0.5, max_threads=100):
"""
This is a parallel version of the TLS fingerprint primitive.
:param input_list: the input is a list of host:ports.
:param default_port: default port to use when no port specified
:param delay_time: delay before starting each thread
:param max_threads: maximum number of concurrent threads
:return:
""" |
threads = []
thread_error = False
thread_wait_timeout = 200
ind = 1
total_item_count = len(input_list)
for row in input_list:
if len(row.split(":")) == 2:
host, port = row.split(":")
elif len(row.split(":")) == 1:
host = row
port = default_port
else:
continue
port = int(port)
wait_time = 0
while threading.active_count() > max_threads:
time.sleep(1)
wait_time += 1
if wait_time > thread_wait_timeout:
thread_error = True
break
if thread_error:
results["error"] = "Threads took too long to finish."
break
# add just a little bit of delay before starting the thread
# to avoid overwhelming the connection.
time.sleep(delay_time)
log_prefix = "%d/%d: " % (ind, total_item_count)
thread = threading.Thread(target=get_fingerprint,
args=(host, port,
results, log_prefix))
ind += 1
thread.setDaemon(1)
thread_open_success = False
retries = 0
while not thread_open_success and retries < MAX_THREAD_START_RETRY:
try:
thread.start()
threads.append(thread)
thread_open_success = True
except:
retries += 1
time.sleep(THREAD_START_DELAY)
logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, host, retries, MAX_THREAD_START_RETRY))
if retries == MAX_THREAD_START_RETRY:
logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, host, retries))
for thread in threads:
thread.join(thread_wait_timeout)
return results |
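A hypothetical call to get_fingerprint_batch might look like the sketch below; the get_fingerprint worker it spawns is defined elsewhere in this module, and the host names are placeholders only.
results = {}
get_fingerprint_batch(["example.com", "example.org:8443"],
                      results=results,
                      default_port=443,
                      delay_time=0.1,
                      max_threads=10)
# each worker thread records its fingerprint (or error) in the shared dict
print(results)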
<SYSTEM_TASK:>
Returns the redirecting URL if there is an HTML refresh meta tag,
<END_TASK>
<USER_TASK:>
Description:
def meta_redirect(content):
"""
Returns the redirecting URL if there is an HTML refresh meta tag,
returns None otherwise
:param content: HTML content
""" |
decoded = content.decode("utf-8", errors="replace")
try:
soup = BeautifulSoup.BeautifulSoup(decoded)
except Exception as e:
return None
result = soup.find("meta", attrs={"http-equiv": re.compile("^refresh$", re.I)})
if result:
try:
wait, text = result["content"].split(";")
text = text.strip()
if text.lower().startswith("url="):
url = text[4:]
return url
except:
# there are normal meta tags with refresh that are not
# redirects and don't have a URL in them
pass
return None |
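As an illustration of meta_redirect, the sketch below feeds in a small made-up HTML snippet containing a refresh meta tag (the content argument is a Python 2 byte string, as the surrounding code expects).
html = ('<html><head>'
        '<meta http-equiv="refresh" content="5; url=http://example.com/next">'
        '</head></html>')
print(meta_redirect(html))                          # -> http://example.com/next
print(meta_redirect('<html><head></head></html>'))  # -> None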
<SYSTEM_TASK:>
Actually performs the HTTP request. Moved this to its own private method since
<END_TASK>
<USER_TASK:>
Description:
def _get_http_request(netloc, path="/", headers=None, ssl=False):
"""
Actually performs the HTTP request. Moved this to its own private method since
it is called several times when following redirects
:param netloc: host, or host:port, to connect to
:param path:
:param headers:
:param ssl:
:return:
""" |
if ssl:
port = 443
else:
port = 80
host = netloc
if len(netloc.split(":")) == 2:
host, port = netloc.split(":")
request = {"host": host,
"port": port,
"path": path,
"ssl": ssl,
"method": "GET"}
if headers:
request["headers"] = headers
response = {}
try:
conn = ICHTTPConnection(host=host, port=port, timeout=10)
conn.request(path, headers, ssl, timeout=10)
response["status"] = conn.status
response["reason"] = conn.reason
response["headers"] = conn.headers
body = conn.body
try:
response["body"] = body.encode('utf-8')
except UnicodeDecodeError:
# if utf-8 fails to encode, just use base64
response["body.b64"] = body.encode('base64')
except Exception as err:
response["failure"] = str(err)
result = {"response": response,
"request": request}
return result |
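A hedged sketch of calling _get_http_request directly; ICHTTPConnection comes from the surrounding module, and the netloc and headers below are illustrative only.
result = _get_http_request("example.com:8080",
                           path="/index.html",
                           headers={"User-Agent": "centinel"},
                           ssl=False)
print(result["request"])                  # echoes host, port, path, ssl, method
print(result["response"].get("status"))   # HTTP status, or absent on failure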
<SYSTEM_TASK:>
This is a parallel version of the HTTP GET primitive.
<END_TASK>
<USER_TASK:>
Description:
def get_requests_batch(input_list, results={}, delay_time=0.5, max_threads=100):
"""
This is a parallel version of the HTTP GET primitive.
:param input_list: the input is a list of either dictionaries containing
query information, or just domain names (and NOT URLs).
:param delay_time: delay before starting each thread
:param max_threads: maximum number of concurrent threads
:return: results in dict format
Note: the input list can look like this:
[
{ "host": "www.google.com", "path": "/", "headers": {},
"ssl": False, "url": "http://www.google.com/" },
"www.twitter.com",
"www.youtube.com",
{ "host": "www.facebook.com", "path": "/", "headers": {},
"ssl": True, "url": "http://www.facebook.com" },
...
]
""" |
threads = []
thread_error = False
thread_wait_timeout = 200
ind = 1
total_item_count = len(input_list)
# randomly select one user agent for one input list
user_agent = random.choice(user_agent_pool)
for row in input_list:
headers = {}
path = "/"
ssl = False
theme = "http"
if type(row) is dict:
if "host" not in row:
continue
host = row["host"]
if "path" in row:
path = row["path"]
if "headers" in row:
if type(row["headers"]) is dict:
headers = row["headers"]
if "ssl" in row:
ssl = row["ssl"]
theme = "https"
if "url" in row:
url = row["url"]
else:
url = "%s://%s%s" % (theme, host, path)
else:
host = row
url = "%s://%s%s" % (theme, host, path)
wait_time = 0
while threading.active_count() > max_threads:
time.sleep(1)
wait_time += 1
if wait_time > thread_wait_timeout:
thread_error = True
break
if thread_error:
results["error"] = "Threads took too long to finish."
break
if "User-Agent" not in headers:
headers["User-Agent"] = user_agent
# add just a little bit of delay before starting the thread
# to avoid overwhelming the connection.
time.sleep(delay_time)
log_prefix = "%d/%d: " % (ind, total_item_count)
thread = threading.Thread(target=get_request,
args=(host, path, headers, ssl,
results, url, log_prefix))
ind += 1
thread.setDaemon(1)
thread_open_success = False
retries = 0
while not thread_open_success and retries < MAX_THREAD_START_RETRY:
try:
thread.start()
threads.append(thread)
thread_open_success = True
except:
retries += 1
time.sleep(THREAD_START_DELAY)
logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, url, retries, MAX_THREAD_START_RETRY))
if retries == MAX_THREAD_START_RETRY:
logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, url, retries))
for thread in threads:
thread.join(thread_wait_timeout)
return results |
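The input list can mix bare domains and request dictionaries, as the docstring notes; a minimal sketch follows (the domains are placeholders, and the per-URL result layout is whatever get_request stores in the shared dict).
results = {}
input_list = [
    "www.example.com",
    {"host": "www.example.org", "path": "/about", "headers": {},
     "ssl": True, "url": "https://www.example.org/about"},
]
get_requests_batch(input_list, results=results, delay_time=0.1, max_threads=10)
print(results)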
<SYSTEM_TASK:>
Create a script with the given content, move it to the
<END_TASK>
<USER_TASK:>
Description:
def create_script_for_location(content, destination):
"""Create a script with the given content, mv it to the
destination, and make it executable
Parameters:
content- the content to put in the script
destination- the directory to copy to
Note: due to constraints on os.rename, destination must be an
absolute path to a file, not just a directory
""" |
temp = tempfile.NamedTemporaryFile(mode='w', delete=False)
temp.write(content)
temp.close()
shutil.move(temp.name, destination)
cur_perms = os.stat(destination).st_mode
set_perms = cur_perms | stat.S_IXOTH | stat.S_IXGRP | stat.S_IXUSR
os.chmod(destination, set_perms) |
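A small sketch of create_script_for_location; the destination path is hypothetical and, as the docstring warns, must be an absolute path to a file.
script = "#!/bin/bash\n# say hello\necho 'hello from centinel'\n"
create_script_for_location(script, "/tmp/centinel-hello.sh")
# the file is now executable by user, group and others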
<SYSTEM_TASK:>
Create crontab entries to run centinel every hour and
<END_TASK>
<USER_TASK:>
Description:
def daemonize(package, bin_loc, user):
"""Create crontab entries to run centinel every hour and
autoupdate every day
Parameters:
package- name of the currently installed package (will be used for
autoupdate). If this parameter is None, the autoupdater will
not be used
bin_loc- location of the centinel binary/script.
Note: this works by creating temporary files, adding the content
of the cron scripts to these temporary files, moving these files
into the appropriate cron folders, and making these scripts
executable
Note: if the script already exists, this will delete it
""" |
path = "/etc/cron.hourly/centinel-" + user
if user != "root":
# create a script to run centinel every hour as the current user
hourly = "".join(["#!/bin/bash\n",
"# cron job for centinel\n",
"su ", user, " -c '", bin_loc, " --sync'\n",
"su ", user, " -c '", bin_loc, "'\n",
"su ", user, " -c '", bin_loc, " --sync'\n"])
else:
# create a script to run centinel every hour as root
hourly = "".join(["#!/bin/bash\n",
"# cron job for centinel\n",
bin_loc, " --sync\n",
bin_loc, "\n",
bin_loc, " --sync\n"])
create_script_for_location(hourly, path)
# create a script to get the client to autoupdate every day
if package is None:
return
updater = "".join(["#!/bin/bash\n",
"# autoupdater for centinel\n"
"sudo pip install --upgrade ", package, "\n"])
create_script_for_location(updater, "/etc/cron.daily/centinel-autoupdate")
print "Successfully created cron jobs for user " + user |
<SYSTEM_TASK:>
Create all available VPN configuration files in the given directory
<END_TASK>
<USER_TASK:>
Description:
def create_config_files(directory):
"""Create all available VPN configuration files in the given directory
Note: I am basically just following along with what their script
client does
""" |
# get the config file template
template_url = ("https://securenetconnection.com/vpnconfig/"
"openvpn-template.ovpn")
resp = requests.get(template_url)
resp.raise_for_status()
template = resp.content
# get the available servers and create a config file for each server
server_url = ("https://securenetconnection.com/vpnconfig/"
"servers-cli.php")
resp = requests.get(server_url)
resp.raise_for_status()
servers = resp.content.split("\n")
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.join(directory, "servers.txt"), 'w') as f:
f.write(resp.content)
for server_line in servers:
if server_line.strip() == "":
continue
server_line = server_line.split("|")
try:
ip, desc, country, udp_sup, tcp_sup = server_line
except ValueError:
ip, desc, country, udp_sup, tcp_sup, no_rand = server_line
with open(os.path.join(directory, ip + ".ovpn"), 'w') as file_o:
file_o.write(template)
# create tcp if available, else udp
tcp_sup = tcp_sup.strip()
if tcp_sup:
port, proto = 443, "tcp"
else:
port, proto = 53, "udp"
file_o.write("remote {0} {1}\n".format(ip, port))
file_o.write("proto {0}\n".format(proto))
# add automatic dns server update
file_o.write("up /etc/openvpn/update-resolv-conf\n")
file_o.write("down /etc/openvpn/update-resolv-conf\n") |
<SYSTEM_TASK:>
Download the scheduler.info file and perform a smart comparison
<END_TASK>
<USER_TASK:>
Description:
def sync_scheduler(self):
"""Download the scheduler.info file and perform a smart comparison
with what we currently have so that we don't overwrite the
last_run timestamp
To do a smart comparison, we go over each entry in the
server's scheduler file. If a scheduler entry is not present
in the server copy, we delete it from the client copy; if the
scheduler entry is present in the server copy, then we
overwrite the frequency count in the client copy
""" |
# get the server scheduler.info file
url = "%s/%s/%s" % (self.config['server']['server_url'],
"experiments", "scheduler.info")
try:
req = requests.get(url, proxies=self.config['proxy']['proxy'],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download scheduler.info: %s" % exp)
raise exp
try:
server_sched = json.loads(req.content)
except Exception as exp:
logging.exception("Error parsing server scheduler: %s" % exp)
raise exp
sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
'scheduler.info')
if not os.path.exists(sched_filename):
with open(sched_filename, 'w') as file_p:
json.dump(server_sched, file_p, indent=2,
separators=(',', ': '))
return
client_sched = {}
try:
with open(sched_filename, 'r') as file_p:
client_sched = json.load(file_p)
except Exception as exp:
client_sched = {}
logging.exception("Error loading scheduler file: %s" % exp)
logging.info("Making an empty scheduler")
# delete any scheduled tasks as necessary
#
# Note: this looks ugly, but we can't modify dictionaries
# while we iterate over them
client_exp_keys = list(client_sched.keys())
for exp in client_exp_keys:
if exp not in server_sched:
del client_sched[exp]
# and update all the other frequencies
for exp in server_sched:
if exp in client_sched:
client_sched[exp]['frequency'] = server_sched[exp]['frequency']
else:
client_sched[exp] = server_sched[exp]
# write out the results
with open(sched_filename, 'w') as file_p:
json.dump(client_sched, file_p, indent=2,
separators=(',', ': ')) |
<SYSTEM_TASK:>
Create a URL for the user to give their consent through
<END_TASK>
<USER_TASK:>
Description:
def informed_consent(self):
"""Create a URL for the user to give their consent through""" |
if self.typeable_handle is None:
consent_url = [self.config['server']['server_url'],
"/get_initial_consent?username="]
consent_url.append(urlsafe_b64encode(self.username))
consent_url.append("&password=")
consent_url.append(urlsafe_b64encode(self.password))
else:
consent_url = [self.config['server']['server_url'],
"/consent/"]
consent_url.append(self.typeable_handle)
consent_url = "".join(consent_url)
print "Please go to %s to give your consent." % (consent_url)
return consent_url |
<SYSTEM_TASK:>
Unfortunately, Python is not smart enough to return an absolute
<END_TASK>
<USER_TASK:>
Description:
def return_abs_path(directory, path):
"""
Unfortunately, Python is not smart enough to return an absolute
path with tilde expansion, so I am writing functionality to do this
:param directory:
:param path:
:return:
""" |
if directory is None or path is None:
return
directory = os.path.expanduser(directory)
return os.path.abspath(os.path.join(directory, path)) |
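For example (the exact output depends on the current user's home directory):
print(return_abs_path("~/.centinel", "experiments"))
# e.g. /home/alice/.centinel/experiments
print(return_abs_path(None, "experiments"))  # -> None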
<SYSTEM_TASK:>
Entry point for all uses of centinel
<END_TASK>
<USER_TASK:>
Description:
def _run():
"""Entry point for all uses of centinel""" |
args = parse_args()
# register signal handler
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# set up logging
log_formatter = logging.Formatter("%(asctime)s %(filename)s(line %(lineno)d) "
"%(levelname)s: %(message)s")
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
# add file handler if specified
if args.log_file:
file_handler = logging.FileHandler(args.log_file)
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
# check vm_num and vm_index value
if args.vm_num < 1:
print "vm_num value cannot be negative!"
return
if args.vm_index < 1 or args.vm_index > args.vm_num:
print "vm_index value cannot be negative or greater than vm_num!"
return
if args.create_conf_dir:
if args.create_HMA:
hma_dir = return_abs_path(args.create_conf_dir, 'vpns')
hma.create_config_files(hma_dir)
elif args.create_IPVANISH:
ipvanish_dir = return_abs_path(args.create_conf_dir, 'vpns')
ipvanish.create_config_files(ipvanish_dir)
elif args.create_PUREVPN:
purevpn_dir = return_abs_path(args.create_conf_dir, 'vpns')
purevpn.create_config_files(purevpn_dir)
elif args.create_VPNGATE:
vpngate_dir = return_abs_path(args.create_conf_dir, 'vpns')
vpngate.create_config_files(vpngate_dir)
# create the config files for the openvpn config files
create_config_files(args.create_conf_dir)
else:
# sanity check tls_auth and key_direction
if (args.tls_auth is not None and args.key_direction is None) or \
(args.tls_auth is None and args.key_direction is not None):
logging.error("tls_auth and key_direction must be specified "
"together!")
return
scan_vpns(directory=args.directory, auth_file=args.auth_file,
crt_file=args.crt_file, tls_auth=args.tls_auth,
key_direction=args.key_direction, exclude_list=args.exclude_list,
shuffle_lists=args.shuffle_lists, vm_num=args.vm_num,
vm_index=args.vm_index, reduce_vp=args.reduce_vp) |
<SYSTEM_TASK:>
Given a configuration file, read in and interpret the results
<END_TASK>
<USER_TASK:>
Description:
def parse_config(self, config_file):
"""
Given a configuration file, read in and interpret the results
:param config_file:
:return:
""" |
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} |
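A hypothetical config fragment that parse_config would accept is sketched below; the file contents are illustrative only, and the point is that the 'proxy' block is collapsed into a single {proxy_type: proxy_url} mapping.
# contents of a hypothetical config.json:
# {
#   "proxy": {"proxy_type": "https", "proxy_url": "https://127.0.0.1:8080"},
#   "version": {"version": "0.1"}
# }
conf = Configuration()              # the class this method belongs to
conf.parse_config("config.json")
print(conf.params["proxy"])         # -> {"https": "https://127.0.0.1:8080"}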
<SYSTEM_TASK:>
Update the old configuration file with new values.
<END_TASK>
<USER_TASK:>
Description:
def update(self, old, backup_path=None):
"""
Update the old configuration file with new values.
:param old: old configuration to update.
:param backup_path: path to write a backup of the old config file.
:return:
""" |
for category in old.params.keys():
for parameter in old.params[category].keys():
if (category in self.params and parameter in self.params[category] and
(old.params[category][parameter] != self.params[category][parameter]) and
(category != "version")):
print ("Config value '%s.%s' "
"in old configuration is different "
"from the new version\n"
"[old value] = %s\n"
"[new value] = %s"
"" % (category, parameter,
old.params[category][parameter],
self.params[category][parameter]))
answer = raw_input("Do you want to overwrite? ([y]/n) ")
while answer.lower() not in ['y', 'yes', 'n', 'no']:
answer = raw_input("Answer not recongnized. Enter 'y' or 'n'. ")
if answer in ['n', 'no']:
old_value = old.params[category][parameter]
self.params[category][parameter] = old_value
elif not (category in self.params and
parameter in self.params[category]):
print ("Deprecated config option '%s.%s' has "
"been removed." % (category, parameter))
if backup_path is not None:
old.write_out_config(backup_path)
print "Backup saved in %s." % backup_path |
<SYSTEM_TASK:>
Write out the configuration file
<END_TASK>
<USER_TASK:>
Description:
def write_out_config(self, config_file):
"""
Write out the configuration file
:param config_file:
:return:
Note: this will erase all comments from the config file
""" |
with open(config_file, 'w') as f:
json.dump(self.params, f, indent=2,
separators=(',', ': ')) |
<SYSTEM_TASK:>
divide a URL into two parts: host and path
<END_TASK>
<USER_TASK:>
Description:
def divide_url(self, url):
"""
divide a URL into two parts: host and path
""" |
if 'https://' in url:
host = url[8:].split('/')[0]
path = url[8 + len(host):]
elif 'http://' in url:
host = url[7:].split('/')[0]
path = url[7 + len(host):]
else:
host = url.split('/')[0]
path = url[len(host):]
return host, path |
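A few illustrative inputs and the (host, path) pairs divide_url produces; it is a method, so the sketch calls it on some client instance named c for the example.
print(c.divide_url("https://example.com/a/b"))  # -> ('example.com', '/a/b')
print(c.divide_url("http://example.com"))       # -> ('example.com', '')
print(c.divide_url("example.com/path"))         # -> ('example.com', '/path')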
<SYSTEM_TASK:>
Get the md5 sum of each file in the folder and return to the user
<END_TASK>
<USER_TASK:>
Description:
def hash_folder(folder, regex='[!_]*'):
"""
Get the md5 sum of each file in the folder and return to the user
:param folder: the folder to compute the sums over
:param regex: an expression to limit the files we match
:return:
Note: by default we will hash every file in the folder
Note: we will not match anything that starts with an underscore
""" |
file_hashes = {}
for path in glob.glob(os.path.join(folder, regex)):
# exclude folders
if not os.path.isfile(path):
continue
with open(path, 'rb') as fileP:
md5_hash = hashlib.md5(fileP.read()).digest()
file_name = os.path.basename(path)
file_hashes[file_name] = urlsafe_b64encode(md5_hash)
return file_hashes |
<SYSTEM_TASK:>
Given a dictionary of file hashes from the client and the
<END_TASK>
<USER_TASK:>
Description:
def compute_files_to_download(client_hashes, server_hashes):
"""
Given a dictionary of file hashes from the client and the
server, specify which files should be downloaded from the server
:param client_hashes: a dictionary where the filenames are keys and the
values are md5 hashes as strings
:param server_hashes: a dictionary where the filenames are keys and the
values are md5 hashes as strings
:return: a list of 2 lists -> [to_dload, to_delete]
to_dload- a list of filenames to get from the server
to_delete- a list of filenames to delete from the folder
Note: we will get a file from the server if a) it is not on the
client or b) the md5 differs between the client and server
Note: we will mark a file for deletion if it is not available on
the server
""" |
to_dload, to_delete = [], []
for filename in server_hashes:
if filename not in client_hashes:
to_dload.append(filename)
continue
if client_hashes[filename] != server_hashes[filename]:
to_dload.append(filename)
for filename in client_hashes:
if filename not in server_hashes:
to_delete.append(filename)
return [to_dload, to_delete] |
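A worked example with made-up hashes: "b.txt" differs between client and server, "d.txt" only exists on the server, and "c.txt" only exists on the client.
client = {"a.txt": "h1", "b.txt": "h2", "c.txt": "h3"}
server = {"a.txt": "h1", "b.txt": "hX", "d.txt": "h4"}
to_dload, to_delete = compute_files_to_download(client, server)
print(to_dload)   # -> ['b.txt', 'd.txt'] (order follows dict iteration)
print(to_delete)  # -> ['c.txt']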
<SYSTEM_TASK:>
This function creates a context manager that is used to display a
<END_TASK>
<USER_TASK:>
Description:
def spinner(beep=False, disable=False, force=False):
"""This function creates a context manager that is used to display a
spinner on stdout as long as the context has not exited.
The spinner is created only if stdout is not redirected, or if the spinner
is forced using the `force` parameter.
Parameters
----------
beep : bool
Beep when spinner finishes.
disable : bool
Hide spinner.
force : bool
Force creation of spinner even when stdout is redirected.
Example
-------
with spinner():
do_something()
do_something_else()
""" |
return Spinner(beep, disable, force) |
<SYSTEM_TASK:>
Will ask user to click link to accept app and write code
<END_TASK>
<USER_TASK:>
Description:
def verifier(self, url):
""" Will ask user to click link to accept app and write code """ |
webbrowser.open(url)
print('A browser should have opened up with a link to allow us to access')
print('your account, follow the instructions on the link and paste the verifier')
print('Code into here to give us access, if the browser didn\'t open, the link is:')
print(url)
print()
return input('Verifier: ').lstrip(" ").rstrip(" ") |
<SYSTEM_TASK:>
Read config from file
<END_TASK>
<USER_TASK:>
Description:
def read_config(self):
""" Read config from file """ |
try:
with open(self.config_file, 'r') as f:
self.config = json.loads(f.read())
except IOError:
return False
return True |
<SYSTEM_TASK:>
Post note and return the URL of the posted note
<END_TASK>
<USER_TASK:>
Description:
def post_note(self):
""" Post note and return the URL of the posted note """ |
if self.args.note_title:
note_title = self.args.note_title
else:
note_title = None
note_content = self.args.note_content
mynote = self.pump.Note(display_name=note_title, content=note_content)
mynote.to = self.pump.me.followers
mynote.cc = self.pump.Public
mynote.send()
return mynote.id or None |
<SYSTEM_TASK:>
Get the id of a PumpObject.
<END_TASK>
<USER_TASK:>
Description:
def get_obj_id(self, item):
""" Get the id of a PumpObject.
:param item: id string or PumpObject
""" |
if item is not None:
if isinstance(item, six.string_types):
return item
elif hasattr(item, 'id'):
return item.id |
<SYSTEM_TASK:>
Check if we should stop returning objects
<END_TASK>
<USER_TASK:>
Description:
def done(self):
""" Check if we should stop returning objects """ |
if self._done:
return self._done
if self._limit is None:
self._done = False
elif self.itemcount >= self._limit:
self._done = True
return self._done |
<SYSTEM_TASK:>
Build a list of objects from feed's cached items or API page
<END_TASK>
<USER_TASK:>
Description:
def _build_cache(self):
""" Build a list of objects from feed's cached items or API page""" |
self.cache = []
if self.done:
return
for i in (self.get_cached() if self._cached else self.get_page(self.url)):
if not self._cached:
# some objects don't have objectType set (inbox activities)
if not i.get("objectType"):
i["objectType"] = self.feed.object_types[0]
obj = Mapper(pypump=self.feed._pump).get_object(i)
else:
obj = i
self.cache.append(obj)
# ran out of items
if len(self.cache) <= 0:
self._done = True
# check what to do next time
if getattr(self.feed, 'issue65', False):
# work around API bug for favorites feed, see https://github.com/xray7224/PyPump/issues/65
if self._offset is None:
self._offset = 0
self._offset += 20
elif self._since is not None:
if self.feed.links.get('prev'):
self.url = self.feed.links['prev']
del self.feed.links['prev'] # avoid using it again
else:
if self.feed.links.get('next'):
self.url = self.feed.links['next']
del self.feed.links['next'] # avoid using it again
else:
self.url = None |
<SYSTEM_TASK:>
Get a feed's items.
<END_TASK>
<USER_TASK:>
Description:
def items(self, offset=None, limit=20, since=None, before=None, *args, **kwargs):
""" Get a feed's items.
:param offset: Amount of items to skip before returning data
:param since: Return items added after this id (ordered old -> new)
:param before: Return items added before this id (ordered new -> old)
:param limit: Amount of items to return
""" |
return ItemList(self, offset=offset, limit=limit, since=since, before=before, cached=self.is_cached) |
<SYSTEM_TASK:>
Direct inbox feed,
<END_TASK>
<USER_TASK:>
Description:
def direct(self):
""" Direct inbox feed,
contains activities addressed directly to the owner of the inbox.
""" |
url = self._subfeed("direct")
if "direct" in self.url or "major" in self.url or "minor" in self.url:
return self
if self._direct is None:
self._direct = self.__class__(url, pypump=self._pump)
return self._direct |
<SYSTEM_TASK:>
Major inbox feed, contains major activities such as notes and images.
<END_TASK>
<USER_TASK:>
Description:
def major(self):
""" Major inbox feed, contains major activities such as notes and images. """ |
url = self._subfeed("major")
if "major" in self.url or "minor" in self.url:
return self
if self._major is None:
self._major = self.__class__(url, pypump=self._pump)
return self._major |
<SYSTEM_TASK:>
Minor inbox feed, contains minor activities such as likes, shares and follows.
<END_TASK>
<USER_TASK:>
Description:
def minor(self):
""" Minor inbox feed, contains minor activities such as likes, shares and follows. """ |
url = self._subfeed("minor")
if "minor" in self.url or "major" in self.url:
return self
if self._minor is None:
self._minor = self.__class__(url, pypump=self._pump)
return self._minor |
<SYSTEM_TASK:>
Converts the post to something compatible with `json.dumps`
<END_TASK>
<USER_TASK:>
Description:
def serialize(self):
""" Converts the post to something compatible with `json.dumps` """ |
data = super(Note, self).serialize()
data.update({
"verb": "post",
"object": {
"objectType": self.object_type,
"content": self.content,
}
})
if self.display_name:
data["object"]["displayName"] = self.display_name
return data |
<SYSTEM_TASK:>
Sends the request
<END_TASK>
<USER_TASK:>
Description:
def request(self, server=None):
""" Sends the request """ |
request = {
"headers": {"Content-Type": "application/json"},
"timeout": self._pump.timeout,
"data": self.context,
}
url = "{proto}://{server}/{endpoint}".format(
proto=self._pump.protocol,
server=server or self.server,
endpoint=self.ENDPOINT,
)
response = self._pump._requester(requests.post, url, **request)
try:
server_data = response.json()
except ValueError:
raise ClientException(response.content)
if "error" in server_data:
raise ClientException(server_data["error"], self.context)
_log.debug("Client registration recieved: %(id)s %(secret)s %(expire)s", {
"id": server_data["client_id"],
"secret": server_data["client_secret"],
"expire": server_data["expires_at"],
})
return server_data |
<SYSTEM_TASK:>
Registers the client with the Pump API retrieving the id and secret
<END_TASK>
<USER_TASK:>
Description:
def register(self, server=None):
""" Registers the client with the Pump API retrieving the id and secret """ |
if (self.key or self.secret):
return self.update()
server_data = self.request(server)
self.key = server_data["client_id"]
self.secret = server_data["client_secret"]
self.expirey = server_data["expires_at"] |
<SYSTEM_TASK:>
Updates the information the Pump server has about the client
<END_TASK>
<USER_TASK:>
Description:
def update(self):
""" Updates the information the Pump server has about the client """ |
error = ""
if self.key is None:
error = "To update a client you need to provide a key"
if self.secret is None:
error = "To update a client you need to provide the secret"
if error:
raise ClientException(error)
self.request()
return True |
<SYSTEM_TASK:>
Compiler subroutine to test whether some functions are available
<END_TASK>
<USER_TASK:>
Description:
def compile_extensions(macros, compat=False):
"""
Compiler subroutine to test whether some functions are available
on the target system. Since the rrdtool headers shipped with most
packages do not disclose any versioning information, we cannot test
whether a given function is available that way. Instead, use this to
manually try to compile code and see if it works.
Taken from http://stackoverflow.com/questions/28843765/setup-py-check-if-non-python-library-dependency-exists.
""" |
import distutils.sysconfig
import distutils.ccompiler
import tempfile
import shutil
from textwrap import dedent
# common vars
libraries = ['rrd']
include_dirs = [package_dir, '/usr/local/include']
library_dirs = ['/usr/local/lib']
compiler_args = dict(
libraries=libraries,
include_dirs=include_dirs,
library_dirs=library_dirs,
define_macros=macros)
exts = [Extension('rrdtool', sources=['rrdtoolmodule.c'], **compiler_args)]
if compat:
return exts
# in non-compat mode, try to link to check if the new symbols are present in librrd
c_code = dedent('''
#include <rrd.h>
#include "rrdtoolmodule.h"
int main(int argc, char *argv[]) {
rrd_fetch_cb_register(NULL); /* exists in rrdtool >= 1.5.0 */
return 0;
}
''')
tmp_dir = tempfile.mkdtemp(prefix='tmp_python_rrdtool')
bin_file_name = os.path.join(tmp_dir, 'test_rrdtool')
file_name = bin_file_name + '.c'
with open(file_name, 'w') as fp:
fp.write(c_code)
# try to compile it
compiler = distutils.ccompiler.new_compiler()
assert isinstance(compiler, distutils.ccompiler.CCompiler)
for s in include_dirs:
compiler.add_include_dir(s)
for s in library_dirs:
compiler.add_library_dir(s)
for s in libraries:
compiler.add_library(s)
for s in macros:
compiler.define_macro(*s)
distutils.sysconfig.customize_compiler(compiler)
try:
compiler.link_executable(
compiler.compile([file_name]),
bin_file_name,
libraries=libraries)
except CompileError:
sys.exit('Error: Unable to compile the binary module. Do you have the rrdtool header and libraries installed?')
ret = None
except LinkError as exc:
shutil.rmtree(tmp_dir)
raise # re-raise
else:
return exts # seems to be available, compile in regular way
shutil.rmtree(tmp_dir)
return ret |
<SYSTEM_TASK:>
Adds a member to the collection.
<END_TASK>
<USER_TASK:>
Description:
def add(self, obj):
""" Adds a member to the collection.
:param obj: Object to add.
Example:
>>> mycollection.add(pump.Person('[email protected]'))
""" |
activity = {
"verb": "add",
"object": {
"objectType": obj.object_type,
"id": obj.id
},
"target": {
"objectType": self.object_type,
"id": self.id
}
}
self._post_activity(activity)
# Remove the cache so it's re-generated next time it's needed
self._members = None |
<SYSTEM_TASK:>
Removes a member from the collection.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, obj):
""" Removes a member from the collection.
:param obj: Object to remove.
Example:
>>> mycollection.remove(pump.Person('[email protected]'))
""" |
activity = {
"verb": "remove",
"object": {
"objectType": obj.object_type,
"id": obj.id
},
"target": {
"objectType": self.object_type,
"id": self.id
}
}
self._post_activity(activity)
# Remove the cache so it's re-generated next time it's needed
self._members = None |
<SYSTEM_TASK:>
Posts an activity to the feed
<END_TASK>
<USER_TASK:>
Description:
def _post_activity(self, activity, unserialize=True):
""" Posts a activity to feed """ |
# I think we always want to post to feed
feed_url = "{proto}://{server}/api/user/{username}/feed".format(
proto=self._pump.protocol,
server=self._pump.client.server,
username=self._pump.client.nickname
)
data = self._pump.request(feed_url, method="POST", data=activity)
if not data:
return False
if "error" in data:
raise PumpException(data["error"])
if unserialize:
if "target" in data:
# we probably want to unserialize target if it's there
# true for collection.{add,remove}
self.unserialize(data["target"])
else:
# copy activity attributes into object
if "author" not in data["object"]:
data["object"]["author"] = data["actor"]
for key in ["to", "cc", "bto", "bcc"]:
if key not in data["object"] and key in data:
data["object"][key] = data[key]
self.unserialize(data["object"])
return True |
<SYSTEM_TASK:>
Parses and adds block of links
<END_TASK>
<USER_TASK:>
Description:
def _add_links(self, links, key="href", proxy_key="proxyURL", endpoints=None):
""" Parses and adds block of links """ |
if endpoints is None:
endpoints = ["likes", "replies", "shares", "self", "followers",
"following", "lists", "favorites", "members"]
if links.get("links"):
for endpoint in links['links']:
# It would seem occasionally the links["links"][endpoint] is
# just a string (what would be the href value). I don't know
# why, it's likely a bug in pump.io but for now we'll support
# this too.
if isinstance(links['links'][endpoint], dict):
self._add_link(endpoint, links['links'][endpoint]["href"])
else:
self._add_link(endpoint, links["links"][endpoint])
for endpoint in endpoints:
if links.get(endpoint, None) is None:
continue
if "pump_io" in links[endpoint]:
self._add_link(endpoint, links[endpoint]["pump_io"][proxy_key])
elif "url" in links[endpoint]:
self._add_link(endpoint, links[endpoint]["url"])
else:
self._add_link(endpoint, links[endpoint][key])
return self.links |
<SYSTEM_TASK:>
Sets who the object is sent to
<END_TASK>
<USER_TASK:>
Description:
def _set_people(self, people):
""" Sets who the object is sent to """ |
if hasattr(people, "object_type"):
people = [people]
elif hasattr(people, "__iter__"):
people = list(people)
return people |
<SYSTEM_TASK:>
Uploads a file from a filename on your system.
<END_TASK>
<USER_TASK:>
Description:
def from_file(self, filename):
""" Uploads a file from a filename on your system.
:param filename: Path to file on your system.
Example:
>>> myimage.from_file('/path/to/dinner.png')
""" |
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
headers = {
"Content-Type": mimetype,
"Content-Length": str(os.path.getsize(filename)),
}
# upload file
file_data = self._pump.request(
"/api/user/{0}/uploads".format(self._pump.client.nickname),
method="POST",
data=open(filename, "rb").read(),
headers=headers,
)
# now post it to the feed
data = {
"verb": "post",
"object": file_data,
}
data.update(self.serialize())
if not self.content and not self.display_name and not self.license:
self._post_activity(data)
else:
self._post_activity(data, unserialize=False)
# update post with display_name and content
if self.content:
file_data['content'] = self.content
if self.display_name:
file_data['displayName'] = self.display_name
if self.license:
file_data['license'] = self.license
data = {
"verb": "update",
"object": file_data,
}
self._post_activity(data)
return self |
<SYSTEM_TASK:>
Returns a fully qualified URL
<END_TASK>
<USER_TASK:>
Description:
def _build_url(self, endpoint):
""" Returns a fully qualified URL """ |
server = None
if "://" in endpoint:
# looks like an url, let's break it down
server, endpoint = self._deconstruct_url(endpoint)
endpoint = endpoint.lstrip("/")
url = "{proto}://{server}/{endpoint}".format(
proto=self.protocol,
server=self.client.server if server is None else server,
endpoint=endpoint,
)
return url |
<SYSTEM_TASK:>
Breaks down URL and returns server and endpoint
<END_TASK>
<USER_TASK:>
Description:
def _deconstruct_url(self, url):
""" Breaks down URL and returns server and endpoint """ |
url = url.split("://", 1)[-1]
server, endpoint = url.split("/", 1)
return (server, endpoint) |
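For instance (pump here stands for an existing PyPump instance; the URL is the one from the request docstring further down):
print(pump._deconstruct_url("https://e14n.com/api/user/evan/profile"))
# -> ('e14n.com', 'api/user/evan/profile')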
<SYSTEM_TASK:>
Creates Client object with key and secret for server
<END_TASK>
<USER_TASK:>
Description:
def _add_client(self, url, key=None, secret=None):
""" Creates Client object with key and secret for server
and adds it to _server_cache if it doesn't already exist """ |
if "://" in url:
server, endpoint = self._deconstruct_url(url)
else:
server = url
if server not in self._server_cache:
if not (key and secret):
client = Client(
webfinger=self.client.webfinger,
name=self.client.name,
type=self.client.type,
)
client.set_pump(self)
client.register(server)
else:
client = Client(
webfinger=self.client.webfinger,
key=key,
secret=secret,
type=self.client.type,
name=self.client.name,
)
client.set_pump(self)
self._server_cache[server] = client |
<SYSTEM_TASK:>
Make request to endpoint with OAuth.
<END_TASK>
<USER_TASK:>
Description:
def request(self, endpoint, method="GET", data="",
raw=False, params=None, retries=None, client=None,
headers=None, timeout=None, **kwargs):
""" Make request to endpoint with OAuth.
Returns dictionary with response data.
:param endpoint: endpoint path, or a fully qualified URL if raw=True.
:param method: GET (default), POST or DELETE.
:param data: data to send in the request body.
:param raw: use endpoint as entered without trying to modify it.
:param params: dictionary of parameters to send in the query string.
:param retries: number of times to retry if a request fails.
:param client: OAuth client data, if False do request without OAuth.
:param headers: dictionary of HTTP headers.
:param timeout: the timeout for a request, in seconds.
Example:
>>> pump.request('https://e14n.com/api/user/evan/profile', raw=True)
{u'displayName': u'Evan Prodromou',
u'favorites': {u'totalItems': 7227,
u'url': u'https://e14n.com/api/user/evan/favorites'},
u'id': u'acct:[email protected]',
u'image': {u'height': 96,
u'url': u'https://e14n.com/uploads/evan/2014/9/24/knyf1g_thumb.jpg',
u'width': 96},
u'liked': False,
u'location': {u'displayName': u'Montreal, Quebec, Canada',
u'objectType': u'place'},
u'objectType': u'person',
u'preferredUsername': u'evan',
u'published': u'2013-02-20T15:34:52Z',
u'summary': u'I wanna make it with you. http://payb.tc/evanp',
u'updated': u'2014-09-24T02:38:32Z',
u'url': u'https://e14n.com/evan'}
""" |
retries = self.retries if retries is None else retries
timeout = self.timeout if timeout is None else timeout
# check client has been setup
if client is None:
client = self.setup_oauth_client(endpoint)
c = client.client
fnc = OAuth1Session(c.client_key,
client_secret=c.client_secret,
resource_owner_key=c.resource_owner_key,
resource_owner_secret=c.resource_owner_secret
)
elif client is False:
fnc = requests
params = {} if params is None else params
if data and isinstance(data, dict):
data = json.dumps(data)
if not raw:
url = self._build_url(endpoint)
else:
url = endpoint
headers = headers or {"Content-Type": "application/json"}
request = {
"headers": headers,
"params": params,
"timeout": timeout,
}
request.update(kwargs)
if method == "POST":
fnc = fnc.post
request.update({"data": data})
elif method == "PUT":
fnc = fnc.put
request.update({"data": data})
elif method == "GET":
fnc = fnc.get
elif method == "DELETE":
fnc = fnc.delete
for attempt in range(1 + retries):
response = self._requester(
fnc=fnc,
endpoint=endpoint,
raw=raw,
**request
)
if response.status_code == 200:
# huray!
return response.json()
if response.status_code == 400:
# can't do much
try:
try:
data = response.json()
error = data["error"]
except ValueError:
error = response.content
if not error:
raise IndexError # yesss i know.
except IndexError:
error = "400 - Bad request."
raise PyPumpException(error)
if response.ok:
return response
error = "Request Failed to {url} (response: {data} | status: {status})"
error = error.format(
url=url,
data=response.content,
status=response.status_code
)
raise PyPumpException(error) |
<SYSTEM_TASK:>
Sets up client for requests to pump
<END_TASK>
<USER_TASK:>
Description:
def setup_oauth_client(self, url=None):
""" Sets up client for requests to pump """ |
if url and "://" in url:
server, endpoint = self._deconstruct_url(url)
else:
server = self.client.server
if server not in self._server_cache:
self._add_client(server)
if server == self.client.server:
self.oauth = OAuth1(
client_key=self.store["client-key"],
client_secret=self.store["client-secret"],
resource_owner_key=self.store["oauth-access-token"],
resource_owner_secret=self.store["oauth-access-secret"],
)
return self.oauth
else:
return OAuth1(
client_key=self._server_cache[server].key,
client_secret=self._server_cache[server].secret,
) |
<SYSTEM_TASK:>
Get OAuth access token so we can make requests
<END_TASK>
<USER_TASK:>
Description:
def request_access(self, verifier):
""" Get OAuth access token so we can make requests """ |
client = OAuth1(
client_key=self._server_cache[self.client.server].key,
client_secret=self._server_cache[self.client.server].secret,
resource_owner_key=self.store["oauth-request-token"],
resource_owner_secret=self.store["oauth-request-secret"],
verifier=verifier,
)
request = {"auth": client}
response = self._requester(
requests.post,
"oauth/access_token",
**request
)
data = parse.parse_qs(response.text)
self.store["oauth-access-token"] = data[self.PARAM_TOKEN][0]
self.store["oauth-access-secret"] = data[self.PARAM_TOKEN_SECRET][0]
self._server_tokens = {} |
<SYSTEM_TASK:>
Return a boolean indicating whether we are logged in
<END_TASK>
<USER_TASK:>
Description:
def logged_in(self):
""" Return boolean if is logged in """ |
if "oauth-access-token" not in self.store:
return False
response = self.request("/api/whoami", allow_redirects=False)
# It should respond with a redirect to our profile if we're logged in
if response.status_code != 302:
return False
# the location should be the profile we have
if response.headers["location"] != self.me.links["self"]:
return False
return True |
<SYSTEM_TASK:>
Create a Tensor descriptor object.
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateTensorDescriptor():
"""
Create a Tensor descriptor object.
Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it.
Returns
-------
tensor_descriptor : int
Tensor descriptor.
""" |
tensor = ctypes.c_void_p()
status = _libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(tensor))
cudnnCheckStatus(status)
return tensor.value |
<SYSTEM_TASK:>
Initialize a previously created Tensor 4D object.
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetTensor4dDescriptor(tensorDesc, format, dataType, n, c, h, w):
"""
Initialize a previously created Tensor 4D object.
This function initializes a previously created Tensor4D descriptor object. The strides of
the four dimensions are inferred from the format parameter and set in such a way that
the data is contiguous in memory with no padding between dimensions.
Parameters
----------
tensorDesc : cudnnTensorDescriptor
Handle to a previously created tensor descriptor.
format : cudnnTensorFormat
Type of format.
dataType : cudnnDataType
Data type.
n : int
Number of images.
c : int
Number of feature maps per image.
h : int
Height of each feature map.
w : int
Width of each feature map.
""" |
status = _libcudnn.cudnnSetTensor4dDescriptor(tensorDesc, format, dataType,
n, c, h, w)
cudnnCheckStatus(status) |
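A minimal sketch of pairing the two calls above; it assumes the module's cudnnTensorFormat and cudnnDataType lookup dictionaries (cudnnDataType is referenced further down in this section) and an NCHW float tensor.
desc = cudnnCreateTensorDescriptor()
cudnnSetTensor4dDescriptor(desc,
                           cudnnTensorFormat['CUDNN_TENSOR_NCHW'],  # assumed constant table
                           cudnnDataType['CUDNN_DATA_FLOAT'],
                           8, 3, 224, 224)  # n, c, h, w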
<SYSTEM_TASK:>
Initialize a Tensor descriptor object with strides.
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w, nStride, cStride, hStride, wStride):
""""
Initialize a Tensor descriptor object with strides.
This function initializes a previously created generic Tensor descriptor object into a
4D tensor, similarly to cudnnSetTensor4dDescriptor but with the strides explicitly
passed as parameters. This can be used to lay out the 4D tensor in any order or simply to
define gaps between dimensions.
Parameters
----------
tensorDesc : cudnnTensorDescriptor_t
Handle to a previously created tensor descriptor.
dataType : cudnnDataType
Data type.
n : int
Number of images.
c : int
Number of feature maps per image.
h : int
Height of each feature map.
w : int
Width of each feature map.
nStride : int
Stride between two consecutive images.
cStride : int
Stride between two consecutive feature maps.
hStride : int
Stride between two consecutive rows.
wStride : int
Stride between two consecutive columns.
""" |
status = _libcudnn.cudnnSetTensor4dDescriptorEx(tensorDesc, dataType, n, c, h, w,
nStride, cStride, hStride, wStride)
cudnnCheckStatus(status) |
<SYSTEM_TASK:>
Get parameters of a Tensor descriptor object.
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetTensor4dDescriptor(tensorDesc):
""""
Get parameters of a Tensor descriptor object.
This function queries the parameters of the previouly initialized Tensor4D descriptor
object.
Parameters
----------
tensorDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
Returns
-------
dataType : cudnnDataType
Data type.
n : int
Number of images.
c : int
Number of feature maps per image.
h : int
Height of each feature map.
w : int
Width of each feature map.
nStride : int
Stride between two consecutive images.
cStride : int
Stride between two consecutive feature maps.
hStride : int
Stride between two consecutive rows.
wStride : int
Stride between two consecutive columns.
""" |
dataType = ctypes.c_int()
n = ctypes.c_int()
c = ctypes.c_int()
h = ctypes.c_int()
w = ctypes.c_int()
nStride = ctypes.c_int()
cStride = ctypes.c_int()
hStride = ctypes.c_int()
wStride = ctypes.c_int()
status = _libcudnn.cudnnGetTensor4dDescriptor(tensorDesc, ctypes.byref(dataType), ctypes.byref(n),
ctypes.byref(c), ctypes.byref(h), ctypes.byref(w),
ctypes.byref(nStride), ctypes.byref(cStride),
ctypes.byref(hStride), ctypes.byref(wStride))
cudnnCheckStatus(status)
return dataType.value, n.value, c.value, h.value, w.value, nStride.value, cStride.value, \
hStride.value, wStride.value |
<SYSTEM_TASK:>
Create a filter descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateFilterDescriptor():
""""
Create a filter descriptor.
This function creates a filter descriptor object by allocating the memory needed
to hold its opaque structure.
Parameters
----------
Returns
-------
wDesc : cudnnFilterDescriptor
Handle to a newly allocated filter descriptor.
""" |
wDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreateFilterDescriptor(ctypes.byref(wDesc))
cudnnCheckStatus(status)
return wDesc.value |
<SYSTEM_TASK:>
Initialize a filter descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
""""
Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
""" |
status = _libcudnn.cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w)
cudnnCheckStatus(status) |
<SYSTEM_TASK:>
Get parameters of filter descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetFilter4dDescriptor(wDesc):
""""
Get parameters of filter descriptor.
This function queries the parameters of the previouly initialized filter descriptor object.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
Returns
-------
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
""" |
dataType = ctypes.c_int()
format = ctypes.c_int()
k = ctypes.c_int()
c = ctypes.c_int()
h = ctypes.c_int()
w = ctypes.c_int()
status = _libcudnn.cudnnGetFilter4dDescriptor(wDesc, ctypes.byref(dataType),
ctypes.byref(format),
ctypes.byref(k), ctypes.byref(c),
ctypes.byref(h), ctypes.byref(w))
cudnnCheckStatus(status)
return dataType.value, format.value, k.value, c.value, h.value, w.value |
<SYSTEM_TASK:>
Create a convolution descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreateConvolutionDescriptor():
""""
Create a convolution descriptor.
This function creates a convolution descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
convDesc : cudnnConvolutionDescriptor
Handle to newly allocated convolution descriptor.
""" |
convDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(convDesc))
cudnnCheckStatus(status)
return convDesc.value |
<SYSTEM_TASK:>
Initialize a convolution descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v, dilation_h, dilation_w, mode,
computeType):
""""
Initialize a convolution descriptor.
This function initializes a previously created convolution descriptor object into a 2D
correlation. This function assumes that the tensor and filter descriptors corresponds
to the formard convolution path and checks if their settings are valid. That same
convolution descriptor can be reused in the backward path provided it corresponds to
the same layer.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
pad_h : int
zero-padding height: number of rows of zeros implicitly concatenated
onto the top and onto the bottom of input images.
pad_w : int
zero-padding width: number of columns of zeros implicitly concatenated
onto the left and onto the right of input images.
u : int
Vertical filter stride.
v : int
Horizontal filter stride.
dilation_h : int
Filter height dilation.
dilation_w : int
Filter width dilation.
mode : cudnnConvolutionMode
Select between CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
computeType : cudnnDataType
Compute precision
""" |
status = _libcudnn.cudnnSetConvolution2dDescriptor(convDesc, pad_h, pad_w, u, v,
dilation_h, dilation_w, mode,
computeType)
cudnnCheckStatus(status) |
<SYSTEM_TASK:>
Get a convolution descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolution2dDescriptor(convDesc):
""""
Get a convolution descriptor.
This function queries a previously initialized 2D convolution descriptor object.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
Returns
-------
pad_h : int
zero-padding height: number of rows of zeros implicitly concatenated onto
the top and onto the bottom of input images.
pad_w : int
zero-padding width: number of columns of zeros implicitly concatenated
onto the left and onto the right of input images.
u : int
Vertical filter stride.
v : int
Horizontal filter stride.
dilation_h : int
Filter height dilation.
dilation_w : int
Filter width dilation.
mode : cudnnConvolutionMode
Either CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
computeType : cudnnDataType
Compute precision
""" |
pad_h = ctypes.c_int()
pad_w = ctypes.c_int()
u = ctypes.c_int()
v = ctypes.c_int()
dilation_h = ctypes.c_int()
dilation_w = ctypes.c_int()
mode = ctypes.c_int()
computeType = ctypes.c_int()
status = _libcudnn.cudnnGetConvolution2dDescriptor(convDesc, ctypes.byref(pad_h),
ctypes.byref(pad_w), ctypes.byref(u),
ctypes.byref(v), ctypes.byref(dilation_h),
ctypes.byref(dilation_w),
ctypes.byref(mode), ctypes.byref(computeType))
cudnnCheckStatus(status)
return (pad_h.value, pad_w.value, u.value, v.value, dilation_h.value, dilation_w.value, mode.value,
computeType.value) |
<SYSTEM_TASK:>
Return the dimensions of the output tensor given a convolution descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, wDesc):
""""
Return the dimensions of the output tensor given a convolution descriptor.
This function returns the dimensions of the resulting 4D tensor of a 2D
convolution, given the convolution descriptor, the input tensor descriptor and
the filter descriptor. This function can help to setup the output tensor and allocate
the proper amount of memory prior to launching the actual convolution.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
inputTensorDesc: cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
wDesc: cudnnFilterDescriptor
Handle to a previously initialized filter descriptor.
Returns
-------
n : int
Number of output images.
c : int
Number of output feature maps per image.
h : int
Height of each output feature map.
w : int
Width of each output feature map.
""" |
n = ctypes.c_int()
c = ctypes.c_int()
h = ctypes.c_int()
w = ctypes.c_int()
status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc,
wDesc, ctypes.byref(n),
ctypes.byref(c), ctypes.byref(h),
ctypes.byref(w))
cudnnCheckStatus(status)
return n.value, c.value, h.value, w.value |
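For orientation, the spatial sizes this call reports follow the standard convolution output formula; the helper below only illustrates that arithmetic and is not part of the bindings.
def conv2d_output_dim(input_dim, pad, filter_dim, dilation, stride):
    # effective filter size once dilation is applied
    effective = (filter_dim - 1) * dilation + 1
    return 1 + (input_dim + 2 * pad - effective) // stride

print(conv2d_output_dim(224, pad=1, filter_dim=3, dilation=1, stride=1))  # -> 224
print(conv2d_output_dim(224, pad=0, filter_dim=7, dilation=1, stride=2))  # -> 109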
<SYSTEM_TASK:>
This function returns the best algorithm to choose for the forward convolution
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolutionForwardAlgorithm(handle, srcDesc, wDesc,
convDesc, destDesc, preference, memoryLimitInbytes):
""""
This function returns the best algorithm to choose for the forward convolution
depending on the critera expressed in the cudnnConvolutionFwdPreference_t enumerant.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
wDesc : cudnnFilterDescriptor
Handle to a previously initialized filter descriptor.
convDesc : cudnnConvolutionDescriptor
Previously initialized convolution descriptor.
destDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
preference : cudnnConvolutionFwdPreference
Enumerant to express the preference criteria in terms of memory
requirement and speed.
memoryLimitInbytes: size_t
The maximum amount of GPU memory the user is willing to use as a workspace
when preference is CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT.
Returns
-------
algo: cudnnConvolutionFwdAlgo
Enumerant that specifies which convolution algorithm should be used to
compute the results according to the specified preference.
""" |
algo = ctypes.c_int()
status = _libcudnn.cudnnGetConvolutionForwardAlgorithm(handle, srcDesc, wDesc,
convDesc, destDesc, preference,
ctypes.c_size_t(memoryLimitInbytes),
ctypes.byref(algo))
cudnnCheckStatus(status)
return algo |
<SYSTEM_TASK:>
This function returns the amount of GPU memory workspace the user needs
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc,
convDesc, destDesc, algo):
""""
This function returns the amount of GPU memory workspace the user needs
to allocate to be able to call cudnnConvolutionForward with the specified algorithm.
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
srcDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
wDesc : cudnnFilterDescriptor
Handle to a previously initialized filter descriptor.
convDesc : cudnnConvolutionDescriptor
Previously initialized convolution descriptor.
destDesc : cudnnTensorDescriptor
Handle to a previously initialized tensor descriptor.
algo : cudnnConvolutionFwdAlgo
Enumerant that specifies the chosen convolution algorithm.
Returns
-------
sizeInBytes: c_size_t
Amount of GPU memory needed as workspace to be able to execute a
forward convolution with the specified algorithm.
""" |
sizeInBytes = ctypes.c_size_t()
status = _libcudnn.cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc,
convDesc, destDesc, algo,
ctypes.byref(sizeInBytes))
cudnnCheckStatus(status)
return sizeInBytes |
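Taken together with the previous wrapper, a typical forward-convolution setup first asks cuDNN for an algorithm under a workspace budget and then queries the exact workspace that algorithm needs. This sketch assumes the handle and descriptors are already configured and that a `cudnnConvolutionFwdPreference` enum dict is defined elsewhere in this module, as in the cuDNN API.

# Hedged sketch: choose an algorithm under a 64 MiB limit, then get its workspace size.
limit = 64 * 1024 * 1024
algo = cudnnGetConvolutionForwardAlgorithm(
    handle, srcDesc, wDesc, convDesc, destDesc,
    cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT'], limit)
ws_bytes = cudnnGetConvolutionForwardWorkspaceSize(
    handle, srcDesc, wDesc, convDesc, destDesc, algo)
print('workspace needed:', ws_bytes.value, 'bytes')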
<SYSTEM_TASK:>
This routine computes the softmax function
<END_TASK>
<USER_TASK:>
Description:
def cudnnSoftmaxForward(handle, algorithm, mode, alpha, srcDesc, srcData, beta, destDesc, destData):
""""
This routing computes the softmax function
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
algorithm : cudnnSoftmaxAlgorithm
Enumerant to specify the softmax algorithm.
mode : cudnnSoftmaxMode
Enumerant to specify the softmax mode.
alpha: float
Scaling factor with which every element of the input tensors is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation Note that if beta is zero, the output
is not read and can contain any uninitialized data (including Nan numbers).
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
""" |
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
else:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
status = _libcudnn.cudnnSoftmaxForward(handle, algorithm, mode, alphaRef,
srcDesc, srcData, betaRef,
destDesc, destData)
cudnnCheckStatus(status) |
<SYSTEM_TASK:>
Create pooling descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnCreatePoolingDescriptor():
""""
Create pooling descriptor.
This function creates a pooling descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
poolingDesc : cudnnPoolingDescriptor
Newly allocated pooling descriptor.
""" |
poolingDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreatePoolingDescriptor(ctypes.byref(poolingDesc))
cudnnCheckStatus(status)
return poolingDesc.value |
<SYSTEM_TASK:>
Initialize a 2D pooling descriptor.
<END_TASK>
<USER_TASK:>
Description:
def cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight, windowWidth,
verticalPadding, horizontalPadding, verticalStride, horizontalStride):
""""
Initialize a 2D pooling descriptor.
This function initializes a previously created pooling descriptor object.
Parameters
----------
poolingDesc : cudnnPoolingDescriptor
Handle to a previously created pooling descriptor.
mode : cudnnPoolingMode
Enumerant to specify the pooling mode.
windowHeight : int
Height of the pooling window.
windowWidth : int
Width of the pooling window.
verticalPadding: int
Size of vertical padding.
horizontalPadding: int
Size of horizontal padding.
verticalStride : int
Pooling vertical stride.
horizontalStride : int
Pooling horizontal stride.
""" |
status = _libcudnn.cudnnSetPooling2dDescriptor(poolingDesc, mode, windowHeight,
windowWidth, verticalPadding, horizontalPadding,
verticalStride, horizontalStride)
cudnnCheckStatus(status) |
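A short sketch of the descriptor lifecycle: create a pooling descriptor, then configure it for 2x2 max pooling with stride 2 and no padding. It assumes a `cudnnPoolingMode` enum dict is defined elsewhere in this module, matching the cuDNN API.

poolDesc = cudnnCreatePoolingDescriptor()
# windowHeight=2, windowWidth=2, padding=0/0, strides=2/2
cudnnSetPooling2dDescriptor(poolDesc, cudnnPoolingMode['CUDNN_POOLING_MAX'],
                            2, 2, 0, 0, 2, 2)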
<SYSTEM_TASK:>
This function queries a previously created pooling descriptor object.
<END_TASK>
<USER_TASK:>
Description:
def cudnnGetPooling2dDescriptor(poolingDesc):
""""
This function queries a previously created pooling descriptor object.
Parameters
----------
poolingDesc : cudnnPoolingDescriptor
Handle to a previously created 2D pooling descriptor.
Returns
-------
mode : cudnnPoolingMode
Enumerant to specify the pooling mode.
windowHeight : int
Height of the pooling window.
windowWidth : int
Width of the pooling window.
verticalPadding: int
Size of vertical padding.
horizontalPadding: int
Size of horizontal padding.
verticalStride : int
Pooling vertical stride.
horizontalStride : int
Pooling horizontal stride.
""" |
mode = ctypes.c_int()
windowHeight = ctypes.c_int()
windowWidth = ctypes.c_int()
verticalPadding = ctypes.c_int()
horizontalPadding = ctypes.c_int()
verticalStride = ctypes.c_int()
horizontalStride = ctypes.c_int()
status = _libcudnn.cudnnGetPooling2dDescriptor(poolingDesc, ctypes.byref(mode), ctypes.byref(windowHeight),
ctypes.byref(windowWidth), ctypes.byref(verticalPadding),
ctypes.byref(horizontalPadding), ctypes.byref(verticalStride),
ctypes.byref(horizontalStride))
cudnnCheckStatus(status)
return mode.value, windowHeight.value, windowWidth.value, verticalPadding.value, horizontalPadding.value, verticalStride.value, horizontalStride.value |
<SYSTEM_TASK:>
Gradient of activation function.
<END_TASK>
<USER_TASK:>
Description:
def cudnnActivationBackward(handle, mode, alpha, srcDesc, srcData, srcDiffDesc, srcDiffData,
destDesc, destData, beta, destDiffDesc, destDiffData):
""""
Gradient of activation function.
This routine computes the gradient of a neuron activation function.
In-place operation is allowed for this routine; i.e., srcData and destData
pointers may be equal and srcDiffData and destDiffData pointers may be equal.
However, this requires the corresponding tensor descriptors to be identical
(particularly, the strides of the input and output must match for in-place operation
to be allowed).
Parameters
----------
handle : cudnnHandle
Handle to a previously created cuDNN context.
mode : cudnnActivationMode
Enumerant to specify the activation mode.
alpha: float
Scaling factor with which every element of the input tensor is multiplied.
srcDesc : cudnnTensorDescriptor
Handle to the previously initialized input tensor descriptor.
srcData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDesc.
srcDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized input differential tensor descriptor.
srcDiffData : void_p
Data pointer to GPU memory associated with the tensor descriptor
srcDiffData.
destDesc : cudnnTensorDescriptor
Handle to the previously initialized output tensor descriptor.
destData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDesc.
beta: float
Scaling factor which is applied on every element of the output tensor prior
to adding the result of the activation gradient. Note that if beta is zero, the
output is not read and can contain any uninitialized data (including Nan numbers).
destDiffDesc : cudnnTensorDescriptor
Handle to the previously initialized output differential tensor descriptor.
destDiffData : void_p
Data pointer to GPU memory associated with the output tensor descriptor
destDiffDesc.
""" |
dataType = cudnnGetTensor4dDescriptor(destDesc)[0]
if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
alphaRef = ctypes.byref(ctypes.c_double(alpha))
betaRef = ctypes.byref(ctypes.c_double(beta))
else:
alphaRef = ctypes.byref(ctypes.c_float(alpha))
betaRef = ctypes.byref(ctypes.c_float(beta))
status = _libcudnn.cudnnActivationBackward(handle, mode, alphaRef, srcDesc, srcData,
srcDiffDesc, srcDiffData,
destDesc, destData, betaRef,
destDiffDesc, destDiffData)
cudnnCheckStatus(status) |
<SYSTEM_TASK:>
This will add the prefix to the key if one exists on the store
<END_TASK>
<USER_TASK:>
Description:
def __prefix_key(self, key):
""" This will add the prefix to the key if one exists on the store """ |
# If there isn't a prefix don't bother
if self.prefix is None:
return key
# Don't prefix key if it already has it
if key.startswith(self.prefix + "-"):
return key
return "{0}-{1}".format(self.prefix, key) |
<SYSTEM_TASK:>
Prints the specified message if it's not None and waits for a keypress.
<END_TASK>
<USER_TASK:>
Description:
def pause(message='Press any key to continue . . . '):
"""
Prints the specified message if it's not None and waits for a keypress.
""" |
if message is not None:
print(message, end='')
sys.stdout.flush()
getch()
print() |
<SYSTEM_TASK:>
Returns all the covalent bonds in a list of `Atom` pairs.
<END_TASK>
<USER_TASK:>
Description:
def covalent_bonds(atoms, threshold=1.1):
"""Returns all the covalent bonds in a list of `Atom` pairs.
Notes
-----
Uses information `element_data`, which can be accessed directly
through this module i.e. `isambard.ampal.interactions.element_data`.
Parameters
----------
atoms : [(`Atom`, `Atom`)]
List of pairs of `Atoms`.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
""" |
bonds=[]
for a, b in atoms:
bond_distance=(
element_data[a.element.title()]['atomic radius'] + element_data[
b.element.title()]['atomic radius']) / 100
dist=distance(a._vector, b._vector)
if dist <= bond_distance * threshold:
bonds.append(CovalentBond(a, b, dist))
return bonds |
<SYSTEM_TASK:>
Finds all covalent bonds in the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
"""Finds all covalent bonds in the AMPAL object.
Parameters
----------
ampal : AMPAL Object
Any AMPAL object with a `get_atoms` method.
max_range : float, optional
Used to define the sector size, so interactions at longer ranges
will not be found.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
tag : bool, optional
If `True`, will add the covalent bond to the tags dictionary of
each `Atom` involved in the interaction under the `covalent_bonds`
key.
""" |
sectors=gen_sectors(ampal.get_atoms(), max_range * 1.1)
bonds=[]
for sector in sectors.values():
atoms=itertools.combinations(sector, 2)
bonds.extend(covalent_bonds(atoms, threshold=threshold))
bond_set=list(set(bonds))
if tag:
for bond in bond_set:
a, b=bond.a, bond.b
if 'covalent_bonds' not in a.tags:
a.tags['covalent_bonds']=[b]
else:
a.tags['covalent_bonds'].append(b)
if 'covalent_bonds' not in b.tags:
b.tags['covalent_bonds']=[a]
else:
b.tags['covalent_bonds'].append(a)
return bond_set |
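A minimal usage sketch, assuming isambard is importable and a local PDB file is available; the file name and the loader call are illustrative assumptions rather than part of this module.

import isambard
structure = isambard.ampal.convert_pdb_to_ampal('my_structure.pdb')  # assumed loader and file
bonds = find_covalent_bonds(structure)
print(len(bonds), 'covalent bonds found')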
<SYSTEM_TASK:>
Generates a graph of the covalent bond network described by the interactions.
<END_TASK>
<USER_TASK:>
Description:
def generate_covalent_bond_graph(covalent_bonds):
"""Generates a graph of the covalent bond network described by the interactions.
Parameters
----------
covalent_bonds: [CovalentBond]
List of `CovalentBond`.
Returns
-------
bond_graph: networkx.Graph
A graph of the covalent bond network.
""" |
bond_graph=networkx.Graph()
for inter in covalent_bonds:
bond_graph.add_edge(inter.a, inter.b)
return bond_graph |
<SYSTEM_TASK:>
Splits the bond graph between two atoms to producing subgraphs.
<END_TASK>
<USER_TASK:>
Description:
def generate_bond_subgraphs_from_break(bond_graph, atom1, atom2):
"""Splits the bond graph between two atoms to producing subgraphs.
Notes
-----
This will not work if there are cycles in the bond graph.
Parameters
----------
bond_graph: networkx.Graph
Graph of covalent bond network
atom1: isambard.ampal.Atom
First atom in the bond.
atom2: isambard.ampal.Atom
Second atom in the bond.
Returns
-------
subgraphs: [networkx.Graph]
A list of subgraphs generated when a bond is broken in the covalent
bond network.
""" |
bond_graph.remove_edge(atom1, atom2)
try:
subgraphs=list(networkx.connected_component_subgraphs(
bond_graph, copy=False))
finally:
# Add edge
bond_graph.add_edge(atom1, atom2)
return subgraphs |
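Building on the two functions above, the following sketch splits the covalent network at a single bond; `bonds` is assumed to be the list returned by find_covalent_bonds in the previous example.

bond_graph = generate_covalent_bond_graph(bonds)
first = bonds[0]
fragments = generate_bond_subgraphs_from_break(bond_graph, first.a, first.b)
print(len(fragments), 'fragments after removing one bond')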
<SYSTEM_TASK:>
Shortens a string if it is above a certain length.
<END_TASK>
<USER_TASK:>
Description:
def cap(v, l):
"""Shortens string is above certain length.""" |
s = str(v)
return s if len(s) <= l else s[-l:] |
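For example, only the last `l` characters are kept when the value is too long; these calls are illustrative.

cap('abcdefgh', 4)   # -> 'efgh'
cap(12345, 8)        # -> '12345'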
<SYSTEM_TASK:>
Returns atoms within the distance from the point.
<END_TASK>
<USER_TASK:>
Description:
def find_atoms_within_distance(atoms, cutoff_distance, point):
"""Returns atoms within the distance from the point.
Parameters
----------
atoms : [ampal.atom]
A list of `ampal.atoms`.
cutoff_distance : float
Maximum distance from point.
point : (float, float, float)
Reference point, 3D coordinate.
Returns
-------
filtered_atoms : [ampal.atoms]
`atoms` list filtered by distance.
""" |
return [x for x in atoms if distance(x, point) <= cutoff_distance] |
<SYSTEM_TASK:>
Returns centre point of any list of atoms.
<END_TASK>
<USER_TASK:>
Description:
def centre_of_atoms(atoms, mass_weighted=True):
""" Returns centre point of any list of atoms.
Parameters
----------
atoms : list
List of AMPAL atom objects.
mass_weighted : bool, optional
If True returns centre of mass, otherwise just geometric centre of points.
Returns
-------
centre_of_mass : numpy.array
3D coordinate for the centre of mass.
""" |
points = [x._vector for x in atoms]
if mass_weighted:
masses = [x.mass for x in atoms]
else:
masses = []
return centre_of_mass(points=points, masses=masses) |
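A small sketch comparing the mass-weighted and plain geometric centres; `structure` is assumed to be any AMPAL object exposing a get_atoms method.

atoms = list(structure.get_atoms())
com = centre_of_atoms(atoms)                             # mass-weighted centre
centroid = centre_of_atoms(atoms, mass_weighted=False)   # plain geometric centre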
<SYSTEM_TASK:>
Assigns force field parameters to Atoms in the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def assign_force_field(self, ff, mol2=False):
"""Assigns force field parameters to Atoms in the AMPAL object.
Parameters
----------
ff: BuffForceField
The force field to be used for scoring.
mol2: bool, optional
If true, mol2 style labels will also be used.
""" |
if hasattr(self, 'ligands'):
atoms = self.get_atoms(ligands=True, inc_alt_states=True)
else:
atoms = self.get_atoms(inc_alt_states=True)
for atom in atoms:
w_str = None
a_ff_id = None
if atom.element == 'H':
continue
elif atom.ampal_parent.mol_code in ff:
if atom.res_label in ff[atom.ampal_parent.mol_code]:
a_ff_id = (atom.ampal_parent.mol_code, atom.res_label)
elif atom.res_label in ff['WLD']:
a_ff_id = ('WLD', atom.res_label)
else:
w_str = ('{} atom is not parameterised in the selected '
'force field for {} residues, this will be '
'ignored.').format(
atom.res_label, atom.ampal_parent.mol_code)
elif atom.res_label in ff['WLD']:
a_ff_id = ('WLD', atom.res_label)
elif mol2 and (atom.ampal_parent.mol_code.capitalize() in ff['MOL2']):
a_ff_id = ('MOL2', atom.res_label.capitalize())
else:
if not mol2:
w_str = ('{} ({}) atom is not parameterised in the selected'
' residue force field. Try activating the heavy '
' atom force field (haff).').format(
atom.element, atom.res_label)
else:
w_str = ('{} ({}) atom is not parameterised in the selected'
' force field.').format(atom.element, atom.res_label)
if w_str:
warnings.warn(w_str, NotParameterisedWarning)
atom._ff_id = a_ff_id
self.tags['assigned_ff'] = True
return |
<SYSTEM_TASK:>
Manages assigning the force field parameters.
<END_TASK>
<USER_TASK:>
Description:
def update_ff(self, ff, mol2=False, force_ff_assign=False):
"""Manages assigning the force field parameters.
The aim of this method is to avoid unnecessary assignment of the
force field.
Parameters
----------
ff: BuffForceField
The force field to be used for scoring.
mol2: bool, optional
If true, mol2 style labels will also be used.
force_ff_assign: bool, optional
If true, the force field will be completely reassigned, ignoring the
cached parameters.
""" |
aff = False
if force_ff_assign:
aff = True
elif 'assigned_ff' not in self.tags:
aff = True
elif not self.tags['assigned_ff']:
aff = True
if aff:
self.assign_force_field(ff, mol2=mol2)
return |
<SYSTEM_TASK:>
Calculates the internal energy of the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def get_internal_energy(self, assign_ff=True, ff=None, mol2=False,
force_ff_assign=False):
"""Calculates the internal energy of the AMPAL object.
This method is assigned to the buff_internal_energy property,
using the default arguments.
Parameters
----------
assign_ff: bool, optional
If true the force field will be updated if required.
ff: BuffForceField, optional
The force field to be used for scoring.
mol2: bool, optional
If true, mol2 style labels will also be used.
force_ff_assign: bool, optional
If true, the force field will be completely reassigned, ignoring the
cached parameters.
Returns
-------
BUFF_score: BUFFScore
A BUFFScore object with information about each of the interactions and
the atoms involved.
""" |
if not ff:
ff = global_settings['buff']['force_field']
if assign_ff:
self.update_ff(ff, mol2=mol2, force_ff_assign=force_ff_assign)
interactions = find_intra_ampal(self, ff.distance_cutoff)
buff_score = score_interactions(interactions, ff)
return buff_score |
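A hedged usage sketch: score with the default BUFF force field, then force a fresh parameter assignment; `structure` is assumed to be an AMPAL object exposing this method.

score = structure.get_internal_energy()
print(score.total_energy)
rescored = structure.get_internal_energy(force_ff_assign=True)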
<SYSTEM_TASK:>
Rotates every atom in the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, angle, axis, point=None, radians=False, inc_alt_states=True):
"""Rotates every atom in the AMPAL object.
Parameters
----------
angle : float
Angle that AMPAL object will be rotated.
axis : 3D Vector (tuple, list, numpy.array)
Axis about which the AMPAL object will be rotated.
point : 3D Vector (tuple, list, numpy.array), optional
Point that the axis lies upon. If `None` then the origin is used.
radians : bool, optional
True if `angle` is defined in radians, False if in degrees.
inc_alt_states : bool, optional
If true, will rotate atoms in all states i.e. includes
alternate conformations for sidechains.
""" |
q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
for atom in self.get_atoms(inc_alt_states=inc_alt_states):
atom._vector = q.rotate_vector(v=atom._vector, point=point)
return |
<SYSTEM_TASK:>
Translates every atom in the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def translate(self, vector, inc_alt_states=True):
"""Translates every atom in the AMPAL object.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
If true, will translate atoms in all states i.e. includes
alternate conformations for sidechains.
""" |
vector = numpy.array(vector)
for atom in self.get_atoms(inc_alt_states=inc_alt_states):
atom._vector += vector
return |
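Combined with rotate above, a rigid-body move is two calls; the angle, axis and vector below are illustrative and `structure` is an assumed AMPAL object.

structure.rotate(90, (0, 0, 1))        # 90 degrees about z through the origin
structure.translate((10.0, 0.0, 0.0))  # shift 10 units along x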
<SYSTEM_TASK:>
Calculates the RMSD between two AMPAL objects.
<END_TASK>
<USER_TASK:>
Description:
def rmsd(self, other, backbone=False):
"""Calculates the RMSD between two AMPAL objects.
Notes
-----
No fitting operation is performed and both AMPAL objects must
have the same number of atoms.
Parameters
----------
other : AMPAL Object
Any AMPAL object with `get_atoms` method.
backbone : bool, optional
Calculates RMSD of backbone only.
""" |
assert type(self) == type(other)
if backbone and hasattr(self, 'backbone'):
points1 = self.backbone.get_atoms()
points2 = other.backbone.get_atoms()
else:
points1 = self.get_atoms()
points2 = other.get_atoms()
points1 = [x._vector for x in points1]
points2 = [x._vector for x in points2]
return rmsd(points1=points1, points2=points2) |
<SYSTEM_TASK:>
Appends a `Monomer` to the `Polymer`.
<END_TASK>
<USER_TASK:>
Description:
def append(self, item):
"""Appends a `Monomer to the `Polymer`.
Notes
-----
Does not update labelling.
""" |
if isinstance(item, Monomer):
self._monomers.append(item)
else:
raise TypeError(
'Only Monomer objects can be appended to a Polymer.')
return |
<SYSTEM_TASK:>
Extends the `Polymer` with the contents of another `Polymer`.
<END_TASK>
<USER_TASK:>
Description:
def extend(self, polymer):
"""Extends the `Polymer` with the contents of another `Polymer`.
Notes
-----
Does not update labelling.
""" |
if isinstance(polymer, Polymer):
self._monomers.extend(polymer)
else:
raise TypeError(
'Only Polymer objects can be used to extend a Polymer.')
return |
<SYSTEM_TASK:>
Retrieves all the `Monomers` from the AMPAL object.
<END_TASK>
<USER_TASK:>
Description:
def get_monomers(self, ligands=True):
"""Retrieves all the `Monomers` from the AMPAL object.
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`.
""" |
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
return iter(monomers) |
<SYSTEM_TASK:>
Flat list of all the Atoms in the Polymer.
<END_TASK>
<USER_TASK:>
Description:
def get_atoms(self, ligands=True, inc_alt_states=False):
"""Flat list of all the Atoms in the Polymer.
Parameters
----------
ligands : bool, optional
If true, atoms from the `Ligands` are included.
inc_alt_states : bool
If true, atoms from alternate conformations are included rather
than only the "active" states.
Returns
-------
atoms : itertools.chain
Returns an iterator of all the atoms. Convert to list if you
require indexing.
""" |
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states)) for m in monomers))
return atoms |
<SYSTEM_TASK:>
Relabels the Monomers either numerically or using a list of labels.
<END_TASK>
<USER_TASK:>
Description:
def relabel_monomers(self, labels=None):
"""Relabels the either in numerically or using a list of labels.
Parameters
----------
labels : list, optional
A list of new labels.
Raises
------
ValueError
Raised if the number of labels does not match the number of
component Monomer objects.
""" |
if labels:
if len(self._monomers) == len(labels):
for monomer, label in zip(self._monomers, labels):
monomer.id = str(label)
else:
error_string = (
'Number of Monomers ({}) and number of labels '
'({}) must be equal.')
raise ValueError(error_string.format(
len(self._monomers), len(labels)))
else:
for i, monomer in enumerate(self._monomers):
monomer.id = str(i + 1)
return |
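A quick sketch of both labelling modes; the explicit list assumes the Polymer holds exactly three Monomers and the values are illustrative.

polymer.relabel_monomers()                     # 1, 2, 3, ...
polymer.relabel_monomers(labels=[10, 11, 12])  # explicit labels, length must match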
<SYSTEM_TASK:>
Relabels all `Atoms` in numerical order.
<END_TASK>
<USER_TASK:>
Description:
def relabel_atoms(self, start=1):
"""Relabels all `Atoms` in numerical order.
Parameters
----------
start : int, optional
Begins the atom numbering at `start` rather than 1.
""" |
counter = start
for atom in self.get_atoms():
atom.id = counter
counter += 1
return |
<SYSTEM_TASK:>
Generates a PDB string for the `Polymer`.
<END_TASK>
<USER_TASK:>
Description:
def make_pdb(self, alt_states=False, inc_ligands=True):
"""Generates a PDB string for the `Polymer`.
Parameters
----------
alt_states : bool, optional
Include alternate conformations for `Monomers` in PDB.
inc_ligands : bool, optional
Includes `Ligands` in PDB.
Returns
-------
pdb_str : str
String of the pdb for the `Polymer`. Generated using information
from the component `Monomers`.
""" |
if any(not x.id for x in self._monomers):
self.relabel_monomers()
if self.ligands and inc_ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
return pdb_str |
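A minimal sketch writing the generated PDB string to disk; the output path is illustrative and `polymer` is an assumed Polymer instance.

with open('polymer_out.pdb', 'w') as out_file:
    out_file.write(polymer.make_pdb(alt_states=True))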
<SYSTEM_TASK:>
Rotates `Atom` by `angle`.
<END_TASK>
<USER_TASK:>
Description:
def rotate(self, angle, axis, point=None, radians=False):
"""Rotates `Atom` by `angle`.
Parameters
----------
angle : float
Angle that `Atom` will be rotated.
axis : 3D Vector (tuple, list, numpy.array)
Axis about which the `Atom` will be rotated.
point : 3D Vector (tuple, list, numpy.array), optional
Point that the `axis` lies upon. If `None` then the origin is used.
radians : bool, optional
True if `angle` is defined in radians, False if in degrees.
""" |
q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
self._vector = q.rotate_vector(v=self._vector, point=point)
return |
<SYSTEM_TASK:>
Parse mmcif file into a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def dict_from_mmcif(mmcif, path=True):
"""Parse mmcif file into a dictionary.
Notes
-----
Full list of keys/value types, and further information on them can be viewed here:
http://mmcif.wwpdb.org/docs/pdb_to_pdbx_correspondences.html
All values in the returned dict are str or list(str).
This means that some of the data values are string representations of integers
- parse these outside of this function if desired.
An alternative approach to this can be found in Biopython (via the function Bio.PDB.MMCIF2Dict.MMCIF2Dict).
mmcif files are subject to the usual "here be dragons" problems of the PDB and difficult file formats.
As such, this function is likely to be in a permanent state of flux as more dragons are found.
Parameters
----------
mmcif : str
mmcif string or a path to an mmcif file.
path : bool
True if mmcif is a path.
Returns
-------
cif_data : dict
Keys are cif data names, e.g. '_struct_keywords.text'.
Values are str or list(str).
""" |
if path:
with open(mmcif, 'r') as foo:
lines = foo.readlines()
else:
lines = mmcif.splitlines()
lines = [' '.join(x.strip().split()) for x in lines]
# Some of the data in a .cif files are stored between 'loop_' to initiate a loop, and '#' to terminate it.
# The variable 'loop' is a flag to keep track of this behaviour.
loop = False
# Set up the dictionary to populate as the lines of the .cif file are iterated over.
cif_data = {}
for i, line in enumerate(lines):
if not line:
continue
# hash signifies end of a loop. Ensure loop flag is set to False.
if line == '#':
loop = False
continue
if not loop:
# This line initiates a loop section, in which keys are listed first,
# followed by lines of data in which the values are listed in the same order as the above keys.
# The values in the loop section will be stored as lists - there are multiple values for one key.
# An example of this type of data is the 'REVDAT' section, which stores details on the (potentially
# numerous) various revisions made to the PDB file during its history.
if line[:5] == 'loop_':
loop = True
key_list = []
continue
# Lines beginning with '_' start with data names, i.e. keys in the cif_data dictionary.
elif line[0] == '_':
# If line consists only of a key, then subsequent lines may contain the associated value.
if len(line.split()) == 1:
current_key = line
count = 1
while True:
# Look forward until a key is found, keeping count of the number of lines in between.
try:
if lines[i + count][0] != '_':
count += 1
# prevent infinite loop.
elif i + count > len(lines):
break
else:
if count > 1:
try:
cif_data[current_key] = ' '.join(lines[i + 1: i + count])
except IndexError:
cif_data[current_key] = None
else:
cif_data[current_key] = None
break
except IndexError:
break
continue
# Simplest case. Line is a key-value pair, with the key identified by its first character, '_'.
elif len(line.split()) > 1:
line = line.split()
try:
cif_data[line[0]] = ' '.join(line[1:])
except IndexError:
cif_data[line[0]] = None
continue
# Line is one of multiple lines that are combined into a value in the while True: loop above.
else:
continue
else:
# Within a loop section, keys are identified by their first character '_'.
# Add them to the list of keys in the loop.
if line[0] == '_':
if len(line.split()) == 1:
key_list.append(line)
if line not in cif_data.keys():
cif_data[line] = []
# Within a loop section, the values are listed within a single space-separated line in the same order
# that the keys were listed at the start of the loop.
else:
# Cannot do a simple split if any of the values themselves are strings containing at least one space.
if '\"' in line and line.count('\"') % 2 == 0:
line_parts = [x.strip() for x in line.split('\"') if x]
line = []
for part in line_parts:
if line_parts.index(part) % 2 == 0:
for x in part.split():
line.append(x)
else:
line.append(part)
elif '\'' in line and line.count('\'') % 2 == 0:
line = [x.strip() for x in line.split('\'') if x]
elif len(key_list) == len(line.split()):
line = line.split()
if len(key_list) == len(line):
for j, v in enumerate(line):
cif_data[key_list[j]].append(line[j])
else:
# CURRENTLY THERE IS A PROBLEM WITH REALLY LONG LOOPS eg _pdbx_refine_tls*, _pdbx_struct_oper_list*
# The values span multiple lines, and therefore do not satisfy
# the condition of the above 'if' statement.
# A correction for this needs to keep track of the value count on subsequent lines,
# until the 'if' condition is met.
continue
return cif_data |
<SYSTEM_TASK:>
Parse cif_data dict for a subset of its data.
<END_TASK>
<USER_TASK:>
Description:
def get_protein_dict(cif_data):
""" Parse cif_data dict for a subset of its data.
Notes
-----
cif_data dict contains all the data from the .cif file, with values as strings.
This function returns a more 'human readable' dictionary of key-value pairs.
The keys have simpler (and still often more descriptive!) names, and the values are not restricted to being strings.
To add more key-value pairs to the protein_dict, follow the patterns used in this function.
Add the key and your name for it to mmcif_data_names.
Will it need further parsing, like with the dates in the function below?
If the value is not a string, add it to a list of data-types at the end of the function.
More information on what key-value pairs can be obtained can be gleaned by examining cif_data and/or by viewing the
mmcif resource on the PDB website: http://mmcif.wwpdb.org/docs/pdb_to_pdbx_correspondences.html
WARNING: Do not alter the keys of protein_dict without caution.
The keys of protein_dict MUST match the column names of the Protein model in the protgraph database.
Parameters
----------
cif_data : dict
Key/value pairs taken directly from a .cif file.
Output of the function dict_from_mmcif.
Returns
-------
protein_dict : dict
A dictionary containing a parsed subset of the data in cif_data.
The keys have the same name as fields in the Protein model.
""" |
# Dictionary relating the keys of protein_dict (column names in Protein model) to the keys of cif_data.
mmcif_data_names = {
'keywords': '_struct_keywords.text',
'header': '_struct_keywords.pdbx_keywords',
'space_group': '_symmetry.space_group_name_H-M',
'experimental_method': '_exptl.method',
'crystal_growth': '_exptl_crystal_grow.pdbx_details',
'resolution': '_refine.ls_d_res_high',
'r_value_obs': '_refine.ls_R_factor_obs',
'atoms_protein': '_refine_hist.pdbx_number_atoms_protein',
'atoms_solvent': '_refine_hist.number_atoms_solvent',
'atoms_ligand': '_refine_hist.pdbx_number_atoms_ligand',
'atoms_nucleic_acid': '_refine_hist.pdbx_number_atoms_nucleic_acid',
'atoms_total': '_refine_hist.number_atoms_total',
'title': '_struct.title',
'pdb_descriptor': '_struct.pdbx_descriptor',
'model_details': '_struct.pdbx_model_details',
'casp_flag': '_struct.pdbx_CASP_flag',
'model_type_details': '_struct.pdbx_model_type_details',
'ncbi_taxonomy': '_entity_src_nat.pdbx_ncbi_taxonomy_id',
'ncbi_taxonomy_gene': '_entity_src_gen.pdbx_gene_src_ncbi_taxonomy_id',
'ncbi_taxonomy_host_org': '_entity_src_gen.pdbx_host_org_ncbi_taxonomy_id',
}
# Set up initial protein_dict.
protein_dict = {}
for column_name, cif_name in mmcif_data_names.items():
try:
data = cif_data[cif_name]
except IndexError:
data = None
except KeyError:
data = None
protein_dict[column_name] = data
# These entries are modified from the mmcif dictionary.
# There may be many revision dates in cif_data. We save the original deposition, release and last_modified dates.
# If there are many dates, they will be in a list in cif_data, otherwise it's one date in a string
# Is there a tidier way to do this?
if isinstance(cif_data['_database_PDB_rev.date_original'], str):
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original']
else:
protein_dict['deposition_date'] = cif_data['_database_PDB_rev.date_original'][0]
if isinstance(cif_data['_database_PDB_rev.date'], str):
protein_dict['release_date'] = cif_data['_database_PDB_rev.date']
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date']
else:
protein_dict['release_date'] = cif_data['_database_PDB_rev.date'][0]
protein_dict['last_modified_date'] = cif_data['_database_PDB_rev.date'][-1]
# crystal_growth should be a string or None
crystal_growth = protein_dict['crystal_growth']
if type(crystal_growth) == list and len(crystal_growth) >= 1:
protein_dict['crystal_growth'] = crystal_growth[0]
else:
protein_dict['crystal_growth'] = None
# taxonomy data types should be ints, not lists
taxonomy_keys = ['ncbi_taxonomy', 'ncbi_taxonomy_gene', 'ncbi_taxonomy_host_org']
for taxonomy_key in taxonomy_keys:
if protein_dict[taxonomy_key]:
if type(protein_dict[taxonomy_key]) == list:
try:
protein_dict[taxonomy_key] = int(protein_dict[taxonomy_key][0])
except (ValueError, IndexError):
protein_dict[taxonomy_key] = None
# Convert data types from strings to their correct data type.
ints = ['atoms_ligand', 'atoms_nucleic_acid', 'atoms_protein', 'atoms_solvent', 'atoms_total']
floats = ['r_value_obs', 'resolution']
dates = ['deposition_date', 'release_date', 'last_modified_date']
for k, v in protein_dict.items():
if v:
if v == '?' or v == 'None' or v == '.':
protein_dict[k] = None
elif k in ints:
protein_dict[k] = int(v)
elif k in floats:
protein_dict[k] = float(v)
elif k in dates:
protein_dict[k] = datetime.datetime.strptime(v, '%Y-%m-%d')
# Parse awkward strings from cif_data.
elif type(v) == str:
v = v.replace('loop_', '')
v = v.replace(' # ', '')
if v[0] == v[-1] == '\'':
protein_dict[k] = v[1:-1]
return protein_dict |
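A hedged end-to-end sketch combining the two parsers above; the mmCIF file name is illustrative and must exist locally.

cif_data = dict_from_mmcif('1ek9.cif')   # assumed local mmCIF file
protein = get_protein_dict(cif_data)
print(protein['resolution'], protein['deposition_date'])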
<SYSTEM_TASK:>
Takes the output list of a PISCES cull and returns in a usable dictionary.
<END_TASK>
<USER_TASK:>
Description:
def parse_PISCES_output(pisces_output, path=False):
""" Takes the output list of a PISCES cull and returns in a usable dictionary.
Notes
-----
Designed for outputs of protein sequence redundancy culls conducted using the PISCES server.
http://dunbrack.fccc.edu/PISCES.php
G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling server. Bioinformatics, 19:1589-1591, 2003.
Parameters
----------
pisces_output : str or path
Output list of non-redundant protein chains from PISCES, or path to text file.
path : bool
True if path given rather than string.
Returns
-------
pisces_dict : dict
Data output by PISCES in dictionary form.
""" |
pisces_dict = {}
if path:
pisces_path = Path(pisces_output)
pisces_content = pisces_path.read_text().splitlines()[1:]
else:
pisces_content = pisces_output.splitlines()[1:]
for line in pisces_content:
pdb = line.split()[0][:4].lower()
chain = line.split()[0][-1]
pdb_dict = {'length': line.split()[1],
'method': line.split()[2],
'resolution': line.split()[3],
'R-factor': line.split()[4],
'R-free': line.split()[5]}
if pdb in pisces_dict:
pisces_dict[pdb]['chains'].append(chain)
else:
pdb_dict['chains'] = [chain]
pisces_dict[pdb] = pdb_dict
return pisces_dict |
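A short usage sketch; the cull file name and the PDB code looked up are illustrative assumptions.

culled = parse_PISCES_output('cullpdb_pc20_res1.8.txt', path=True)  # assumed PISCES file
entry = culled.get('1xyz')  # hypothetical entry
if entry:
    print(entry['chains'], entry['resolution'])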
<SYSTEM_TASK:>
Downloads data from URL and returns decoded contents.
<END_TASK>
<USER_TASK:>
Description:
def download_decode(URL, encoding='utf-8', verbose=True):
""" Downloads data from URL and returns decoded contents.""" |
if verbose:
print("Downloading data from " + URL)
req = Request(URL)
try:
with urlopen(req) as u:
decoded_file = u.read().decode(encoding)
except URLError as e:
if hasattr(e, 'reason'):
print('Server could not be reached.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
return None
return decoded_file |
<SYSTEM_TASK:>
Checks the Olderado web server and returns the most representative conformation for PDB NMR structures.
<END_TASK>
<USER_TASK:>
Description:
def olderado_best_model(pdb_id):
""" Checks the Olderado web server and returns the most representative conformation for PDB NMR structures.
Notes
-----
Uses OLDERADO from the EBI.
See http://www.ebi.ac.uk/pdbe/nmr/olderado/ and citations therein.
Parameters
----------
pdb_id : str
The 4-character PDB code for the NMR structure of interest.
Returns
-------
model_no : int
The conformation number of the most-representative conformation.
Raises
------
ValueError
Raised if the model number found is not an integer; this might indicate that the website format has changed.
""" |
pdb_code = pdb_id[:4].lower()
olderado_url = 'http://www.ebi.ac.uk/pdbe/nmr/olderado/searchEntry?pdbCode=' + pdb_code
olderado_page = download_decode(olderado_url, verbose=False)
if olderado_page:
parsed_page = BeautifulSoup(olderado_page, 'html.parser')
else:
return None
try:
best_model = parsed_page.find_all('td')[1]
except IndexError:
print("No model info could be found for {0} - ensure that it's an NMR structure.".format(pdb_id))
return None
try:
model_no = int(best_model.string)
except ValueError as v:
print("Did not find a number for best model.")
raise v
return model_no |
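A hedged usage sketch (requires network access); the PDB code is illustrative and should belong to an NMR entry.

best = olderado_best_model('2k39')  # illustrative NMR entry
if best is not None:
    print('Most representative conformer:', best)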
<SYSTEM_TASK:>
Builds and evaluates BUFF energy of model in parallelization
<END_TASK>
<USER_TASK:>
Description:
def buff_eval(params):
"""Builds and evaluates BUFF energy of model in parallelization
Parameters
----------
params: tuple
Tuple containing the specification to be built, the sequence,
and the parameters for model building.
Returns
-------
model.buff_interaction_energy.total_energy: float
Total BUFF interaction energy of the model, assigned to the particle
fitness value.
""" |
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_interaction_energy.total_energy |
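This function is written to be mapped over parameter sets by worker processes. The sketch below shows a single call; `MySpecification`, the sequence and the parameter list are placeholders for a real specification class and values of the right arity, not names defined in this module.

energy = buff_eval((MySpecification, 'A' * 28, [4, 5.0, 200.0, 283.0]))  # placeholders
print('BUFF interaction energy:', energy)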
<SYSTEM_TASK:>
Builds and evaluates BUFF internal energy of a model in parallelization
<END_TASK>
<USER_TASK:>
Description:
def buff_internal_eval(params):
"""Builds and evaluates BUFF internal energy of a model in parallelization
Parameters
----------
params: tuple
Tuple containing the specification to be built, the sequence
and the parameters for model building.
Returns
-------
model.buff_internal_energy.total_energy: float
Total BUFF internal energy of the model, assigned to the particle
fitness value.
""" |
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_internal_energy.total_energy |
<SYSTEM_TASK:>
Builds a model and runs profit against a reference model.
<END_TASK>
<USER_TASK:>
Description:
def rmsd_eval(rmsd_params):
"""Builds a model and runs profit against a reference model.
Parameters
----------
rmsd_params : tuple
Tuple containing the specification to be built, the sequence, the
parsed parameters and the reference PDB to compare against.
Returns
-------
rmsd: float
Backbone RMSD against the reference model, as calculated by ProFit.
""" |
specification, sequence, parsed_ind, reference_pdb = rmsd_params
model = specification(*parsed_ind)
model.pack_new_sequences(sequence)
ca, bb, aa = run_profit(model.pdb, reference_pdb, path1=False, path2=False)
return bb |
<SYSTEM_TASK:>
Gets BUFF score for interaction between two AMPAL objects
<END_TASK>
<USER_TASK:>
Description:
def comparator_eval(comparator_params):
"""Gets BUFF score for interaction between two AMPAL objects
""" |
top1, top2, params1, params2, seq1, seq2, movements = comparator_params
xrot, yrot, zrot, xtrans, ytrans, ztrans = movements
obj1 = top1(*params1)
obj2 = top2(*params2)
obj2.rotate(xrot, [1, 0, 0])
obj2.rotate(yrot, [0, 1, 0])
obj2.rotate(zrot, [0, 0, 1])
obj2.translate([xtrans, ytrans, ztrans])
model = obj1 + obj2
model.relabel_all()
model.pack_new_sequences(seq1 + seq2)
return model.buff_interaction_energy.total_energy |
<SYSTEM_TASK:>
Relates the individual to be evolved to the full parameter string.
<END_TASK>
<USER_TASK:>
Description:
def parameters(self, sequence, value_means, value_ranges, arrangement):
"""Relates the individual to be evolved to the full parameter string.
Parameters
----------
sequence: str
Full amino acid sequence for specification object to be
optimized. Must be equal to the number of residues in the
model.
value_means: list
List containing mean values for parameters to be optimized.
value_ranges: list
List containing ranges for parameters to be optimized.
Values must be positive.
arrangement: list
Full list of fixed and variable parameters for model
building. Fixed values are the appropriate value. Values
to be varied should be listed as 'var0', 'var1' etc,
and must be in ascending numerical order.
Variables can be repeated if required.
""" |
self._params['sequence'] = sequence
self._params['value_means'] = value_means
self._params['value_ranges'] = value_ranges
self._params['arrangement'] = arrangement
if any(x <= 0 for x in self._params['value_ranges']):
raise ValueError("range values must be greater than zero")
self._params['variable_parameters'] = []
for i in range(len(self._params['value_means'])):
self._params['variable_parameters'].append(
"".join(['var', str(i)]))
if len(set(arrangement).intersection(
self._params['variable_parameters'])) != len(
self._params['value_means']):
raise ValueError("argument mismatch!")
if len(self._params['value_ranges']) != len(
self._params['value_means']):
raise ValueError("argument mismatch!") |
<SYSTEM_TASK:>
Compares models created during the minimisation to the best model.
<END_TASK>
<USER_TASK:>
Description:
def make_energy_funnel_data(self, cores=1):
"""Compares models created during the minimisation to the best model.
Returns
-------
energy_rmsd_gen: [(float, float, int)]
A list of triples containing the RMSD to the top model, the BUFF
score and the generation of each model produced during the
minimisation.
""" |
if not self.parameter_log:
raise AttributeError(
'No parameter log data to make funnel, have you run the '
'optimiser?')
model_cls = self._params['specification']
gen_tagged = []
for gen, models in enumerate(self.parameter_log):
for model in models:
gen_tagged.append((model[0], model[1], gen))
sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
top_result = sorted_pps[0]
top_result_model = model_cls(*top_result[0])
if (cores == 1) or (sys.platform == 'win32'):
energy_rmsd_gen = map(
self.funnel_rebuild,
[(x, top_result_model,
self._params['specification']) for x in sorted_pps[1:]])
else:
with futures.ProcessPoolExecutor(
max_workers=self._params['processors']) as executor:
energy_rmsd_gen = executor.map(
self.funnel_rebuild,
[(x, top_result_model, self._params['specification'])
for x in sorted_pps[1:]])
return list(energy_rmsd_gen) |
<SYSTEM_TASK:>
Rebuilds a model and compares it to a reference model.
<END_TASK>
<USER_TASK:>
Description:
def funnel_rebuild(psg_trm_spec):
"""Rebuilds a model and compares it to a reference model.
Parameters
----------
psg_trm_spec: (([float], float, int), AMPAL, specification)
A tuple containing the parameters, score and generation for a
model, a model built from the best-scoring parameters, and the
specification used to rebuild the model.
Returns
-------
energy_rmsd_gen: (float, float, int)
A triple containing the RMSD to the top model, the BUFF score and
the generation of a model generated during the minimisation.
""" |
param_score_gen, top_result_model, specification = psg_trm_spec
params, score, gen = param_score_gen
model = specification(*params)
rmsd = top_result_model.rmsd(model)
return rmsd, score, gen |