sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
---|---|---|
def prompt_save_images(args):
"""Prompt user to save images when crawling (for pdf and HTML formats)."""
if args['images'] or args['no_images']:
return
if (args['pdf'] or args['html']) and (args['crawl'] or args['crawl_all']):
save_msg = ('Choosing to save images will greatly slow the'
' crawling process.\nSave images anyways? (y/n): ')
try:
save_images = utils.confirm_input(input(save_msg))
except (KeyboardInterrupt, EOFError):
return
args['images'] = save_images
args['no_images'] = not save_images | Prompt user to save images when crawling (for pdf and HTML formats). | entailment |
def command_line_runner():
"""Handle command-line interaction."""
parser = get_parser()
args = vars(parser.parse_args())
if args['version']:
print(__version__)
return
if args['clear_cache']:
utils.clear_cache()
print('Cleared {0}.'.format(utils.CACHE_DIR))
return
if not args['query']:
parser.print_help()
return
# Enable cache unless user sets environ variable SCRAPE_DISABLE_CACHE
if not os.getenv('SCRAPE_DISABLE_CACHE'):
utils.enable_cache()
# Save images unless user sets environ variable SCRAPE_DISABLE_IMGS
if os.getenv('SCRAPE_DISABLE_IMGS'):
args['no_images'] = True
# Prompt user for filetype if none specified
prompt_filetype(args)
# Prompt user to save images when crawling (for pdf and HTML formats)
prompt_save_images(args)
# Scrape webpage content
scrape(args) | Handle command-line interaction. | entailment |
def load_raw(cls, model_fn, schema, *args, **kwargs):
"""
Loads a trained classifier from the raw Weka model format.
Must specify the model schema and classifier name, since
these aren't currently deduced from the model format.
"""
c = cls(*args, **kwargs)
c.schema = schema.copy(schema_only=True)
c._model_data = open(model_fn, 'rb').read()
return c | Loads a trained classifier from the raw Weka model format.
Must specify the model schema and classifier name, since
these aren't currently deduced from the model format. | entailment |
def train(self, training_data, testing_data=None, verbose=False):
"""
Updates the classifier with new data.
"""
model_fn = None
training_fn = None
clean_training = False
testing_fn = None
clean_testing = False
try:
# Validate training data.
if isinstance(training_data, basestring):
assert os.path.isfile(training_data)
training_fn = training_data
else:
assert isinstance(training_data, arff.ArffFile)
fd, training_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(training_fn, 'w') as fout:
fout.write(training_data.write())
clean_training = True
assert training_fn
# Validate testing data.
if testing_data:
if isinstance(testing_data, basestring):
assert os.path.isfile(testing_data)
testing_fn = testing_data
else:
assert isinstance(testing_data, arff.ArffFile)
fd, testing_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(testing_fn, 'w') as fout:
fout.write(testing_data.write())
clean_testing = True
else:
testing_fn = training_fn
assert testing_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
if self._model_data:
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
training_fn=training_fn,
testing_fn=testing_fn,
ckargs=self._get_ckargs_str(),
)
if self._model_data:
# Load existing model.
cmd = (
"java -cp %(CP)s %(classifier_name)s -l \"%(model_fn)s\" "
"-t \"%(training_fn)s\" -T \"%(testing_fn)s\" -d \"%(model_fn)s\"") % args
else:
# Create new model file.
cmd = (
"java -cp %(CP)s %(classifier_name)s -t \"%(training_fn)s\" "
"-T \"%(testing_fn)s\" -d \"%(model_fn)s\" %(ckargs)s") % args
if verbose:
print(cmd)
p = Popen(
cmd,
shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=sys.platform != "win32")
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
self.last_training_stdout = stdout_str
self.last_training_stderr = stderr_str
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
# exclude "Warning" lines not to raise an error for a simple warning
stderr_str = '\n'.join(l for l in stderr_str.decode('utf8').split('\n') if not "Warning" in l)
if stderr_str:
raise TrainingError(stderr_str)
# Save schema.
if not self.schema:
self.schema = arff.ArffFile.load(training_fn, schema_only=True).copy(schema_only=True)
# Save model.
with open(model_fn, 'rb') as fin:
self._model_data = fin.read()
assert self._model_data
finally:
# Cleanup files.
if model_fn:
os.remove(model_fn)
if training_fn and clean_training:
os.remove(training_fn)
if testing_fn and clean_testing:
os.remove(testing_fn) | Updates the classifier with new data. | entailment |
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
"""
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
"""
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
#assert isinstance(query_data, arff.ArffFile) #TODO: doesn't work in Python 3.*?
assert type(query_data).__name__ == 'ArffFile', 'Must be of type ArffFile, not "%s"' % type(query_data).__name__
fd, query_fn = tempfile.mkstemp(suffix='.arff')
if verbose:
print('writing', query_fn)
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, "You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read()
# print(open(query_fn).read()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = ("java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s -l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
# Expected output with simple format:
# inst# actual predicted error
# 1 ? -3.417 ?
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, ("No results found matching distribution pattern in stdout: %s") % stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
# inst# actual predicted
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\-\.\?\:]+)\s+([a-zA-Z0-9\-_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, "No results found matching simple pattern in stdout: %s" % stdout_str
#print('matches:',len(matches)
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn) | Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output. | entailment |
def get_training_coverage(self):
"""
Returns a ratio of classifiers that were able to be trained successfully.
"""
total = len(self.training_results)
i = sum(1 for data in self.training_results.values() if not isinstance(data, basestring))
return i/float(total) | Returns a ratio of classifiers that were able to be trained successfully. | entailment |
def get_new_links(self, url, resp):
"""Get new links from a URL and filter them."""
links_on_page = resp.xpath('//a/@href')
links = [utils.clean_url(u, url) for u in links_on_page]
# Remove non-links through filtering by protocol
links = [x for x in links if utils.check_protocol(x)]
# Restrict new URLs by the domain of the input URL
if not self.args['nonstrict']:
domain = utils.get_domain(url)
links = [x for x in links if utils.get_domain(x) == domain]
# Filter URLs by regex keywords, if any
if self.args['crawl']:
links = utils.re_filter(links, self.args['crawl'])
return links | Get new links from a URL and filter them. | entailment |
def page_crawled(self, page_resp):
"""Check if page has been crawled by hashing its text content.
Add new pages to the page cache.
Return whether page was found in cache.
"""
page_text = utils.parse_text(page_resp)
page_hash = utils.hash_text(''.join(page_text))
if page_hash not in self.page_cache:
utils.cache_page(self.page_cache, page_hash, self.args['cache_size'])
return False
return True | Check if page has been crawled by hashing its text content.
Add new pages to the page cache.
Return whether page was found in cache. | entailment |
def crawl_links(self, seed_url=None):
"""Find new links given a seed URL and follow them breadth-first.
Save page responses as PART.html files.
Return the PART.html filenames created during crawling.
"""
if seed_url is not None:
self.seed_url = seed_url
if self.seed_url is None:
sys.stderr.write('Crawling requires a seed URL.\n')
return []
prev_part_num = utils.get_num_part_files()
crawled_links = set()
uncrawled_links = OrderedSet()
uncrawled_links.add(self.seed_url)
try:
while uncrawled_links:
# Check limit on number of links and pages to crawl
if self.limit_reached(len(crawled_links)):
break
url = uncrawled_links.pop(last=False)
# Remove protocol, fragments, etc. to get unique URLs
unique_url = utils.remove_protocol(utils.clean_url(url))
if unique_url not in crawled_links:
raw_resp = utils.get_raw_resp(url)
if raw_resp is None:
if not self.args['quiet']:
sys.stderr.write('Failed to parse {0}.\n'.format(url))
continue
resp = lh.fromstring(raw_resp)
if self.page_crawled(resp):
continue
crawled_links.add(unique_url)
new_links = self.get_new_links(url, resp)
uncrawled_links.update(new_links)
if not self.args['quiet']:
print('Crawled {0} (#{1}).'.format(url, len(crawled_links)))
# Write page response to PART.html file
utils.write_part_file(self.args, url, raw_resp, resp, len(crawled_links))
except (KeyboardInterrupt, EOFError):
pass
curr_part_num = utils.get_num_part_files()
return utils.get_part_filenames(curr_part_num, prev_part_num) | Find new links given a seed URL and follow them breadth-first.
Save page responses as PART.html files.
Return the PART.html filenames created during crawling. | entailment |
def get_proxies():
"""Get available proxies to use with requests library."""
proxies = getproxies()
filtered_proxies = {}
for key, value in proxies.items():
if key.startswith('http://'):
if not value.startswith('http://'):
filtered_proxies[key] = 'http://{0}'.format(value)
else:
filtered_proxies[key] = value
return filtered_proxies | Get available proxies to use with requests library. | entailment |
def get_resp(url):
"""Get webpage response as an lxml.html.HtmlElement object."""
try:
headers = {'User-Agent': random.choice(USER_AGENTS)}
try:
request = requests.get(url, headers=headers, proxies=get_proxies())
except MissingSchema:
url = add_protocol(url)
request = requests.get(url, headers=headers, proxies=get_proxies())
return lh.fromstring(request.text.encode('utf-8') if PY2 else request.text)
except Exception:
sys.stderr.write('Failed to retrieve {0}.\n'.format(url))
raise | Get webpage response as an lxml.html.HtmlElement object. | entailment |
def enable_cache():
"""Enable requests library cache."""
try:
import requests_cache
except ImportError as err:
sys.stderr.write('Failed to enable cache: {0}\n'.format(str(err)))
return
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
requests_cache.install_cache(CACHE_FILE) | Enable requests library cache. | entailment |
def hash_text(text):
"""Return MD5 hash of a string."""
md5 = hashlib.md5()
md5.update(text)
return md5.hexdigest() | Return MD5 hash of a string. | entailment |
def cache_page(page_cache, page_hash, cache_size):
"""Add a page to the page cache."""
page_cache.append(page_hash)
if len(page_cache) > cache_size:
page_cache.pop(0) | Add a page to the page cache. | entailment |
def re_filter(text, regexps):
"""Filter text using regular expressions."""
if not regexps:
return text
matched_text = []
compiled_regexps = [re.compile(x) for x in regexps]
for line in text:
if line in matched_text:
continue
for regexp in compiled_regexps:
found = regexp.search(line)
if found and found.group():
matched_text.append(line)
return matched_text or text | Filter text using regular expressions. | entailment |
def remove_whitespace(text):
"""Remove unnecessary whitespace while keeping logical structure.
Keyword arguments:
text -- text to remove whitespace from (list)
Retain paragraph structure but remove other whitespace,
such as between words on a line and at the start and end of the text.
"""
clean_text = []
curr_line = ''
# Remove any newlines that follow two lines of whitespace consecutively
# Also remove whitespace at start and end of text
while text:
if not curr_line:
# Find the first line that is not whitespace and add it
curr_line = text.pop(0)
while not curr_line.strip() and text:
curr_line = text.pop(0)
if curr_line.strip():
clean_text.append(curr_line)
else:
# Filter the rest of the lines
curr_line = text.pop(0)
if not text:
# Add the final line if it is not whitespace
if curr_line.strip():
clean_text.append(curr_line)
continue
if curr_line.strip():
clean_text.append(curr_line)
else:
# If the current line is whitespace then make sure there is
# no more than one consecutive line of whitespace following
if not text[0].strip():
if len(text) > 1 and text[1].strip():
clean_text.append(curr_line)
else:
clean_text.append(curr_line)
# Now filter each individual line for extraneous whitespace
cleaner_text = []
for line in clean_text:
clean_line = ' '.join(line.split())
if not clean_line.strip():
clean_line += '\n'
cleaner_text.append(clean_line)
return cleaner_text | Remove unnecessary whitespace while keeping logical structure.
Keyword arguments:
text -- text to remove whitespace from (list)
Retain paragraph structure but remove other whitespace,
such as between words on a line and at the start and end of the text. | entailment |
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
"""Filter text using XPath, regex keywords, and tag attributes.
Keyword arguments:
infile -- HTML or text content to parse (list)
xpath -- an XPath expression (str)
filter_words -- regex keywords (list)
attributes -- HTML tag attributes (list)
Return a list of strings of text.
"""
infiles = []
text = []
if xpath is not None:
infile = parse_html(infile, xpath)
if isinstance(infile, list):
if isinstance(infile[0], lh.HtmlElement):
infiles = list(infile)
else:
text = [line + '\n' for line in infile]
elif isinstance(infile, lh.HtmlElement):
infiles = [infile]
else:
text = [infile]
else:
infiles = [infile]
if attributes is not None:
attributes = [clean_attr(x) for x in attributes]
attributes = [x for x in attributes if x]
else:
attributes = ['text()']
if not text:
text_xpath = '//*[not(self::script) and not(self::style)]'
for attr in attributes:
for infile in infiles:
if isinstance(infile, lh.HtmlElement):
new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
else:
# re.split preserves delimiters place in the list
new_text = [x for x in re.split('(\n)', infile) if x]
text += new_text
if filter_words is not None:
text = re_filter(text, filter_words)
return [''.join(x for x in line if x in string.printable)
for line in remove_whitespace(text) if line] | Filter text using XPath, regex keywords, and tag attributes.
Keyword arguments:
infile -- HTML or text content to parse (list)
xpath -- an XPath expression (str)
filter_words -- regex keywords (list)
attributes -- HTML tag attributes (list)
Return a list of strings of text. | entailment |
def get_parsed_text(args, infilename):
"""Parse and return text content of infiles.
Keyword arguments:
args -- program arguments (dict)
infilenames -- name of user-inputted and/or downloaded file (str)
Return a list of strings of text.
"""
parsed_text = []
if infilename.endswith('.html'):
# Convert HTML to lxml object for content parsing
html = lh.fromstring(read_files(infilename))
text = None
else:
html = None
text = read_files(infilename)
if html is not None:
parsed_text = parse_text(html, args['xpath'], args['filter'],
args['attributes'])
elif text is not None:
parsed_text = parse_text(text, args['xpath'], args['filter'])
else:
if not args['quiet']:
sys.stderr.write('Failed to parse text from {0}.\n'
.format(infilename))
return parsed_text | Parse and return text content of infiles.
Keyword arguments:
args -- program arguments (dict)
infilenames -- name of user-inputted and/or downloaded file (str)
Return a list of strings of text. | entailment |
def parse_html(infile, xpath):
"""Filter HTML using XPath."""
if not isinstance(infile, lh.HtmlElement):
infile = lh.fromstring(infile)
infile = infile.xpath(xpath)
if not infile:
raise ValueError('XPath {0} returned no results.'.format(xpath))
return infile | Filter HTML using XPath. | entailment |
def clean_url(url, base_url=None):
"""Add base netloc and path to internal URLs and remove www, fragments."""
parsed_url = urlparse(url)
fragment = '{url.fragment}'.format(url=parsed_url)
if fragment:
url = url.split(fragment)[0]
# Identify internal URLs and fix their format
netloc = '{url.netloc}'.format(url=parsed_url)
if base_url is not None and not netloc:
parsed_base = urlparse(base_url)
split_base = '{url.scheme}://{url.netloc}{url.path}/'.format(url=parsed_base)
url = urljoin(split_base, url)
netloc = '{url.netloc}'.format(url=urlparse(url))
if 'www.' in netloc:
url = url.replace(netloc, netloc.replace('www.', ''))
return url.rstrip(string.punctuation) | Add base netloc and path to internal URLs and remove www, fragments. | entailment |
def add_url_suffix(url):
"""Add .com suffix to URL if none found."""
url = url.rstrip('/')
if not has_suffix(url):
return '{0}.com'.format(url)
return url | Add .com suffix to URL if none found. | entailment |
def get_outfilename(url, domain=None):
"""Construct the output filename from domain and end of path."""
if domain is None:
domain = get_domain(url)
path = '{url.path}'.format(url=urlparse(url))
if '.' in path:
tail_url = path.split('.')[-2]
else:
tail_url = path
if tail_url:
if '/' in tail_url:
tail_pieces = [x for x in tail_url.split('/') if x]
tail_url = tail_pieces[-1]
# Keep length of return string below or equal to max_len
max_len = 24
if domain:
max_len -= (len(domain) + 1)
if len(tail_url) > max_len:
if '-' in tail_url:
tail_pieces = [x for x in tail_url.split('-') if x]
tail_url = tail_pieces.pop(0)
if len(tail_url) > max_len:
tail_url = tail_url[:max_len]
else:
# Add as many tail pieces that can fit
tail_len = 0
for piece in tail_pieces:
tail_len += len(piece)
if tail_len <= max_len:
tail_url += '-' + piece
else:
break
else:
tail_url = tail_url[:max_len]
if domain:
return '{0}-{1}'.format(domain, tail_url).lower()
return tail_url
return domain.lower() | Construct the output filename from domain and end of path. | entailment |
def get_single_outfilename(args):
"""Use first possible entry in query as filename."""
for arg in args['query']:
if arg in args['files']:
return ('.'.join(arg.split('.')[:-1])).lower()
for url in args['urls']:
if arg.strip('/') in url:
domain = get_domain(url)
return get_outfilename(url, domain)
sys.stderr.write('Failed to construct a single out filename.\n')
return '' | Use first possible entry in query as filename. | entailment |
def modify_filename_id(filename):
"""Modify filename to have a unique numerical identifier."""
split_filename = os.path.splitext(filename)
id_num_re = re.compile('(\(\d\))')
id_num = re.findall(id_num_re, split_filename[-2])
if id_num:
new_id_num = int(id_num[-1].lstrip('(').rstrip(')')) + 1
# Reconstruct filename with incremented id and its extension
filename = ''.join((re.sub(id_num_re, '({0})'.format(new_id_num),
split_filename[-2]), split_filename[-1]))
else:
split_filename = os.path.splitext(filename)
# Reconstruct filename with new id and its extension
filename = ''.join(('{0} (2)'.format(split_filename[-2]),
split_filename[-1]))
return filename | Modify filename to have a unique numerical identifier. | entailment |
def overwrite_file_check(args, filename):
"""If filename exists, overwrite or modify it to be unique."""
if not args['overwrite'] and os.path.exists(filename):
# Confirm overwriting of the file, or modify filename
if args['no_overwrite']:
overwrite = False
else:
try:
overwrite = confirm_input(input('Overwrite {0}? (yes/no): '
.format(filename)))
except (KeyboardInterrupt, EOFError):
sys.exit()
if not overwrite:
new_filename = modify_filename_id(filename)
while os.path.exists(new_filename):
new_filename = modify_filename_id(new_filename)
return new_filename
return filename | If filename exists, overwrite or modify it to be unique. | entailment |
def print_text(args, infilenames, outfilename=None):
"""Print text content of infiles to stdout.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- only used for interface purposes (None)
"""
for infilename in infilenames:
parsed_text = get_parsed_text(args, infilename)
if parsed_text:
for line in parsed_text:
print(line)
print('') | Print text content of infiles to stdout.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- only used for interface purposes (None) | entailment |
def write_pdf_files(args, infilenames, outfilename):
"""Write pdf file(s) to disk using pdfkit.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output pdf file (str)
"""
if not outfilename.endswith('.pdf'):
outfilename = outfilename + '.pdf'
outfilename = overwrite_file_check(args, outfilename)
options = {}
try:
if args['multiple']:
# Multiple files are written one at a time, so infilenames will
# never contain more than one file here
infilename = infilenames[0]
if not args['quiet']:
print('Attempting to write to {0}.'.format(outfilename))
else:
options['quiet'] = None
if args['xpath']:
# Process HTML with XPath before writing
html = parse_html(read_files(infilename), args['xpath'])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string('\n'.join(html), outfilename,
options=options)
else:
pk.from_string('\n'.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilename, outfilename, options=options)
elif args['single']:
if not args['quiet']:
print('Attempting to write {0} page(s) to {1}.'
.format(len(infilenames), outfilename))
else:
options['quiet'] = None
if args['xpath']:
# Process HTML with XPath before writing
html = parse_html(read_files(infilenames), args['xpath'])
if isinstance(html, list):
if isinstance(html[0], str):
pk.from_string('\n'.join(html), outfilename,
options=options)
else:
pk.from_string('\n'.join(lh.tostring(x) for x in html),
outfilename, options=options)
elif isinstance(html, str):
pk.from_string(html, outfilename, options=options)
else:
pk.from_string(lh.tostring(html), outfilename,
options=options)
else:
pk.from_file(infilenames, outfilename, options=options)
return True
except (OSError, IOError) as err:
sys.stderr.write('An error occurred while writing {0}:\n{1}'
.format(outfilename, str(err)))
return False | Write pdf file(s) to disk using pdfkit.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output pdf file (str) | entailment |
def write_csv_files(args, infilenames, outfilename):
"""Write csv file(s) to disk.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output text file (str)
"""
def csv_convert(line):
"""Strip punctuation and insert commas"""
clean_line = []
for word in line.split(' '):
clean_line.append(word.strip(string.punctuation))
return ', '.join(clean_line)
if not outfilename.endswith('.csv'):
outfilename = outfilename + '.csv'
outfilename = overwrite_file_check(args, outfilename)
all_text = [] # Text must be aggregated if writing to a single output file
for i, infilename in enumerate(infilenames):
parsed_text = get_parsed_text(args, infilename)
if parsed_text:
if args['multiple']:
if not args['quiet']:
print('Attempting to write to {0}.'.format(outfilename))
csv_text = [csv_convert(x) for x in parsed_text]
print(csv_text)
write_file(csv_text, outfilename)
elif args['single']:
all_text += parsed_text
# Newline added between multiple files being aggregated
if len(infilenames) > 1 and i < len(infilenames) - 1:
all_text.append('\n')
# Write all text to a single output file
if args['single'] and all_text:
if not args['quiet']:
print('Attempting to write {0} page(s) to {1}.'
.format(len(infilenames), outfilename))
csv_text = [csv_convert(x) for x in all_text]
print(csv_text)
write_file(csv_text, outfilename) | Write csv file(s) to disk.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output text file (str) | entailment |
def write_text_files(args, infilenames, outfilename):
"""Write text file(s) to disk.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output text file (str)
"""
if not outfilename.endswith('.txt'):
outfilename = outfilename + '.txt'
outfilename = overwrite_file_check(args, outfilename)
all_text = [] # Text must be aggregated if writing to a single output file
for i, infilename in enumerate(infilenames):
parsed_text = get_parsed_text(args, infilename)
if parsed_text:
if args['multiple']:
if not args['quiet']:
print('Attempting to write to {0}.'.format(outfilename))
write_file(parsed_text, outfilename)
elif args['single']:
all_text += parsed_text
# Newline added between multiple files being aggregated
if len(infilenames) > 1 and i < len(infilenames) - 1:
all_text.append('\n')
# Write all text to a single output file
if args['single'] and all_text:
if not args['quiet']:
print('Attempting to write {0} page(s) to {1}.'
.format(len(infilenames), outfilename))
write_file(all_text, outfilename) | Write text file(s) to disk.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- name of output text file (str) | entailment |
def write_file(data, outfilename):
"""Write a single file to disk."""
if not data:
return False
try:
with open(outfilename, 'w') as outfile:
for line in data:
if line:
outfile.write(line)
return True
except (OSError, IOError) as err:
sys.stderr.write('An error occurred while writing {0}:\n{1}'
.format(outfilename, str(err)))
return False | Write a single file to disk. | entailment |
def get_num_part_files():
"""Get the number of PART.html files currently saved to disk."""
num_parts = 0
for filename in os.listdir(os.getcwd()):
if filename.startswith('PART') and filename.endswith('.html'):
num_parts += 1
return num_parts | Get the number of PART.html files currently saved to disk. | entailment |
def write_part_images(url, raw_html, html, filename):
"""Write image file(s) associated with HTML to disk, substituting filenames.
Keywords arguments:
url -- the URL from which the HTML has been extracted from (str)
raw_html -- unparsed HTML file content (list)
html -- parsed HTML file content (lxml.html.HtmlElement) (default: None)
filename -- the PART.html filename (str)
Return raw HTML with image names replaced with local image filenames.
"""
save_dirname = '{0}_files'.format(os.path.splitext(filename)[0])
if not os.path.exists(save_dirname):
os.makedirs(save_dirname)
images = html.xpath('//img/@src')
internal_image_urls = [x for x in images if x.startswith('/')]
headers = {'User-Agent': random.choice(USER_AGENTS)}
for img_url in images:
img_name = img_url.split('/')[-1]
if "?" in img_name:
img_name = img_name.split('?')[0]
if not os.path.splitext(img_name)[1]:
img_name = '{0}.jpeg'.format(img_name)
try:
full_img_name = os.path.join(save_dirname, img_name)
with open(full_img_name, 'wb') as img:
if img_url in internal_image_urls:
# Internal images need base url added
full_img_url = '{0}{1}'.format(url.rstrip('/'), img_url)
else:
# External image
full_img_url = img_url
img_content = requests.get(full_img_url, headers=headers,
proxies=get_proxies()).content
img.write(img_content)
raw_html = raw_html.replace(escape(img_url), full_img_name)
except (OSError, IOError):
pass
time.sleep(random.uniform(0, 0.5)) # Slight delay between downloads
return raw_html | Write image file(s) associated with HTML to disk, substituting filenames.
Keywords arguments:
url -- the URL from which the HTML has been extracted from (str)
raw_html -- unparsed HTML file content (list)
html -- parsed HTML file content (lxml.html.HtmlElement) (default: None)
filename -- the PART.html filename (str)
Return raw HTML with image names replaced with local image filenames. | entailment |
def write_part_file(args, url, raw_html, html=None, part_num=None):
"""Write PART.html file(s) to disk, images in PART_files directory.
Keyword arguments:
args -- program arguments (dict)
raw_html -- unparsed HTML file content (list)
html -- parsed HTML file content (lxml.html.HtmlElement) (default: None)
part_num -- PART(#).html file number (int) (default: None)
"""
if part_num is None:
part_num = get_num_part_files() + 1
filename = 'PART{0}.html'.format(part_num)
# Decode bytes to string in Python 3 versions
if not PY2 and isinstance(raw_html, bytes):
raw_html = raw_html.encode('ascii', 'ignore')
# Convert html to an lh.HtmlElement object for parsing/saving images
if html is None:
html = lh.fromstring(raw_html)
# Parse HTML if XPath entered
if args['xpath']:
raw_html = parse_html(html, args['xpath'])
if isinstance(raw_html, list):
if not isinstance(raw_html[0], lh.HtmlElement):
raise ValueError('XPath should return an HtmlElement object.')
else:
if not isinstance(raw_html, lh.HtmlElement):
raise ValueError('XPath should return an HtmlElement object.')
# Write HTML and possibly images to disk
if raw_html:
if not args['no_images'] and (args['pdf'] or args['html']):
raw_html = write_part_images(url, raw_html, html, filename)
with open(filename, 'w') as part:
if not isinstance(raw_html, list):
raw_html = [raw_html]
if isinstance(raw_html[0], lh.HtmlElement):
for elem in raw_html:
part.write(lh.tostring(elem))
else:
for line in raw_html:
part.write(line) | Write PART.html file(s) to disk, images in PART_files directory.
Keyword arguments:
args -- program arguments (dict)
raw_html -- unparsed HTML file content (list)
html -- parsed HTML file content (lxml.html.HtmlElement) (default: None)
part_num -- PART(#).html file number (int) (default: None) | entailment |
def get_part_filenames(num_parts=None, start_num=0):
"""Get numbered PART.html filenames."""
if num_parts is None:
num_parts = get_num_part_files()
return ['PART{0}.html'.format(i) for i in range(start_num+1, num_parts+1)] | Get numbered PART.html filenames. | entailment |
def read_files(filenames):
"""Read a file into memory."""
if isinstance(filenames, list):
for filename in filenames:
with open(filename, 'r') as infile:
return infile.read()
else:
with open(filenames, 'r') as infile:
return infile.read() | Read a file into memory. | entailment |
def remove_part_images(filename):
"""Remove PART(#)_files directory containing images from disk."""
dirname = '{0}_files'.format(os.path.splitext(filename)[0])
if os.path.exists(dirname):
shutil.rmtree(dirname) | Remove PART(#)_files directory containing images from disk. | entailment |
def remove_part_files(num_parts=None):
"""Remove PART(#).html files and image directories from disk."""
filenames = get_part_filenames(num_parts)
for filename in filenames:
remove_part_images(filename)
remove_file(filename) | Remove PART(#).html files and image directories from disk. | entailment |
def confirm_input(user_input):
"""Check user input for yes, no, or an exit signal."""
if isinstance(user_input, list):
user_input = ''.join(user_input)
try:
u_inp = user_input.lower().strip()
except AttributeError:
u_inp = user_input
# Check for exit signal
if u_inp in ('q', 'quit', 'exit'):
sys.exit()
if u_inp in ('y', 'yes'):
return True
return False | Check user input for yes, no, or an exit signal. | entailment |
def mkdir_and_cd(dirname):
"""Change directory and/or create it if necessary."""
if not os.path.exists(dirname):
os.makedirs(dirname)
os.chdir(dirname)
else:
os.chdir(dirname) | Change directory and/or create it if necessary. | entailment |
def convert(data, in_format, out_format, name=None, pretty=False):
"""Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
"""
# Decide on a json formatter depending on desired prettiness
dumps = json.dumps if pretty else json.compress
# Shortcut for avoiding pybel dependency
if not has_ob and in_format == 'json' and out_format == 'json':
return dumps(json.loads(data) if is_string(data) else data)
elif not has_ob:
raise ImportError("Chemical file format conversion requires pybel.")
# These use the open babel library to interconvert, with additions for json
if in_format == 'json':
mol = json_to_pybel(json.loads(data) if is_string(data) else data)
elif in_format == 'pybel':
mol = data
else:
mol = pybel.readstring(in_format, data)
# Infer structure in cases where the input format has no specification
if not mol.OBMol.HasNonZeroCoords():
mol.make3D()
# Make P1 if that's a thing, recalculating bonds in process
if in_format == 'mmcif' and hasattr(mol, 'unitcell'):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if out_format == 'pybel':
return mol
elif out_format == 'object':
return pybel_to_json(mol, name)
elif out_format == 'json':
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format) | Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format` | entailment |
def json_to_pybel(data, infer_bonds=False):
"""Converts python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
"""
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(table.GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
# If there is no bond data, try to infer them
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
# Otherwise, use the bonds in the data set
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
# Check for unit cell data
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
# Add partial charges
if 'charge' in data['atoms'][0]:
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol | Converts python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule` | entailment |
def pybel_to_json(molecule, name=None):
"""Converts a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data
"""
# Save atom element type and 3D location.
atoms = [{'element': table.GetSymbol(atom.atomicnum),
'location': list(atom.coords)}
for atom in molecule.atoms]
# Recover auxiliary data, if exists
for json_atom, pybel_atom in zip(atoms, molecule.atoms):
if pybel_atom.partialcharge != 0:
json_atom['charge'] = pybel_atom.partialcharge
if pybel_atom.OBAtom.HasData('_atom_site_label'):
obatom = pybel_atom.OBAtom
json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
if pybel_atom.OBAtom.HasData('color'):
obatom = pybel_atom.OBAtom
json_atom['color'] = obatom.GetData('color').GetValue()
# Save number of bonds and indices of endpoint atoms
bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
b.GetEndAtom().GetIndex()],
'order': b.GetBondOrder()}
for b in ob.OBMolBondIter(molecule.OBMol)]
output = {'atoms': atoms, 'bonds': bonds, 'units': {}}
# If there's unit cell data, save it to the json output
if hasattr(molecule, 'unitcell'):
uc = molecule.unitcell
output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()]
for v in uc.GetCellVectors()]
density = (sum(atom.atomicmass for atom in molecule.atoms) /
(uc.GetCellVolume() * 0.6022))
output['density'] = density
output['units']['density'] = 'kg / L'
# Save the formula to json. Use Hill notation, just to have a standard.
element_count = Counter(table.GetSymbol(a.atomicnum) for a in molecule)
hill_count = []
for element in ['C', 'H']:
if element in element_count:
hill_count += [(element, element_count[element])]
del element_count[element]
hill_count += sorted(element_count.items())
# If it's a crystal, then reduce the Hill formula
div = (reduce(gcd, (c[1] for c in hill_count))
if hasattr(molecule, 'unitcell') else 1)
output['formula'] = ''.join(n if c / div == 1 else '%s%d' % (n, c / div)
for n, c in hill_count)
output['molecular_weight'] = molecule.molwt / div
output['units']['molecular_weight'] = 'g / mol'
# If the input has been given a name, add that
if name:
output['name'] = name
return output | Converts a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data | entailment |
def default(self, obj):
"""Fired when an unserializable object is hit."""
if hasattr(obj, '__dict__'):
return obj.__dict__.copy()
elif HAS_NUMPY and isinstance(obj, np.ndarray):
return obj.copy().tolist()
else:
raise TypeError(("Object of type {:s} with value of {:s} is not "
"JSON serializable").format(type(obj), repr(obj))) | Fired when an unserializable object is hit. | entailment |
def draw(data, format='auto', size=(400, 300), drawing_type='ball and stick',
camera_type='perspective', shader='lambert', display_html=True,
element_properties=None, show_save=False):
"""Draws an interactive 3D visualization of the inputted chemical.
Args:
data: A string or file representing a chemical.
format: The format of the `data` variable (default is 'auto').
size: Starting dimensions of visualization, in pixels.
drawing_type: Specifies the molecular representation. Can be 'ball and
stick', 'wireframe', or 'space filling'.
camera_type: Can be 'perspective' or 'orthographic'.
shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
'phong', or 'lambert'.
display_html: If True (default), embed the html in a IPython display.
If False, return the html as a string.
element_properites: A dictionary providing color and radius information
for custom elements or overriding the defaults in imolecule.js
show_save: If True, displays a save icon for rendering molecule as an
image.
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings.
"""
# Catch errors on string-based input before getting js involved
draw_options = ['ball and stick', 'wireframe', 'space filling']
camera_options = ['perspective', 'orthographic']
shader_options = ['toon', 'basic', 'phong', 'lambert']
if drawing_type not in draw_options:
raise Exception("Invalid drawing type! Please use one of: " +
", ".join(draw_options))
if camera_type not in camera_options:
raise Exception("Invalid camera type! Please use one of: " +
", ".join(camera_options))
if shader not in shader_options:
raise Exception("Invalid shader! Please use one of: " +
", ".join(shader_options))
json_mol = generate(data, format)
if element_properties is None:
element_properties = dict()
json_element_properties = to_json(element_properties)
div_id = uuid.uuid4()
html = """<div id="molecule_%s"></div>
<script type="text/javascript">
require.config({baseUrl: '/',
paths: {imolecule: ['%s', '%s']}});
require(['imolecule'], function () {
var $d = $('#molecule_%s');
$d.width(%d); $d.height(%d);
$d.imolecule = jQuery.extend({}, imolecule);
$d.imolecule.create($d, {drawingType: '%s',
cameraType: '%s',
shader: '%s',
showSave: %s});
$d.imolecule.addElements(%s);
$d.imolecule.draw(%s);
$d.resizable({
aspectRatio: %d / %d,
resize: function (evt, ui) {
$d.imolecule.renderer.setSize(ui.size.width,
ui.size.height);
}
});
});
</script>""" % (div_id, local_path[:-3], remote_path[:-3],
div_id, size[0], size[1], drawing_type,
camera_type, shader,
'true' if show_save else 'false',
json_element_properties,
json_mol, size[0], size[1])
# Execute js and display the results in a div (see script for more)
if display_html:
try:
__IPYTHON__
except NameError:
# We're running outside ipython, let's generate a static HTML and
# show it in the browser
import shutil
import webbrowser
from tempfile import mkdtemp
from time import time
try: # Python 3
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError: # Python 2
from urlparse import urljoin
from urllib import pathname2url
from tornado import template
t = template.Loader(file_path).load('viewer.template')
html = t.generate(title="imolecule", json_mol=json_mol,
drawing_type=drawing_type, shader=shader,
camera_type=camera_type,
json_element_properties=json_element_properties)
tempdir = mkdtemp(prefix='imolecule_{:.0f}_'.format(time()))
html_filename = os.path.join(tempdir, 'index.html')
with open(html_filename, 'wb') as f:
f.write(html)
libs = (('server', 'css', 'chosen.css'),
('server', 'css', 'server.css'),
('js', 'jquery-1.11.1.min.js'),
('server', 'js', 'chosen.jquery.min.js'),
('js', 'build', 'imolecule.min.js'))
for lib in libs:
shutil.copy(os.path.join(file_path, *lib), tempdir)
html_file_url = urljoin('file:', pathname2url(html_filename))
print('Opening html file: {}'.format(html_file_url))
webbrowser.open(html_file_url)
else:
# We're running in ipython: display widget
display(HTML(html))
else:
return html | Draws an interactive 3D visualization of the inputted chemical.
Args:
data: A string or file representing a chemical.
format: The format of the `data` variable (default is 'auto').
size: Starting dimensions of visualization, in pixels.
drawing_type: Specifies the molecular representation. Can be 'ball and
stick', 'wireframe', or 'space filling'.
camera_type: Can be 'perspective' or 'orthographic'.
shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
'phong', or 'lambert'.
display_html: If True (default), embed the html in a IPython display.
If False, return the html as a string.
element_properites: A dictionary providing color and radius information
for custom elements or overriding the defaults in imolecule.js
show_save: If True, displays a save icon for rendering molecule as an
image.
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings. | entailment |
def generate(data, format="auto"):
"""Converts input chemical formats to json and optimizes structure.
Args:
data: A string or file representing a chemical
format: The format of the `data` variable (default is 'auto')
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings.
"""
# Support both files and strings and attempt to infer file type
try:
with open(data) as in_file:
if format == 'auto':
format = data.split('.')[-1]
data = in_file.read()
except:
if format == 'auto':
format = 'smi'
return format_converter.convert(data, format, 'json') | Converts input chemical formats to json and optimizes structure.
Args:
data: A string or file representing a chemical
format: The format of the `data` variable (default is 'auto')
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (ie. my_file.mol -> mol) and defaults
to SMILES (smi) for strings. | entailment |
def to_json(data, compress=False):
"""Converts the output of `generate(...)` to formatted json.
Floats are rounded to three decimals and positional vectors are printed on
one line with some whitespace buffer.
"""
return json.compress(data) if compress else json.dumps(data) | Converts the output of `generate(...)` to formatted json.
Floats are rounded to three decimals and positional vectors are printed on
one line with some whitespace buffer. | entailment |
def start_server():
"""Starts up the imolecule server, complete with argparse handling."""
parser = argparse.ArgumentParser(description="Opens a browser-based "
"client that interfaces with the "
"chemical format converter.")
parser.add_argument('--debug', action="store_true", help="Prints all "
"transmitted data streams.")
parser.add_argument('--port', type=int, default=8000, help="The port "
"on which to serve the website.")
parser.add_argument('--timeout', type=int, default=5, help="The maximum "
"time, in seconds, allowed for a process to run "
"before returning an error.")
parser.add_argument('--workers', type=int, default=2, help="The number of "
"worker processes to use with the server.")
parser.add_argument('--no-browser', action="store_true", help="Disables "
"opening a browser window on startup.")
global args
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
handlers = [(r'/', IndexHandler), (r'/websocket', WebSocket),
(r'/static/(.*)', tornado.web.StaticFileHandler,
{'path': os.path.normpath(os.path.dirname(__file__))})]
application = tornado.web.Application(handlers)
application.listen(args.port)
if not args.no_browser:
webbrowser.open('http://localhost:%d/' % args.port, new=2)
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
sys.stderr.write("Received keyboard interrupt. Stopping server.\n")
tornado.ioloop.IOLoop.instance().stop()
sys.exit(1) | Starts up the imolecule server, complete with argparse handling. | entailment |
def on_message(self, message):
"""Evaluates the function pointed to by json-rpc."""
json_rpc = json.loads(message)
logging.log(logging.DEBUG, json_rpc)
if self.pool is None:
self.pool = multiprocessing.Pool(processes=args.workers)
# Spawn a process to protect the server against segfaults
async = self.pool.apply_async(_worker_process, [json_rpc])
try:
result = async.get(timeout=args.timeout)
error = 0
except multiprocessing.TimeoutError:
result = ("File format conversion timed out! This is due "
"either to a large input file or a segmentation "
"fault in the underlying open babel library.")
error = 1
self.pool.terminate()
self.pool = multiprocessing.Pool(processes=args.workers)
except Exception:
result = traceback.format_exc()
error = 1
logging.log(logging.DEBUG, result)
self.write_message(json.dumps({'result': result, 'error': error,
'id': json_rpc['id']},
separators=(',', ':'))) | Evaluates the function pointed to by json-rpc. | entailment |
def nearly_eq(valA, valB, maxf=None, minf=None, epsilon=None):
'''
implementation based on:
http://floating-point-gui.de/errors/comparison/
'''
if valA == valB:
return True
if maxf is None:
maxf = float_info.max
if minf is None:
minf = float_info.min
if epsilon is None:
epsilon = float_info.epsilon
absA = abs(valA)
absB = abs(valB)
delta = abs(valA - valB)
if (valA == 0) or (valB == 0) or (delta < minf):
return delta < (epsilon * minf)
return (delta / min(absA + absB, maxf)) < (epsilon * 2) | implementation based on:
http://floating-point-gui.de/errors/comparison/ | entailment |
def _convert(cls, other, ignoreScalars=False):
'''
:other: Point or point equivalent
:ignorescalars: optional boolean
:return: Point
Class private method for converting 'other' into a Point
subclasss. If 'other' already is a Point subclass, nothing
is done. If ignoreScalars is True and other is a float or int
type, a TypeError exception is raised.
'''
if ignoreScalars:
if isinstance(other, (int, float)):
msg = "unable to convert {} to {}".format(other, cls.__name__)
raise TypeError(msg)
return cls(other) if not issubclass(type(other), cls) else other | :other: Point or point equivalent
:ignorescalars: optional boolean
:return: Point
Class private method for converting 'other' into a Point
subclasss. If 'other' already is a Point subclass, nothing
is done. If ignoreScalars is True and other is a float or int
type, a TypeError exception is raised. | entailment |
def units(cls, scale=1):
'''
:scale: optional integer scaling factor
:return: list of three Point subclass
Returns three points whose coordinates are the head of a
unit vector from the origin ( conventionally i, j and k).
'''
return [cls(x=scale), cls(y=scale), cls(z=scale)] | :scale: optional integer scaling factor
:return: list of three Point subclass
Returns three points whose coordinates are the head of a
unit vector from the origin ( conventionally i, j and k). | entailment |
def gaussian(cls, mu=0, sigma=1):
'''
:mu: mean
:sigma: standard deviation
:return: Point subclass
Returns a point whose coordinates are picked from a Gaussian
distribution with mean 'mu' and standard deviation 'sigma'.
See random.gauss for further explanation of those parameters.
'''
return cls(random.gauss(mu, sigma),
random.gauss(mu, sigma),
random.gauss(mu, sigma)) | :mu: mean
:sigma: standard deviation
:return: Point subclass
Returns a point whose coordinates are picked from a Gaussian
distribution with mean 'mu' and standard deviation 'sigma'.
See random.gauss for further explanation of those parameters. | entailment |
def random(cls, origin=None, radius=1):
'''
:origin: optional Point or point equivalent
:radius: optional float, radius around origin
:return: Point subclass
Returns a point with random x, y and z coordinates bounded by
the sphere defined by (origin,radius).
If a sphere is not supplied, a unit sphere at the origin is
used by default.
'''
p = cls(origin)
r = random.uniform(0, radius)
u = random.uniform(0, Two_Pi)
v = random.uniform(-Half_Pi, Half_Pi)
r_cosv = r * math.cos(v)
p.x += r_cosv * math.cos(u)
p.y += r_cosv * math.sin(u)
p.z += radius * math.sin(v)
return p | :origin: optional Point or point equivalent
:radius: optional float, radius around origin
:return: Point subclass
Returns a point with random x, y and z coordinates bounded by
the sphere defined by (origin,radius).
If a sphere is not supplied, a unit sphere at the origin is
used by default. | entailment |
def _binary_(self, other, func, inplace=False):
'''
:other: Point or point equivalent
:func: binary function to apply
:inplace: optional boolean
:return: Point
Implementation private method.
All of the binary operations funnel thru this method to
reduce cut-and-paste code and enforce consistent behavior
of binary ops.
Applies 'func' to 'self' and 'other' and returns the result.
If 'inplace' is True the results of will be stored in 'self',
otherwise the results will be stored in a new object.
Returns a Point.
'''
dst = self if inplace else self.__class__(self)
try:
b = self.__class__._convert(other, ignoreScalars=True)
dst.x = func(dst.x, b.x)
dst.y = func(dst.y, b.y)
dst.z = func(dst.z, b.z)
return dst
except TypeError:
pass
dst.x = func(dst.x, other)
dst.y = func(dst.y, other)
dst.z = func(dst.z, other)
return dst | :other: Point or point equivalent
:func: binary function to apply
:inplace: optional boolean
:return: Point
Implementation private method.
All of the binary operations funnel thru this method to
reduce cut-and-paste code and enforce consistent behavior
of binary ops.
Applies 'func' to 'self' and 'other' and returns the result.
If 'inplace' is True the results of will be stored in 'self',
otherwise the results will be stored in a new object.
Returns a Point. | entailment |
def _unary_(self, func, inplace=False):
'''
:func: unary function to apply to each coordinate
:inplace: optional boolean
:return: Point
Implementation private method.
All of the unary operations funnel thru this method
to reduce cut-and-paste code and enforce consistent
behavior of unary ops.
Applies 'func' to self and returns the result.
The expected call signature of 'func' is f(a)
If 'inplace' is True, the results are stored in 'self',
otherwise the results will be stored in a new object.
Returns a Point.
'''
dst = self if inplace else self.__class__(self)
dst.x = func(dst.x)
dst.y = func(dst.y)
dst.z = func(dst.z)
return dst | :func: unary function to apply to each coordinate
:inplace: optional boolean
:return: Point
Implementation private method.
All of the unary operations funnel thru this method
to reduce cut-and-paste code and enforce consistent
behavior of unary ops.
Applies 'func' to self and returns the result.
The expected call signature of 'func' is f(a)
If 'inplace' is True, the results are stored in 'self',
otherwise the results will be stored in a new object.
Returns a Point. | entailment |
def cross(self, other):
'''
:other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float.
'''
b = self.__class__._convert(other)
return sum([(self.y * b.z) - (self.z * b.y),
(self.z * b.x) - (self.x * b.z),
(self.x * b.y) - (self.y * b.x)]) | :other: Point or point equivalent
:return: float
Vector cross product of points U (self) and V (other), computed:
U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)
s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1
U x V = s1 + s2 + s3
Returns a float. | entailment |
def isBetween(self, a, b, axes='xyz'):
'''
:a: Point or point equivalent
:b: Point or point equivalent
:axis: optional string
:return: float
Checks the coordinates specified in 'axes' of 'self' to
determine if they are bounded by 'a' and 'b'. The range
is inclusive of end-points.
Returns boolean.
'''
a = self.__class__._convert(a)
b = self.__class__._convert(b)
fn = lambda k: (self[k] >= min(a[k], b[k])) and (
self[k] <= max(a[k], b[k]))
return all(fn(axis) for axis in axes) | :a: Point or point equivalent
:b: Point or point equivalent
:axis: optional string
:return: float
Checks the coordinates specified in 'axes' of 'self' to
determine if they are bounded by 'a' and 'b'. The range
is inclusive of end-points.
Returns boolean. | entailment |
def ccw(self, b, c, axis='z'):
'''
:b: Point or point equivalent
:c: Point or point equivalent
:axis: optional string or integer in set('x',0,'y',1,'z',2)
:return: float
CCW - Counter Clockwise
Returns an integer signifying the direction of rotation around 'axis'
described by the angle [b, self, c].
> 0 : counter-clockwise
0 : points are collinear
< 0 : clockwise
Returns an integer.
Raises ValueError if axis is not in 'xyz'.
'''
bsuba = b - self
csuba = c - self
if axis in ['z', 2]:
return (bsuba.x * csuba.y) - (bsuba.y * csuba.x)
if axis in ['y', 1]:
return (bsuba.x * csuba.z) - (bsuba.z * csuba.x)
if axis in ['x', 0]:
return (bsuba.y * csuba.z) - (bsuba.z * csuba.y)
msg = "invalid axis '{!r}', must be one of {}".format(axis, self._keys)
raise ValueError(msg) | :b: Point or point equivalent
:c: Point or point equivalent
:axis: optional string or integer in set('x',0,'y',1,'z',2)
:return: float
CCW - Counter Clockwise
Returns an integer signifying the direction of rotation around 'axis'
described by the angle [b, self, c].
> 0 : counter-clockwise
0 : points are collinear
< 0 : clockwise
Returns an integer.
Raises ValueError if axis is not in 'xyz'. | entailment |
def isCCW(self, b, c, axis='z'):
'''
:b: Point or point equivalent
:c: Point or point equivalent
:axis: optional string or integer in set('x',0,'y',1,'z',2)
:return: boolean
True if the angle determined by a,self,b around 'axis'
describes a counter-clockwise rotation, otherwise False.
Raises CollinearPoints if self, b, c are collinear.
'''
result = self.ccw(b, c, axis)
if result == 0:
raise CollinearPoints(b, self, c)
return result > 0 | :b: Point or point equivalent
:c: Point or point equivalent
:axis: optional string or integer in set('x',0,'y',1,'z',2)
:return: boolean
True if the angle determined by a,self,b around 'axis'
describes a counter-clockwise rotation, otherwise False.
Raises CollinearPoints if self, b, c are collinear. | entailment |
def isCollinear(self, b, c):
'''
:b: Point or point equivalent
:c: Point or point equivalent
:return: boolean
True if 'self' is collinear with 'b' and 'c', otherwise False.
'''
return all(self.ccw(b, c, axis) == 0 for axis in self._keys) | :b: Point or point equivalent
:c: Point or point equivalent
:return: boolean
True if 'self' is collinear with 'b' and 'c', otherwise False. | entailment |
def rotate2d(self, theta, origin=None, axis='z', radians=False):
'''
:theta: float radians to rotate self around origin
:origin: optional Point, defaults to 0,0,0
Returns a Point rotated by :theta: around :origin:.
'''
origin = Point._convert(origin)
delta = self - origin
p = Point(origin)
if not radians:
theta = math.radians(theta)
cosT = math.cos(theta)
sinT = math.sin(theta)
if axis == 'z':
p.x += (cosT * delta.x) - (sinT * delta.y)
p.y += (sinT * delta.x) + (cosT * delta.y)
return p
if axis == 'y':
p.z += (cosT * delta.z) - (sinT * delta.x)
p.x += (sinT * delta.z) + (cosT * delta.x)
return p
if axis == 'x':
p.y += (cosT * delta.y) - (sinT * delta.z)
p.z += (sinT * delta.y) + (cosT * delta.z)
return p
raise KeyError('unknown axis {}, expecting x, y or z'.format(axis)) | :theta: float radians to rotate self around origin
:origin: optional Point, defaults to 0,0,0
Returns a Point rotated by :theta: around :origin:. | entailment |
def withAngles(cls, origin=None, base=1, alpha=None,
beta=None, gamma=None, inDegrees=False):
'''
:origin: optional Point
:alpha: optional float describing length of the side opposite A
:beta: optional float describing length of the side opposite B
:gamma: optional float describing length of the side opposite C
:return: Triangle initialized with points comprising the triangle
with the specified angles.
'''
raise NotImplementedError("withAngles") | :origin: optional Point
:alpha: optional float describing length of the side opposite A
:beta: optional float describing length of the side opposite B
:gamma: optional float describing length of the side opposite C
:return: Triangle initialized with points comprising the triangle
with the specified angles. | entailment |
def heronsArea(self):
'''
Heron's forumla for computing the area of a triangle, float.
Performance note: contains a square root.
'''
s = self.semiperimeter
return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c))) | Heron's forumla for computing the area of a triangle, float.
Performance note: contains a square root. | entailment |
def circumradius(self):
'''
Distance from the circumcenter to all the verticies in
the Triangle, float.
'''
return (self.a * self.b * self.c) / (self.area * 4) | Distance from the circumcenter to all the verticies in
the Triangle, float. | entailment |
def altitudes(self):
'''
A list of the altitudes of each vertex [AltA, AltB, AltC], list of
floats.
An altitude is the shortest distance from a vertex to the side
opposite of it.
'''
A = self.area * 2
return [A / self.a, A / self.b, A / self.c] | A list of the altitudes of each vertex [AltA, AltB, AltC], list of
floats.
An altitude is the shortest distance from a vertex to the side
opposite of it. | entailment |
def isIsosceles(self):
'''
True iff two side lengths are equal, boolean.
'''
return (self.a == self.b) or (self.a == self.c) or (self.b == self.c) | True iff two side lengths are equal, boolean. | entailment |
def congruent(self, other):
'''
A congruent B
True iff all angles of 'A' equal angles in 'B' and
all side lengths of 'A' equal all side lengths of 'B', boolean.
'''
a = set(self.angles)
b = set(other.angles)
if len(a) != len(b) or len(a.difference(b)) != 0:
return False
a = set(self.sides)
b = set(other.sides)
return len(a) == len(b) and len(a.difference(b)) == 0 | A congruent B
True iff all angles of 'A' equal angles in 'B' and
all side lengths of 'A' equal all side lengths of 'B', boolean. | entailment |
def center(self):
'''
Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin.
'''
try:
return self._center
except AttributeError:
pass
self._center = Point()
return self._center | Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin. | entailment |
def radius(self):
'''
Radius of the ellipse, Point class.
'''
try:
return self._radius
except AttributeError:
pass
self._radius = Point(1, 1, 0)
return self._radius | Radius of the ellipse, Point class. | entailment |
def xAxisIsMajor(self):
'''
Returns True if the major axis is parallel to the X axis, boolean.
'''
return max(self.radius.x, self.radius.y) == self.radius.x | Returns True if the major axis is parallel to the X axis, boolean. | entailment |
def xAxisIsMinor(self):
'''
Returns True if the minor axis is parallel to the X axis, boolean.
'''
return min(self.radius.x, self.radius.y) == self.radius.x | Returns True if the minor axis is parallel to the X axis, boolean. | entailment |
def yAxisIsMajor(self):
'''
Returns True if the major axis is parallel to the Y axis, boolean.
'''
return max(self.radius.x, self.radius.y) == self.radius.y | Returns True if the major axis is parallel to the Y axis, boolean. | entailment |
def yAxisIsMinor(self):
'''
Returns True if the minor axis is parallel to the Y axis, boolean.
'''
return min(self.radius.x, self.radius.y) == self.radius.y | Returns True if the minor axis is parallel to the Y axis, boolean. | entailment |
def a(self):
'''
Positive antipodal point on the major axis, Point class.
'''
a = Point(self.center)
if self.xAxisIsMajor:
a.x += self.majorRadius
else:
a.y += self.majorRadius
return a | Positive antipodal point on the major axis, Point class. | entailment |
def a_neg(self):
'''
Negative antipodal point on the major axis, Point class.
'''
na = Point(self.center)
if self.xAxisIsMajor:
na.x -= self.majorRadius
else:
na.y -= self.majorRadius
return na | Negative antipodal point on the major axis, Point class. | entailment |
def b(self):
'''
Positive antipodal point on the minor axis, Point class.
'''
b = Point(self.center)
if self.xAxisIsMinor:
b.x += self.minorRadius
else:
b.y += self.minorRadius
return b | Positive antipodal point on the minor axis, Point class. | entailment |
def b_neg(self):
'''
Negative antipodal point on the minor axis, Point class.
'''
nb = Point(self.center)
if self.xAxisIsMinor:
nb.x -= self.minorRadius
else:
nb.y -= self.minorRadius
return nb | Negative antipodal point on the minor axis, Point class. | entailment |
def vertices(self):
'''
A dictionary of four points where the axes intersect the ellipse, dict.
'''
return {'a': self.a, 'a_neg': self.a_neg,
'b': self.b, 'b_neg': self.b_neg} | A dictionary of four points where the axes intersect the ellipse, dict. | entailment |
def focus0(self):
'''
First focus of the ellipse, Point class.
'''
f = Point(self.center)
if self.xAxisIsMajor:
f.x -= self.linearEccentricity
else:
f.y -= self.linearEccentricity
return f | First focus of the ellipse, Point class. | entailment |
def circumcircleForTriangle(cls, triangle):
'''
:param: triangle - Triangle class
:return: Circle class
Returns the circle where every vertex in the input triangle is
on the radius of that circle.
'''
if triangle.isRight:
# circumcircle origin is the midpoint of the hypotenues
o = triangle.hypotenuse.midpoint
r = o.distance(triangle.A)
return cls(o, r)
# otherwise
# 1. find the normals to two sides
# 2. translate them to the midpoints of those two sides
# 3. intersect those lines for center of circumcircle
# 4. radius is distance from center to any vertex in the triangle
abn = triangle.AB.normal
abn += triangle.AB.midpoint
acn = triangle.AC.normal
acn += triangle.AC.midpoint
o = abn.intersection(acn)
r = o.distance(triangle.A)
return cls(o, r) | :param: triangle - Triangle class
:return: Circle class
Returns the circle where every vertex in the input triangle is
on the radius of that circle. | entailment |
def doesIntersect(self, other):
'''
:param: other - Circle class
Returns True iff:
self.center.distance(other.center) <= self.radius+other.radius
'''
otherType = type(other)
if issubclass(otherType, Ellipse):
distance = self.center.distance(other.center)
radiisum = self.radius + other.radius
return distance <= radiisum
if issubclass(otherType, Line):
raise NotImplementedError('doesIntersect,other is Line class')
raise TypeError("unknown type '{t}'".format(t=otherType)) | :param: other - Circle class
Returns True iff:
self.center.distance(other.center) <= self.radius+other.radius | entailment |
def AB(self):
'''
A list containing Points A and B.
'''
try:
return self._AB
except AttributeError:
pass
self._AB = [self.A, self.B]
return self._AB | A list containing Points A and B. | entailment |
def normal(self):
'''
:return: Line
Returns a Line normal (perpendicular) to this Line.
'''
d = self.B - self.A
return Line([-d.y, d.x], [d.y, -d.x]) | :return: Line
Returns a Line normal (perpendicular) to this Line. | entailment |
def t(self, point):
'''
:point: Point subclass
:return: float
If :point: is collinear, determine the 't' coefficient of
the parametric equation:
xyz = A<xyz> + t ( B<xyz> - A<xyz> )
if t < 0, point is less than A and B on the line
if t >= 0 and <= 1, point is between A and B
if t > 1 point is greater than B
'''
# XXX could use for an ordering on points?
if point not in self:
msg = "'{p}' is not collinear with '{l}'"
raise CollinearPoints(msg.format(p=point, l=self))
# p = A + t ( B - A)
# p - A = t ( B - A)
# p - A / (B -A) = t
return (point - self.A) / self.m | :point: Point subclass
:return: float
If :point: is collinear, determine the 't' coefficient of
the parametric equation:
xyz = A<xyz> + t ( B<xyz> - A<xyz> )
if t < 0, point is less than A and B on the line
if t >= 0 and <= 1, point is between A and B
if t > 1 point is greater than B | entailment |
def flip(self):
'''
:returns: None
Swaps the positions of A and B.
'''
tmp = self.A.xyz
self.A = self.B
self.B = tmp | :returns: None
Swaps the positions of A and B. | entailment |
def doesIntersect(self, other):
'''
:param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
'''
if self.A.ccw(self.B, other.A) * self.A.ccw(self.B, other.B) > 0:
return False
if other.A.ccw(other.B, self.A) * other.A.ccw(other.B, self.B) > 0:
return False
return True | :param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0 | entailment |
def intersection(self, other):
'''
:param: other - Line subclass
:return: Point subclass
Returns a Point object with the coordinates of the intersection
between the current line and the other line.
Will raise Parallel() if the two lines are parallel.
Will raise Collinear() if the two lines are collinear.
'''
if self.isCollinear(other):
msg = '{!r} and {!r} are collinear'
raise CollinearLines(msg.format(self, other))
d0 = self.A - self.B
d1 = other.A - other.B
denominator = (d0.x * d1.y) - (d0.y * d1.x)
if denominator == 0:
msg = '{!r} and {!r} are parallel'
raise ParallelLines(msg.format(self, other))
cp0 = self.A.cross(self.B)
cp1 = other.A.cross(other.B)
x_num = (cp0 * d1.x) - (d0.x * cp1)
y_num = (cp0 * d1.y) - (d0.y * cp1)
p = Point(x_num / denominator, y_num / denominator)
if p in self and p in other:
return p
msg = "found point {!r} but not in {!r} and {!r}"
raise ParallelLines(msg.format(p, self, other)) | :param: other - Line subclass
:return: Point subclass
Returns a Point object with the coordinates of the intersection
between the current line and the other line.
Will raise Parallel() if the two lines are parallel.
Will raise Collinear() if the two lines are collinear. | entailment |
def distanceFromPoint(self, point):
'''
:param: point - Point subclass
:return: float
Distance from the line to the given point.
'''
# XXX planar distance, doesn't take into account z ?
d = self.m
n = (d.y * point.x) - (d.x * point.y) + self.A.cross(self.B)
return abs(n / self.A.distance(self.B)) | :param: point - Point subclass
:return: float
Distance from the line to the given point. | entailment |
def radiansBetween(self, other):
'''
:param: other - Line subclass
:return: float
Returns the angle measured between two lines in radians
with a range of [0, 2 * math.pi].
'''
# a dot b = |a||b| * cos(theta)
# a dot b / |a||b| = cos(theta)
# cos-1(a dot b / |a||b|) = theta
# translate each line so that it passes through the origin and
# produce a new point whose distance (magnitude) from the
# origin is 1.
#
a = Point.unit(self.A, self.B)
b = Point.unit(other.A, other.B)
# in a perfect world, after unit: |A| = |B| = 1
# which is a noop when dividing the dot product of A,B
# but sometimes the lengths are different.
#
# let's just assume things are perfect and the lengths equal 1.
return math.acos(a.dot(b)) | :param: other - Line subclass
:return: float
Returns the angle measured between two lines in radians
with a range of [0, 2 * math.pi]. | entailment |
def FloatProperty(name, default=0.0, readonly=False, docs=None):
'''
:name: string - property name
:default: float - property default value
:readonly: boolean - if True, setter method is NOT generated
Returns a property object that can be used to initialize a
class instance variable as a property.
'''
private_name = '_' + name
def getf(self):
if not hasattr(self, private_name):
setattr(self, private_name, default)
return getattr(self, private_name)
if readonly:
setf = None
else:
def setf(self, newValue):
def epsilon_set(v):
# epsilon_set: creates a float from v unless that
# float is less than epsilon, which will
# be considered effectively zero.
fv = float(v)
return 0.0 if nearly_zero(fv) else fv
try:
setattr(self, private_name, epsilon_set(newValue))
return
except TypeError:
pass
if isinstance(newValue, collections.Mapping):
try:
setattr(self, private_name, epsilon_set(newValue[name]))
except KeyError:
pass
return
if isinstance(newValue, collections.Iterable):
try:
setattr(self, private_name, epsilon_set(newValue[0]))
return
except (IndexError, TypeError):
pass
try:
mapping = vars(newValue)
setattr(self, private_name, epsilon_set(mapping[name]))
return
except (TypeError, KeyError):
pass
if newValue is None:
setattr(self, private_name, epsilon_set(default))
return
raise ValueError(newValue)
return property(getf, setf, None, docs) | :name: string - property name
:default: float - property default value
:readonly: boolean - if True, setter method is NOT generated
Returns a property object that can be used to initialize a
class instance variable as a property. | entailment |
def randomSizeAndLocation(cls, radius, widthLimits,
heightLimits, origin=None):
'''
:param: radius - float
:param: widthLimits - iterable of floats with length >= 2
:param: heightLimits - iterable of floats with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
'''
r = cls(widthLimits, heightLimits, origin)
r.origin = Point.randomLocation(radius, origin) | :param: radius - float
:param: widthLimits - iterable of floats with length >= 2
:param: heightLimits - iterable of floats with length >= 2
:param: origin - optional Point subclass
:return: Rectangle | entailment |
def randomSize(cls, widthLimits, heightLimits, origin=None):
'''
:param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
'''
r = cls(0, 0, origin)
r.w = random.randint(widthLimits[0], widthLimits[1])
r.h = random.randint(heightLimits[0], heightLimits[1])
return r | :param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle | entailment |
def randomLocation(cls, radius, width, height, origin=None):
'''
:param: radius - float
:param: width - float
:param: height - float
:param: origin - optional Point subclass
:return: Rectangle
'''
return cls(width,
height,
Point.randomLocation(radius, origin)) | :param: radius - float
:param: width - float
:param: height - float
:param: origin - optional Point subclass
:return: Rectangle | entailment |
def origin(self):
'''
Point describing the origin of the rectangle. Defaults to (0,0,0).
'''
try:
return self._origin
except AttributeError:
pass
self._origin = Point()
return self._origin | Point describing the origin of the rectangle. Defaults to (0,0,0). | entailment |
def B(self):
'''
Point whose coordinates are (maxX,minY,origin.z), Point.
'''
return Point(self.maxX, self.minY, self.origin.z) | Point whose coordinates are (maxX,minY,origin.z), Point. | entailment |
def C(self):
'''
Point whose coordinates are (maxX,maxY,origin.z), Point.
'''
return Point(self.maxX, self.maxY, self.origin.z) | Point whose coordinates are (maxX,maxY,origin.z), Point. | entailment |
def D(self):
'''
Point whose coordinates are (minX,maxY,origin.Z), Point.
'''
return Point(self.minX, self.maxY, self.origin.z) | Point whose coordinates are (minX,maxY,origin.Z), Point. | entailment |
def center(self):
'''
Point whose coordinates are (midX,midY,origin.z), Point.
'''
return Point(self.midX, self.midY, self.origin.z) | Point whose coordinates are (midX,midY,origin.z), Point. | entailment |
def scale(self, dx=1.0, dy=1.0):
'''
:param: dx - optional float
:param: dy - optional float
Scales the rectangle's width and height by dx and dy.
'''
self.width *= dx
self.height *= dy | :param: dx - optional float
:param: dy - optional float
Scales the rectangle's width and height by dx and dy. | entailment |
def containsPoint(self, point, Zorder=False):
'''
:param: point - Point subclass
:param: Zorder - optional Boolean
Is true if the point is contain in the rectangle or
along the rectangle's edges.
If Zorder is True, the method will check point.z for
equality with the rectangle origin's Z coordinate.
'''
if not point.isBetweenX(self.A, self.B):
return False
if not point.isBetweenY(self.A, self.D):
return False
if Zorder:
return point.z == self.origin.z
return True | :param: point - Point subclass
:param: Zorder - optional Boolean
Is true if the point is contain in the rectangle or
along the rectangle's edges.
If Zorder is True, the method will check point.z for
equality with the rectangle origin's Z coordinate. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.