def pdb_string_to_pdb_dict(filestring):
"""Takes a .pdb filestring and turns into a ``dict`` which represents its
record structure. Only lines which aren't empty are used.
The resultant dictionary has line types as the keys, which point to the
lines as its value. So ``{"TITLE": ["TITLE line 1", "TITLE line 2"]}`` etc.
The exceptions are the REMARK records, where there is a sub-dictionary with
REMARK numbers as keys, and the structure records themselves which are just
arranged into lists - one for each model.
:param str filestring: the .pdb filestring to process.
:rtype: ``dict``"""
pdb_dict = {}
lines = list(filter(lambda l: bool(l.strip()), filestring.split("\n")))
lines = [[line[:6].rstrip(), line.rstrip()] for line in lines]
model_recs = ("ATOM", "HETATM", "ANISOU", "MODEL", "TER", "ENDMDL")
model = []
in_model = False
for head, line in lines:
if head == "REMARK":
if "REMARK" not in pdb_dict: pdb_dict["REMARK"] = {}
number = line.lstrip().split()[1]
update_dict(pdb_dict["REMARK"], number, line)
elif head in model_recs:
if "MODEL" not in pdb_dict: pdb_dict["MODEL"] = [[]]
if head == "ENDMDL":
pdb_dict["MODEL"].append([])
elif head != "MODEL":
pdb_dict["MODEL"][-1].append(line)
else:
update_dict(pdb_dict, head, line)
if "MODEL" in pdb_dict and not pdb_dict["MODEL"][-1]: pdb_dict["MODEL"].pop()
    return pdb_dict
# source: samirelanduk/molecupy

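# ``pdb_string_to_pdb_dict`` above calls an ``update_dict`` helper that is not
# included in this extract. A minimal sketch, consistent with how it is called
# here (append a line to the list stored under a key, creating it if needed):
def update_dict(d, key, value):
    # Append to an existing list, or start a new one for a first-seen key.
    try:
        d[key].append(value)
    except KeyError:
        d[key] = [value]
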
def pdb_dict_to_data_dict(pdb_dict):
"""Converts an .pdb dictionary into an atomium data dictionary, with the
same standard layout that the other file formats get converted into.
:param dict pdb_dict: the .pdb dictionary.
:rtype: ``dict``"""
data_dict = {
"description": {
"code": None, "title": None, "deposition_date": None,
"classification": None, "keywords": [], "authors": []
}, "experiment": {
"technique": None, "source_organism": None, "expression_system": None,
"missing_residues": []
}, "quality": {"resolution": None, "rvalue": None, "rfree": None},
"geometry": {"assemblies": [], "crystallography": {}}, "models": []
}
update_description_dict(pdb_dict, data_dict)
update_experiment_dict(pdb_dict, data_dict)
update_quality_dict(pdb_dict, data_dict)
update_geometry_dict(pdb_dict, data_dict)
update_models_list(pdb_dict, data_dict)
    return data_dict
# source: samirelanduk/molecupy

def update_experiment_dict(pdb_dict, data_dict):
"""Creates the experiment component of a standard atomium data dictionary
from a .pdb dictionary.
:param dict pdb_dict: The .pdb dictionary to read.
:param dict data_dict: The data dictionary to update."""
extract_technique(pdb_dict, data_dict["experiment"])
extract_source(pdb_dict, data_dict["experiment"])
    extract_missing_residues(pdb_dict, data_dict["experiment"])
# source: samirelanduk/molecupy

def update_geometry_dict(pdb_dict, data_dict):
"""Creates the geometry component of a standard atomium data dictionary
from a .pdb dictionary.
:param dict pdb_dict: The .pdb dictionary to read.
:param dict data_dict: The data dictionary to update."""
extract_assembly_remark(pdb_dict, data_dict["geometry"])
    extract_crystallography(pdb_dict, data_dict["geometry"])
# source: samirelanduk/molecupy

def extract_header(pdb_dict, description_dict):
"""Takes a ``dict`` and adds header information to it by parsing the HEADER
line.
:param dict pdb_dict: the ``dict`` to read.
:param dict description_dict: the ``dict`` to update."""
if pdb_dict.get("HEADER"):
line = pdb_dict["HEADER"][0]
if line[50:59].strip():
description_dict["deposition_date"] = datetime.strptime(
line[50:59], "%d-%b-%y"
).date()
if line[62:66].strip(): description_dict["code"] = line[62:66]
if line[10:50].strip():
description_dict["classification"] = line[10:50].strip() | samirelanduk/molecupy | [
97,
19,
97,
13,
1462320672
] |
def extract_keywords(pdb_dict, description_dict):
"""Takes a ``dict`` and adds header information to it by parsing the KEYWDS
line.
:param dict pdb_dict: the ``dict`` to read.
:param dict description_dict: the ``dict`` to update."""
if pdb_dict.get("KEYWDS"):
text = merge_lines(pdb_dict["KEYWDS"], 10)
description_dict["keywords"] = [w.strip() for w in text.split(",")] | samirelanduk/molecupy | [
97,
19,
97,
13,
1462320672
] |
def extract_technique(pdb_dict, experiment_dict):
"""Takes a ``dict`` and adds technique information to it by parsing EXPDTA
lines.
:param dict pdb_dict: the ``dict`` to read.
:param dict experiment_dict: the ``dict`` to update."""
if pdb_dict.get("EXPDTA"):
if pdb_dict["EXPDTA"][0].strip():
experiment_dict["technique"] = pdb_dict["EXPDTA"][0][6:].strip() | samirelanduk/molecupy | [
97,
19,
97,
13,
1462320672
] |
def extract_missing_residues(pdb_dict, experiment_dict):
"""Takes a ``dict`` and adds missing residue information to it by parsing
REMARK 465 lines.
:param dict pdb_dict: the ``dict`` to read.
:param dict experiment_dict: the ``dict`` to update."""
for line in pdb_dict.get("REMARK", {}).get("465", []):
chunks = line.strip().split()
if len(chunks) == 5:
experiment_dict["missing_residues"].append({
"name": chunks[2], "id": f"{chunks[3]}.{chunks[4]}"
            })
# source: samirelanduk/molecupy

def extract_rvalue_remark(pdb_dict, quality_dict):
"""Takes a ``dict`` and adds resolution information to it by parsing REMARK
3 lines.
:param dict pdb_dict: the ``dict`` to read.
:param dict quality_dict: the ``dict`` to update."""
if pdb_dict.get("REMARK") and pdb_dict["REMARK"].get("3"):
patterns = {
"rvalue": r"R VALUE.+WORKING.+?: (.+)",
"rfree": r"FREE R VALUE[ ]{2,}: (.+)",
}
for attribute, pattern in patterns.items():
for remark in pdb_dict["REMARK"]["3"]:
matches = re.findall(pattern, remark.strip())
if matches:
try:
quality_dict[attribute] = float(matches[0].strip())
                    except ValueError: pass
                    break
# source: samirelanduk/molecupy

def assembly_lines_to_assembly_dict(lines):
"""Takes the lines representing a single biological assembly and turns
them into an assembly dictionary.
:param list lines: The REMARK lines to read.
:rtype: ``dict``"""
assembly = {
"transformations": [], "software": None, "buried_surface_area": None,
"surface_area": None, "delta_energy": None, "id": 0
}
patterns = [[r"(.+)SOFTWARE USED: (.+)", "software", lambda x: x],
[r"(.+)BIOMOLECULE: (.+)", "id", int],
[r"(.+)SURFACE AREA: (.+) [A-Z]", "buried_surface_area", float],
[r"(.+)AREA OF THE COMPLEX: (.+) [A-Z]", "surface_area", float],
[r"(.+)FREE ENERGY: (.+) [A-Z]", "delta_energy", float]]
t = None
for line in lines:
for p in patterns:
matches = re.findall(p[0], line)
if matches: assembly[p[1]] = p[2](matches[0][1].strip())
if "APPLY THE FOLLOWING" in line:
if t: assembly["transformations"].append(t)
t = {"chains": [], "matrix": [], "vector": []}
if "CHAINS:" in line:
t["chains"] += [c.strip() for c in
line.split(":")[-1].strip().split(",") if c.strip()]
if "BIOMT" in line:
values = [float(x) for x in line.split()[4:]]
if len(t["matrix"]) == 3:
assembly["transformations"].append(t)
t = {"chains": t["chains"], "matrix": [], "vector": []}
t["matrix"].append(values[:3])
t["vector"].append(values[-1])
if t: assembly["transformations"].append(t)
    return assembly
# source: samirelanduk/molecupy

def make_sequences(pdb_dict):
"""Creates a mapping of chain IDs to sequences, by parsing SEQRES records.
:param dict pdb_dict: the .pdb dictionary to read.
:rtype: ``dict``"""
seq = {}
if pdb_dict.get("SEQRES"):
for line in pdb_dict["SEQRES"]:
chain, residues = line[11], line[19:].strip().split()
if chain not in seq:
seq[chain] = []
seq[chain] += residues
    return {k: "".join([CODES.get(r, "X") for r in v]) for k, v in seq.items()}
# source: samirelanduk/molecupy

def get_full_names(pdb_dict):
"""Creates a mapping of het names to full English names.
    :param dict pdb_dict: the .pdb dict to read.
:rtype: ``dict``"""
full_names = {}
for line in pdb_dict.get("HETNAM", []):
        code = line[11:14].strip()
        full_names[code] = full_names.get(code, "") + line[15:].strip()
    return full_names
# source: samirelanduk/molecupy

def get_last_ter_line(model_lines):
"""Gets the index of the last TER record in a list of records. 0 will be
returned if there are none.
:param list model_lines: the lines to search.
:rtype: ``int``"""
last_ter = 0
for index, line in enumerate(model_lines[::-1]):
if line[:3] == "TER":
last_ter = len(model_lines) - index - 1
break
    return last_ter
# source: samirelanduk/molecupy

def add_atom_to_polymer(line, model, chain_id, res_id, aniso_dict, full_names):
"""Takes an .pdb ATOM or HETATM record, converts it, and adds it to a
polymer dictionary.
:param dict line: the line to read.
:param dict model: the model to update.
:param str chain_id: the chain ID to add to.
:param str res_id: the molecule ID to add to.
:param dict aniso_dict: lookup dictionary for anisotropy information."""
try:
model["polymer"][chain_id]["residues"][res_id]["atoms"][
int(line[6:11])
] = atom_line_to_dict(line, aniso_dict)
    except KeyError:
name = line[17:20].strip()
try:
model["polymer"][chain_id]["residues"][res_id] = {
"name": name, "full_name": full_names.get(name),
"atoms": {int(line[6:11]): atom_line_to_dict(line, aniso_dict)},
"number": len(model["polymer"][chain_id]["residues"]) + 1
}
        except KeyError:
model["polymer"][chain_id] = {
"internal_id": chain_id, "helices": [], "strands": [],
"residues": {res_id: {
"name": line[17:20].strip(),
"atoms": {int(line[6:11]): atom_line_to_dict(line, aniso_dict)},
"number": 1, "full_name": None,
}}
            }
# source: samirelanduk/molecupy

def atom_line_to_dict(line, aniso_dict):
"""Converts an ATOM or HETATM record to an atom dictionary.
:param str line: the record to convert.
:param dict aniso_dict: the anisotropy dictionary to use.
:rtype: ``dict``"""
a = {
"occupancy": 1, "bvalue": None, "charge": 0,
"anisotropy": aniso_dict.get(int(line[6:11].strip()), [0, 0, 0, 0, 0, 0])
}
a["is_hetatm"] = line[:6] == "HETATM"
a["name"] = line[12:16].strip() or None
a["alt_loc"] = line[16].strip() or None
a["x"] = float(line[30:38].strip())
a["y"] = float(line[38:46].strip())
a["z"] = float(line[46:54].strip())
if line[54:60].strip(): a["occupancy"] = float(line[54:60].strip())
if line[60:66].strip(): a["bvalue"] = float(line[60:66].strip())
a["element"] = line[76:78].strip() or None
if line[78:80].strip():
try:
a["charge"] = int(line[78:80].strip())
except: a["charge"] = int(line[78:80][::-1].strip())
    return a
# source: samirelanduk/molecupy

def structure_to_pdb_string(structure):
"""Converts a :py:class:`.AtomStructure` to a .pdb filestring.
:param AtomStructure structure: the structure to convert.
:rtype: ``str``"""
lines = []
pack_sequences(structure, lines)
atoms = sorted(structure.atoms(), key=lambda a: a.id)
for i, atom in enumerate(atoms):
atom_to_atom_line(atom, lines)
if isinstance(atom.het, Residue) and (
atom is atoms[-1] or atoms[i + 1].chain is not atom.chain or
isinstance(atoms[i + 1].het, Ligand)):
last = lines[-1]
lines.append(f"TER {last[6:11]} {last[17:20]} {last[21]}{last[22:26]}{last[26]}")
return "\n".join(lines) | samirelanduk/molecupy | [
97,
19,
97,
13,
1462320672
] |
def atom_to_atom_line(a, lines):
"""Converts an :py:class:`.Atom` to an ATOM or HETATM record. ANISOU lines
will also be added where appropriate.
:param Atom a: The Atom to pack.
:param list lines: the string lines to update."""
line = "{:6}{:5} {:4} {:3} {:1}{:4}{:1} "
line += "{:>8}{:>8}{:>8} 1.00{:6} {:>2}{:2}"
id_, residue_name, chain_id, residue_id, insert_code = "", "", "", "", ""
if a.het:
id_, residue_name = a.het.id, a.het._name
chain_id = a.chain.id if a.chain is not None else ""
residue_id = int("".join([c for c in id_ if c.isdigit() or c == "-"]))
insert_code = id_[-1] if id_ and id_[-1].isalpha() else ""
atom_name = a._name or ""
atom_name = " " + atom_name if len(atom_name) < 4 else atom_name
occupancy = " 1.00"
line = line.format(
"HETATM" if isinstance(a.het, Ligand) or a._is_hetatm else "ATOM",
a.id, atom_name, residue_name, chain_id, residue_id, insert_code,
"{:.3f}".format(a.location[0]) if a.location[0] is not None else "",
"{:.3f}".format(a.location[1]) if a.location[1] is not None else "",
"{:.3f}".format(a.location[2]) if a.location[2] is not None else "",
a.bvalue if a.bvalue is not None else "", a.element or "",
str(int(a.charge))[::-1] if a.charge else "",
)
lines.append(line)
if a.anisotropy != [0, 0, 0, 0, 0, 0]:
lines.append(atom_to_anisou_line(a, atom_name,
            residue_name, chain_id, residue_id, insert_code))
# source: samirelanduk/molecupy

@contextlib.contextmanager  # assumed: the yield-based body below needs this decorator (requires `import contextlib`)
def chdir(dirname=None):
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
        os.chdir(curdir)
# source: sproutsocial/botanist

def __init__(self, args):
    self.args = args
# source: sproutsocial/botanist

def https_url_with_auth(self, base_url):
_, suffix = base_url.split('https://')
    return 'https://%s:%s@%s' % (quote(self.args.username), quote(self.args.password), suffix)
# source: sproutsocial/botanist

def get_pagination(raw_link_header):
link_map = {}
for link, rel in (lh.split(';') for lh in raw_link_header.split(',')):
link_map[rel.split('=')[1].strip('"')] = link.strip(' <>')
    return Pagination(*(link_map.get(f) for f in Pagination._fields))
# source: sproutsocial/botanist

def get_repos(org, repo_type, access_token=None, username=None, password=None, per_page=25):
"""
Paginates through all of the repositories using github's Link header.
https://developer.github.com/v3/#link-header
"""
url = API_BASE + 'orgs/%s/repos?' % org
qs_params = {'type': repo_type, 'per_page': per_page}
if access_token:
        qs_params.update({'access_token': access_token})
url += urlencode(qs_params)
request = urllib2.Request(url)
elif username and password:
url += urlencode(qs_params)
request = urllib2.Request(url)
add_https_basic_auth(request, username, password)
else:
raise ValueError('unworkable combination of authentication inputs')
response = urllib2.urlopen(request)
try:
pagination = get_pagination(response.headers['Link'])
except KeyError:
print 'no Link header, nothing to paginate through.'
pagination = Pagination(None, None, None, None)
repos = json.loads(response.read())
for r in repos:
if not r.get('archived'):
yield r
# so, this isn't the DRYest code ;-)
while pagination.next:
request = urllib2.Request(pagination.next)
if username and password:
add_https_basic_auth(request, username, password)
response = urllib2.urlopen(request)
pagination = get_pagination(response.headers['Link'])
repos = json.loads(response.read())
for r in repos:
if not r.get('archived'):
                yield r
# source: sproutsocial/botanist

def repocsv(string):
"""
    >>> sorted(repocsv('org1/repo1, org2/repo2,org3/repo3 ,org4/repo4'))
    ['org1/repo1', 'org2/repo2', 'org3/repo3', 'org4/repo4']
"""
try:
repos = [r.strip() for r in string.split(',')]
return set(repos)
except Exception as exc:
        raise argparse.ArgumentTypeError(exc.message)
# source: sproutsocial/botanist

def prepare(self):
    self.add_arg('file', help='The json dump file to restore')
# source: NaPs/Kolekto

def __init__(self, size):
    self.size = size
# source: idmillington/layout

def render(self, rect, data):
    self.rect = rect
# source: idmillington/layout

def test_center_minimum_size(self):
b = BoxLM()
b.center = DummyElement(Point(3,4))
    self.assertEqual(b.get_minimum_size(None), Point(3,4))
# source: idmillington/layout

def test_horizontal_minimum_size(self):
b = BoxLM()
b.left = DummyElement(Point(2,4))
b.center = DummyElement(Point(3,4))
b.right = DummyElement(Point(1,5))
    self.assertEqual(b.get_minimum_size(None), Point(6,5))
# source: idmillington/layout

def test_margin_minimum_size(self):
b = BoxLM()
b.top = DummyElement(Point(4,2))
b.center = DummyElement(Point(3,4))
b.bottom = DummyElement(Point(5,1))
b.margin = 1
    self.assertEqual(b.get_minimum_size(None), Point(5,9))
# source: idmillington/layout

def home():
    return render_template('./home.html')
# source: kylehayes/fpoimg

def generator():
    return render_template('./generator.html')
# source: kylehayes/fpoimg

def configurator():
    return redirect('/generator', 301)
# source: kylehayes/fpoimg

def examples():
    return render_template('./examples.html')
# source: kylehayes/fpoimg

def show_image_square(square):
    return show_image_width_height(square, square)
# source: kylehayes/fpoimg

def show_image_width_height(width, height):
caption = request.args.get('text', '')
    return show_image_width_height_caption(width, height, caption)
# source: kylehayes/fpoimg

def show_image_width_height_caption(width, height, caption):
width = min([width, 5000])
height = min([height, 5000])
    bg_color_hex = request.args.get('bg_color', '#C7C7C7')
    text_color_hex = request.args.get('text_color', '#8F8F8F')
    return generate(width, height, caption, hex_to_rgb(bg_color_hex), hex_to_rgb(text_color_hex))
# source: kylehayes/fpoimg

def hex_to_rgb(value):
'''
Algorithm provided by @Jeremy Cantrell on StackOverflow.com:
http://stackoverflow.com/questions/214359/converting-hex-color-to-rgb-and-vice-versa
'''
if len(value.strip()) != 0:
if value[0] == '#':
value = value[1:]
len_value = len(value)
if len_value not in [3, 6]:
            raise ValueError('Incorrect hex value {}'.format(value))
if len_value == 3:
value = ''.join(i * 2 for i in value)
return tuple(int(i, 16) for i in textwrap.wrap(value, 2))
else:
        return None
# source: kylehayes/fpoimg

def writeAndUploadCSV(data="", fieldnames=['name', 'category']):
new_csvfile = io.StringIO()
wr = csv.DictWriter(new_csvfile, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
wr.writeheader()
wr.writerow(data)
buffer = io.BytesIO(new_csvfile.getvalue().encode())
    now = datetime.datetime.now()
    ts = now.timestamp()
logs_bucket = os.environ.get('FPOIMG_AWS_BUCKET_LOGS', None)
if logs_bucket:
upload_file(
buffer,
logs_bucket,
"queries/year={year}/month={month}/day={day}/hour={hour}/{ts}.csv".format(year=now.year, month=now.month, day=now.day, hour=now.hour, ts=ts)
        )
# source: kylehayes/fpoimg

def generate(width, height, caption="", bg_color=(100,100,100), text_color=(200,200,200)):
size = (width,height) # size of the image to create
im = Image.new('RGB', size, bg_color) # create the image
draw = ImageDraw.Draw(im) # create a drawing object
DEFAULT_DIM_SIZE = 50
DEFAULT_CAPTION_SIZE = 30
text_line_pad = 0
dim_text = str(width) + u"\u00D7" + str(height) # \u00D7 is multiplication sign
text_lines = [(dim_text, "ArialBlack.ttf", DEFAULT_DIM_SIZE)]
if(caption):
text_lines += [(caption, "Arial.ttf", DEFAULT_CAPTION_SIZE)]
text_layouts = layout_text(width, height, 0, text_lines)
for text_layout in text_layouts:
text, font, pos = text_layout
draw.text(pos, text, fill=text_color, font=font)
    del draw  # I'm done drawing so I don't need this anymore
# source: kylehayes/fpoimg

def __call__(self, ledgers, report, output):
for line in self.generate(ledgers, report):
        output(line)
# source: pcapriotti/pledger

def lpad(self, item, size, color=None):
text = str(item)[:size]
padlength = size - len(text)
if (padlength < 0):
padlength = 0
return "%s%s" % (self.colored(color, text), " " * padlength) | pcapriotti/pledger | [
14,
2,
14,
4,
1309686730
] |
def print_account(self, account, size=39):
if size is None:
return self.colored(self.ACCOUNT_COLOR, account.name)
else:
text = account.shortened_name(size)
        return self.lpad(text, size, self.ACCOUNT_COLOR)
# source: pcapriotti/pledger

def colored(self, color, text):
if color:
return COLORS[color] + text + COLORS["nocolor"]
else:
        return text
# source: pcapriotti/pledger

def generate(self, ledgers, report):
it = report.generate(ledgers)
# save total
total = next(it)
count = 0
for entry in it:
components = entry.amount.components()
for component in components[:-1]:
yield self.print_value(component)
yield self.print_value(components[-1]) + \
(" " * (entry.level - 1)) + \
self.colored(self.ACCOUNT_COLOR, entry.account)
count += 1
if count > 0:
yield "-" * 20
for component in total.amount.components():
        yield self.print_value(component)
# source: pcapriotti/pledger

def generate(self, ledgers, report):
last_entry = None
for entry in report.generate(ledgers):
if last_entry and id(last_entry.transaction) == id(entry.transaction):
for line in self.print_secondary_entry(entry):
yield line
else:
for line in self.print_entry(entry):
yield line
        last_entry = entry
# source: pcapriotti/pledger

def print_secondary_entry(self, entry):
currencies = sorted(
set(entry.entry.amount.currencies()).union(entry.total.currencies()))
components = entry.entry.amount.components(currencies)
total_components = entry.total.components(currencies)
yield "%s %s %s %s" % (
" " * 44,
self.print_account(entry.entry.account),
self.print_value(components[0]),
self.print_value(total_components[0]))
for line in self.print_extra_components(entry, components[1:], total_components[1:]):
        yield line
# source: pcapriotti/pledger

def percentChange(startPoint,currentPoint):
try:
x = ((float(currentPoint)-startPoint)/abs(startPoint))*100.00
if x == 0.0:
return 0.000000001
else:
return x
    except (ValueError, ZeroDivisionError):
        return 0.0001
# source: PythonProgramming/Pattern-Recognition-for-Forex-Trading

def currentPattern():
    mostRecentPoint = avgLine[-1]
# source: PythonProgramming/Pattern-Recognition-for-Forex-Trading

def graphRawFX():
    ...  # body not included in this extract
# source: PythonProgramming/Pattern-Recognition-for-Forex-Trading

def patternRecognition():
for eachPattern in patternAr:
sim1 = 100.00 - abs(percentChange(eachPattern[0], patForRec[0]))
sim2 = 100.00 - abs(percentChange(eachPattern[1], patForRec[1]))
sim3 = 100.00 - abs(percentChange(eachPattern[2], patForRec[2]))
sim4 = 100.00 - abs(percentChange(eachPattern[3], patForRec[3]))
sim5 = 100.00 - abs(percentChange(eachPattern[4], patForRec[4]))
sim6 = 100.00 - abs(percentChange(eachPattern[5], patForRec[5]))
sim7 = 100.00 - abs(percentChange(eachPattern[6], patForRec[6]))
sim8 = 100.00 - abs(percentChange(eachPattern[7], patForRec[7]))
sim9 = 100.00 - abs(percentChange(eachPattern[8], patForRec[8]))
        sim10 = 100.00 - abs(percentChange(eachPattern[9], patForRec[9]))
# source: PythonProgramming/Pattern-Recognition-for-Forex-Trading

def __init__(self, market_place=None, product_id=None): # noqa: E501
"""AppStoreProduct - a model defined in Swagger""" # noqa: E501
self._market_place = None
self._product_id = None
self.discriminator = None
if market_place is not None:
self.market_place = market_place
if product_id is not None:
        self.product_id = product_id
# source: docusign/docusign-python-client

def market_place(self):
"""Gets the market_place of this AppStoreProduct. # noqa: E501
# noqa: E501
:return: The market_place of this AppStoreProduct. # noqa: E501
:rtype: str
"""
    return self._market_place
# source: docusign/docusign-python-client

def market_place(self, market_place):
"""Sets the market_place of this AppStoreProduct.
# noqa: E501
:param market_place: The market_place of this AppStoreProduct. # noqa: E501
:type: str
"""
    self._market_place = market_place
# source: docusign/docusign-python-client

def product_id(self):
"""Gets the product_id of this AppStoreProduct. # noqa: E501
The Product ID from the AppStore. # noqa: E501
:return: The product_id of this AppStoreProduct. # noqa: E501
:rtype: str
"""
    return self._product_id
# source: docusign/docusign-python-client

def product_id(self, product_id):
"""Sets the product_id of this AppStoreProduct.
The Product ID from the AppStore. # noqa: E501
:param product_id: The product_id of this AppStoreProduct. # noqa: E501
:type: str
"""
    self._product_id = product_id
# source: docusign/docusign-python-client

def to_str(self):
"""Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())
# source: docusign/docusign-python-client

def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppStoreProduct):
return False
    return self.__dict__ == other.__dict__
# source: docusign/docusign-python-client

def dov_proxy_no_xdov():
"""Fixture to start the DOV proxy and set PYDOV_BASE_URL to route
traffic through it.
The DOV proxy behaves as the XDOV server would be unavailable.
"""
process = Popen([sys.executable,
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'stub', 'dov_proxy.py'),
'--dov-base-url', build_dov_url('/'),
'--no-xdov'])
time.sleep(2)
orig_base_url = os.environ.get('PYDOV_BASE_URL', None)
os.environ['PYDOV_BASE_URL'] = 'http://localhost:1337/'
yield
if orig_base_url is not None:
os.environ['PYDOV_BASE_URL'] = orig_base_url
else:
        del os.environ['PYDOV_BASE_URL']
process.terminate()
    process.communicate()
# source: DOV-Vlaanderen/pydov

def reload_modules(dov_proxy_no_xdov):
"""Reload the boring and grondwaterfilter modules after setting
PYDOV_BASE_URL.
These need to be reloaded because they use the PYDOV_BASE_URL at import
time to set the location of XSD schemas.
Parameters
----------
dov_proxy_no_xdov : pytest.fixture
Fixture starting the DOV proxy and setting PYDOV_BASE_URL accordingly.
"""
reload(pydov.types.boring)
reload(pydov.types.grondwaterfilter)
yield
reload(pydov.types.boring)
    reload(pydov.types.grondwaterfilter)
# source: DOV-Vlaanderen/pydov

def reset_cache(dov_proxy_no_xdov):
"""Reset the cache to a temporary folder to remove influence from other
tests.
The cache needs to be reset after setting the PYDOV_BASE_URL variable
because at initialisation this URL is used to construct a regex for
determining the datatype of an XML request.
Parameters
----------
dov_proxy_no_xdov : pytest.fixture
Fixture starting the DOV proxy and setting PYDOV_BASE_URL accordingly.
"""
gziptext_cache = GzipTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests_error'),
max_age=datetime.timedelta(seconds=0.1))
gziptext_cache.remove()
orig_cache = pydov.cache
pydov.cache = gziptext_cache
yield
gziptext_cache.remove()
    pydov.cache = orig_cache
# source: DOV-Vlaanderen/pydov

def test_hook_count():
"""PyTest fixture temporarily disabling default hooks and installing
HookCounter."""
orig_hooks = pydov.hooks
pydov.hooks = Hooks(
(HookCounter(),)
)
yield
    pydov.hooks = orig_hooks
# source: DOV-Vlaanderen/pydov

def test_do_not_cache_error(self):
"""Test whether the 404 error page does not end up being cached."""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2004-103984')))
assert not os.path.exists(os.path.join(
pydov.cache.cachedir, 'boring', '2004-103984.xml.gz'
    ))
# source: DOV-Vlaanderen/pydov

def test_do_not_overwrite_stale_cache(self):
"""Test whether a stale copy of the data which exists in the cache is
not overwritten by the 404 error page."""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
testdata_path = os.path.join(
'tests', 'data', 'types', 'boring', 'boring.xml')
cache_path = os.path.join(
pydov.cache.cachedir, 'boring', '2004-103984.xml.gz'
)
os.makedirs(os.path.dirname(cache_path))
with open(testdata_path, 'r') as testdata:
with gzip.open(cache_path, 'wb') as cached_data:
cached_data.write(testdata.read().encode('utf8'))
time.sleep(0.5)
bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2004-103984')))
with gzip.open(cache_path, 'rb') as cached_data:
        assert 'GEO-04/169-BNo-B1' in cached_data.read().decode('utf8')
# source: DOV-Vlaanderen/pydov

def test_stale_warning(self):
"""Test whether a stale version of the data from the cache is used in
case of a service error, and if a warning is issued to the user."""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
testdata_path = os.path.join(
'tests', 'data', 'types', 'boring', 'boring.xml')
cache_path = os.path.join(
pydov.cache.cachedir, 'boring', '2004-103984.xml.gz'
)
os.makedirs(os.path.dirname(cache_path))
with open(testdata_path, 'r') as testdata:
with gzip.open(cache_path, 'wb') as cached_data:
cached_data.write(testdata.read().encode('utf8'))
time.sleep(0.5)
with pytest.warns(XmlStaleWarning):
df = bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2004-103984')))
assert not df.iloc[0].boorgatmeting
        assert df.iloc[0].boormethode == 'spade'
# source: DOV-Vlaanderen/pydov

def test_stale_disabled(self):
"""Test whether no stale version of the data from the cache is used
when disabled, and if a warning is issued to the user."""
pydov.cache.stale_on_error = False
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
testdata_path = os.path.join(
'tests', 'data', 'types', 'boring', 'boring.xml')
cache_path = os.path.join(
pydov.cache.cachedir, 'boring', '2004-103984.xml.gz'
)
os.makedirs(os.path.dirname(cache_path))
with open(testdata_path, 'r') as testdata:
with gzip.open(cache_path, 'wb') as cached_data:
cached_data.write(testdata.read().encode('utf8'))
time.sleep(0.5)
with pytest.warns(XmlFetchWarning):
df = bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2004-103984')))
assert np.isnan(df.iloc[0].boorgatmeting)
        assert np.isnan(df.iloc[0].boormethode)
# source: DOV-Vlaanderen/pydov

def test_wfs_data_present(self):
"""Test whether data available in the WFS is present in the dataframe
in case of a service error in XDOV."""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
df = bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2016-122561')))
    assert df.iloc[0].gemeente == 'Wortegem-Petegem'
# source: DOV-Vlaanderen/pydov

def test_nan_and_fetch_warning(self):
"""Test whether the XML data is set tot NaN in case of an error and
no stale cache is available. Also test if a warning is given to the
user."""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
with pytest.warns(XmlFetchWarning):
df = bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2016-122561')))
        assert np.isnan(df.iloc[0].boorgatmeting)
# source: DOV-Vlaanderen/pydov

def test_no_xsd_warning(self):
"""Test whether the metadata can still be retrieved, and that the
XSD values are unavailable. Also test if a warning is given to the
user."""
with pytest.warns(XsdFetchWarning):
gwf = GrondwaterFilterSearch(
objecttype=pydov.types.grondwaterfilter.GrondwaterFilter)
fields = gwf.get_fields()
        assert 'values' not in fields['aquifer_code']
# source: DOV-Vlaanderen/pydov

def test_no_xsd_wfs_only(self):
"""Test whether the WFS data is available, even if XSD schemas cannot
be resolved."""
gwf = GrondwaterFilterSearch(
objecttype=pydov.types.grondwaterfilter.GrondwaterFilter)
df = gwf.search(max_features=1)
    assert df.iloc[0].pkey_filter is not None
# source: DOV-Vlaanderen/pydov

def test_hooks_fetch_error(self, test_hook_count):
"""Test if the correct hooks are fired when the XML fails to be
fetched from DOV.
Parameters
----------
test_hook_count : pytest.fixture
Fixture removing default hooks and installing HookCounter.
"""
bs = BoringSearch(objecttype=pydov.types.boring.Boring)
bs.search(query=PropertyIsEqualTo(
'pkey_boring', build_dov_url('data/boring/2004-103984')))
assert pydov.hooks[0].count_wfs_search_init == 1
assert pydov.hooks[0].count_wfs_search_result == 1
assert pydov.hooks[0].count_wfs_search_result_received == 1
assert pydov.hooks[0].count_xml_received == 0
assert pydov.hooks[0].count_xml_cache_hit == 0
assert pydov.hooks[0].count_xml_downloaded == 0
assert pydov.hooks[0].count_xml_stale_hit == 0
assert pydov.hooks[0].count_xml_fetch_error == 1
assert pydov.hooks[0].count_meta_received > 0
    assert pydov.hooks[0].count_inject_meta_response > 0
# source: DOV-Vlaanderen/pydov

def lazy_tag(tag, *args, **kwargs):
"""
Lazily loads a template tag after the page has loaded. Requires jQuery
(for now).
Usage:
{% load lazy_tags %}
{% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %}
Args:
tag (str): the tag library and tag name separated by a period. For a
template tag named `do_thing` in a tag library named `thing_tags`
            the `tag` argument would be `'thing_tags.do_thing'`.
*args: arguments to be passed to the template tag.
**kwargs: keyword arguments to be passed to the template tag.
"""
tag_id = get_tag_id()
set_lazy_tag_data(tag_id, tag, args, kwargs)
return render_to_string('lazy_tags/lazy_tag.html', {
'tag_id': tag_id,
'STATIC_URL': settings.STATIC_URL,
    })
# source: grantmcconnaughey/django-lazy-tags

def lazy_tags_javascript():
"""Outputs the necessary JavaScript to load tags over AJAX."""
    return _render_js('javascript')
# source: grantmcconnaughey/django-lazy-tags

def lazy_tags_jquery():
"""Outputs the necessary jQuery to load tags over AJAX."""
    return _render_js('jquery')
# source: grantmcconnaughey/django-lazy-tags

def lazy_tags_prototype():
"""Outputs the necessary Prototype to load tags over AJAX."""
    return _render_js('prototype')
# source: grantmcconnaughey/django-lazy-tags

def given_a_call_counter(self):
self.x = 0
self.expected_args = (1, 4, "hello")
self.expected_kwargs = {"blah": "bloh", "bleh": 5}
    self.expected_return_value = "some thing that was returned"
# source: benjamin-hodgson/poll

def it_should_forward_the_arguments(self):
    assert self.args == self.expected_args
# source: benjamin-hodgson/poll

def it_should_call_it_once(self):
    assert self.x == 1
# source: benjamin-hodgson/poll

def function_to_break(self, *args, **kwargs):
self.x += 1
self.args = args
self.kwargs = kwargs
    return self.expected_return_value
# source: benjamin-hodgson/poll

def given_an_exception_to_throw(self):
self.x = 0
self.expected_exception = ValueError()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1, on_error=self.on_error_callback)
def function_to_break():
self.x += 1
raise self.expected_exception
    self.function_to_break = function_to_break
# source: benjamin-hodgson/poll

def it_should_bubble_the_exception_out(self):
    assert self.exception is self.expected_exception
# source: benjamin-hodgson/poll

def it_should_call_the_on_error_callback(self):
    assert self.on_error_called
# source: benjamin-hodgson/poll

def given_an_exception_to_throw(self):
self.x = 0
self.expected_exception = ValueError()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1, on_error=self.on_error_callback)
def function_to_break():
self.x += 1
raise self.expected_exception
    self.function_to_break = function_to_break
# source: benjamin-hodgson/poll

def it_should_bubble_the_exception_out(self):
    assert self.exception is self.expected_exception
# source: benjamin-hodgson/poll

def it_should_call_the_on_error_callback(self):
    assert self.on_error_result is self.expected_exception
# source: benjamin-hodgson/poll

def given_the_function_has_failed_twice(self):
self.expected_exception = ValueError()
contexts.catch(self.function_to_break)
    contexts.catch(self.function_to_break)
# source: benjamin-hodgson/poll

def it_should_bubble_the_exception_out(self):
    assert self.exception is self.expected_exception
# source: benjamin-hodgson/poll

def function_to_break(self):
    raise self.expected_exception
# source: benjamin-hodgson/poll

def given_the_function_has_failed_three_times(self):
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
self.x = 0
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
    self.x = 0
# source: benjamin-hodgson/poll

def it_should_throw_CircuitBrokenError(self):
    assert isinstance(self.exception, CircuitBrokenError)
# source: benjamin-hodgson/poll

def it_should_not_call_the_function(self):
    assert self.x == 0
# source: benjamin-hodgson/poll

def function_to_break(self):
self.x += 1
    raise ValueError
# source: benjamin-hodgson/poll

def given_the_circuit_was_about_to_be_broken(self):
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
self.mock.return_value = 0.5
contexts.catch(self.function_to_break)
    self.mock.return_value = 1.1
# source: benjamin-hodgson/poll

def it_should_have_decremented_the_failure_count(self):
assert isinstance(self.exception1, ValueError)
assert isinstance(self.exception2, ValueError)
    assert isinstance(self.exception3, CircuitBrokenError)
# source: benjamin-hodgson/poll

def function_to_break(self):
    raise ValueError
# source: benjamin-hodgson/poll

def given_the_circuit_was_broken_in_the_past(self):
self.x = 0
self.expected_return_value = "some thing that was returned"
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
    contexts.catch(self.function_to_break)
# source: benjamin-hodgson/poll

def it_should_call_the_function(self):
    assert self.x == 4
# source: benjamin-hodgson/poll

def cleanup_the_mock(self):
    self.patch.stop()
# source: benjamin-hodgson/poll

def function_to_break(self):
self.x += 1
if self.x < 3:
raise ValueError
    return self.expected_return_value
# source: benjamin-hodgson/poll

def given_the_circuit_was_broken_in_the_past(self):
self.x = 0
self.expected_exception = ValueError()
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
    contexts.catch(self.function_to_break)
# source: benjamin-hodgson/poll

def it_should_call_the_function(self):
    assert self.x == 4
# source: benjamin-hodgson/poll