Dataset schema: each row pairs a Python function with its docstring and provenance metadata.

| Column | Type |
|---|---|
| repo | string (7-55 chars) |
| path | string (4-127 chars) |
| func_name | string (1-88 chars) |
| original_string | string (75-19.8k chars) |
| language | string (1 class: python) |
| code | string (75-19.8k chars) |
| code_tokens | sequence of strings |
| docstring | string (3-17.3k chars) |
| docstring_tokens | sequence of strings |
| sha | string (40 chars) |
| url | string (87-242 chars) |
| partition | string (1 class: train) |
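The column set matches the CodeSearchNet-style function/docstring schema, and dumps like this are typically stored as JSON Lines records. A minimal loading sketch; the file name train.jsonl is an assumption for illustration, not something named in this dump:

import json

# Iterate one JSON Lines shard of the dataset; "train.jsonl" is a placeholder name.
with open('train.jsonl') as shard:
    for line in shard:
        row = json.loads(line)
        # Each record pairs a function's source with its extracted docstring,
        # plus provenance (repo, path, sha, url) and its split ("partition").
        print(row['repo'], row['func_name'], row['partition'])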
0k/kids.cache | src/kids/cache/__init__.py | undecorate | python | train
sha: 668f3b966877c4a0855d60e05cc3706cf37e4570 | url: https://github.com/0k/kids.cache/blob/668f3b966877c4a0855d60e05cc3706cf37e4570/src/kids/cache/__init__.py#L82-L90

def undecorate(func):
    """Returns the decorator and the undecorated function of a given object."""
    orig_call_wrapper = lambda x: x
    for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
        if isinstance(func, call_wrapper):
            func = unwrap(func)
            orig_call_wrapper = call_wrapper
            break
    return orig_call_wrapper, func
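SUPPORTED_DECORATOR is referenced but not defined in this row. A minimal sketch of what such a mapping could look like and how undecorate behaves with it; the mapping below is illustrative, not the actual kids.cache table:

# Illustrative mapping: wrapper type -> callable that recovers the plain function.
SUPPORTED_DECORATOR = {
    staticmethod: lambda wrapped: wrapped.__func__,
    classmethod: lambda wrapped: wrapped.__func__,
}

class Widget:
    @staticmethod
    def ping():
        return 'pong'

# Fetch the raw staticmethod object; normal attribute access would already unwrap it.
wrapper, plain = undecorate(Widget.__dict__['ping'])
assert wrapper is staticmethod
assert plain() == 'pong'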
idlesign/steampak | steampak/cli.py | item | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L111-L114

def item(ctx, appid, title):
    """Market-related commands."""
    ctx.obj['appid'] = appid
    ctx.obj['title'] = title
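item stores shared state on Click's context object so that subcommands such as get_price below can read it. A minimal sketch of that group/subcommand wiring; the group names and decorators are illustrative, since the actual steampak CLI registration is not part of this row:

import click

@click.group()
@click.pass_context
def market(ctx):
    ctx.obj = {}  # shared state the nested commands read and write

@market.group()
@click.argument('appid')
@click.argument('title')
@click.pass_context
def item(ctx, appid, title):
    """Market-related commands."""
    ctx.obj['appid'] = appid
    ctx.obj['title'] = title

@item.command()
@click.pass_context
def show(ctx):
    click.echo('%(appid)s / %(title)s' % ctx.obj)

# Shell invocation: market item <appid> <title> show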
idlesign/steampak | steampak/cli.py | get_price | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L120-L128

def get_price(ctx, currency):
    """Prints out market item price."""
    appid = ctx.obj['appid']
    title = ctx.obj['title']

    item_ = Item(appid, title)
    item_.get_price_data(currency)

    click.secho('Lowest price: %s %s' % (item_.price_lowest, item_.price_currency), fg='green')
idlesign/steampak | steampak/cli.py | get_cards | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L141-L164

def get_cards(ctx):
    """Prints out cards available for an application."""
    appid = ctx.obj['appid']
    app = Application(appid)

    click.secho('Cards for `%s` [appid: %s]' % (app.title, appid), fg='green')

    if not app.has_cards:
        click.secho('This app has no cards.', fg='red', err=True)
        return

    cards, booster = app.get_cards()

    def get_line(card):
        return '%s [market hash: `%s`]' % (card.title, card.market_hash)

    for card in cards.values():
        click.echo(get_line(card))

    if booster:
        click.secho('* Booster pack: `%s`' % get_line(booster), fg='yellow')

    click.secho('* Total cards: %d' % len(cards), fg='green')
idlesign/steampak | steampak/cli.py | get_card_prices | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L170-L186

def get_card_prices(ctx, currency):
    """Prints out lowest card prices for an application.

    A comma-separated list of application IDs is supported.
    """
    appid = ctx.obj['appid']

    detailed = True
    appids = [appid]

    if ',' in appid:
        appids = [appid.strip() for appid in appid.split(',')]
        detailed = False

    for appid in appids:
        print_card_prices(appid, currency, detailed=detailed)
        click.echo('')
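get_card_prices switches between a detailed single-app report and a compact batch report purely on whether the appid argument contains a comma. The parsing, isolated as a small function for illustration:

def parse_appids(appid):
    # Mirrors the branching in get_card_prices: batch mode disables detail.
    if ',' in appid:
        return [a.strip() for a in appid.split(',')], False
    return [appid], True

assert parse_appids('570') == (['570'], True)
assert parse_appids('570, 730') == (['570', '730'], False)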
idlesign/steampak | steampak/cli.py | get_gems | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L199-L205

def get_gems(ctx):
    """Prints out total gems count for a Steam user."""
    username = ctx.obj['username']

    click.secho(
        'Total gems owned by `%s`: %d' % (username, User(username).gems_total),
        fg='green')
idlesign/steampak | steampak/cli.py | get_games | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L210-L219

def get_games(ctx):
    """Prints out games owned by a Steam user."""
    username = ctx.obj['username']

    games = User(username).get_games_owned()

    for game in sorted(games.values(), key=itemgetter('title')):
        click.echo('%s [appid: %s]' % (game['title'], game['appid']))

    # Summarise the number of games owned (len(games) counts games, not gems).
    click.secho('Total games owned by `%s`: %d' % (username, len(games)), fg='green')
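get_games sorts the mapping's values by title with operator.itemgetter; a self-contained illustration of the pattern (the sample data is invented):

from operator import itemgetter

games = {
    '70': {'title': 'Half-Life', 'appid': '70'},
    '10': {'title': 'Counter-Strike', 'appid': '10'},
}
for game in sorted(games.values(), key=itemgetter('title')):
    print('%s [appid: %s]' % (game['title'], game['appid']))
# Counter-Strike [appid: 10]
# Half-Life [appid: 70]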
idlesign/steampak | steampak/cli.py | get_booster_stats | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L225-L255

def get_booster_stats(ctx, currency):
    """Prints out price stats for booster packs available in a Steam user's inventory."""
    username = ctx.obj['username']

    inventory = User(username)._get_inventory_raw()

    boosters = {}
    for item in inventory['rgDescriptions'].values():
        is_booster = False
        tags = item['tags']
        for tag in tags:
            if tag['internal_name'] == TAG_ITEM_CLASS_BOOSTER:
                is_booster = True
                break

        if not is_booster:
            continue

        appid = item['market_fee_app']
        title = item['name']
        boosters[appid] = title

    if not boosters:
        click.secho('User `%s` has no booster packs' % username, fg='red', err=True)
        return

    for appid, title in boosters.items():
        click.secho('Found booster: `%s`' % title, fg='blue')
        print_card_prices(appid, currency)
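The flag-and-break loop that classifies an inventory item as a booster is equivalent to a single any() over its tags. A sketch; the tag value shown is a placeholder, not the actual TAG_ITEM_CLASS_BOOSTER constant:

TAG_ITEM_CLASS_BOOSTER = 'item_class_5'  # placeholder value for illustration

def is_booster(item):
    # Same decision as the inner loop in get_booster_stats.
    return any(tag['internal_name'] == TAG_ITEM_CLASS_BOOSTER
               for tag in item['tags'])

assert is_booster({'tags': [{'internal_name': 'item_class_5'}]})
assert not is_booster({'tags': [{'internal_name': 'item_class_2'}]})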
idlesign/steampak | steampak/cli.py | get_cards_stats | python | train
sha: cb3f2c737e272b0360802d947e388df7e34f50f3 | url: https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/cli.py#L264-L287

def get_cards_stats(ctx, currency, skip_owned, appid, foil):
    """Prints out price stats for cards available in a Steam user's inventory."""
    username = ctx.obj['username']

    cards_by_app = defaultdict(list)
    inventory = User(username).traverse_inventory(item_filter=TAG_ITEM_CLASS_CARD)
    for item in inventory:
        appid_ = item.app.appid
        if not appid or appid_ in appid:
            cards_by_app[appid_].append(item)

    if not cards_by_app:
        click.secho('User `%s` has no cards' % username, fg='red', err=True)
        return

    for appid_, cards in cards_by_app.items():
        app = cards[0].app
        print_card_prices(
            app.appid, currency,
            owned_cards=[card.title for card in cards],
            skip_owned=skip_owned,
            foil=foil,
        )
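collections.defaultdict(list) is what lets get_cards_stats append into a per-app bucket without first checking for the key. A compact illustration with invented data:

from collections import defaultdict

cards_by_app = defaultdict(list)
for appid, card in [('570', 'card-a'), ('570', 'card-b'), ('440', 'card-c')]:
    cards_by_app[appid].append(card)  # missing keys start as empty lists

assert dict(cards_by_app) == {'570': ['card-a', 'card-b'], '440': ['card-c']}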
BD2KGenomics/protect | src/protect/mutation_calling/radia.py | run_radia_with_merge | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/radia.py#L40-L59

def run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
    """
    A wrapper for the entire RADIA sub-graph.

    :param dict rna_bam: Dict of dicts of bam and bai for tumor RNA-Seq obtained by running
           STAR within ProTECT.
    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :return: fsID to the merged RADIA calls
    :rtype: toil.fileStore.FileID
    """
    spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam,
                          normal_bam, univ_options, radia_options, disk='100M',
                          memory='100M').encapsulate()
    merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')
    job.addChild(spawn)
    spawn.addChild(merge)
    return merge.rv()
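run_radia_with_merge builds a two-stage Toil graph: an encapsulated spawn job fans out the per-chromosome work, and a merge job consumes its promised return value. A minimal runnable skeleton of that spawn-then-merge shape, assuming Toil is installed; the worker functions are stand-ins, not ProTECT code:

from toil.job import Job

def square(job, n):
    return n * n

def spawn(job):
    # Fan out one child per input and return the children's promised results.
    return [job.addChildJobFn(square, n).rv() for n in range(4)]

def merge(job, results):
    # The promises inside the list are fulfilled by the time this job runs.
    return sum(results)

def root(job):
    spawned = job.wrapJobFn(spawn).encapsulate()
    merged = job.wrapJobFn(merge, spawned.rv())
    job.addChild(spawned)
    spawned.addChild(merged)
    return merged.rv()

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./jobstore')
    print(Job.Runner.startToil(Job.wrapJobFn(root), options))  # 0+1+4+9 = 14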
BD2KGenomics/protect | src/protect/mutation_calling/radia.py | run_radia | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/radia.py#L62-L126

def run_radia(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
    """
    Spawn a RADIA job for each chromosome on the input bam trios.

    :param dict rna_bam: Dict of bam and bai for tumor RNA-Seq. It can be one of two formats
           rna_bam:    # Just the genomic bam and bai
               |- 'rna_genome_sorted.bam': fsID
               +- 'rna_genome_sorted.bam.bai': fsID
           OR
           rna_bam:    # The output from run_star
               |- 'rna_transcriptome.bam': fsID
               |- 'rna_genome':    # Only this part will be used
                   |- 'rna_genome_sorted.bam': fsID
                   +- 'rna_genome_sorted.bam.bai': fsID
    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :return: Dict of results from running RADIA on every chromosome
             perchrom_radia:
                 |- 'chr1': fsID
                 |- 'chr2': fsID
                 |- ...
                 +- 'chrM': fsID
    :rtype: dict
    """
    if 'rna_genome' in rna_bam.keys():
        rna_bam = rna_bam['rna_genome']
    elif set(rna_bam.keys()) == {'rna_genome_sorted.bam', 'rna_genome_sorted.bam.bai'}:
        pass
    else:
        raise RuntimeError('An improperly formatted dict was passed to rna_bam.')

    bams = {'tumor_rna': rna_bam['rna_genome_sorted.bam'],
            'tumor_rnai': rna_bam['rna_genome_sorted.bam.bai'],
            'tumor_dna': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
            'tumor_dnai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
            'normal_dna': normal_bam['normal_dna_fix_pg_sorted.bam'],
            'normal_dnai': normal_bam['normal_dna_fix_pg_sorted.bam.bai']}

    # Get a list of chromosomes to process
    if radia_options['chromosomes']:
        chromosomes = radia_options['chromosomes']
    else:
        chromosomes = sample_chromosomes(job, radia_options['genome_fai'])

    perchrom_radia = defaultdict()
    for chrom in chromosomes:
        radia = job.addChildJobFn(run_radia_perchrom, bams, univ_options, radia_options, chrom,
                                  memory='6G',
                                  disk=PromisedRequirement(
                                      radia_disk, tumor_bam['tumor_dna_fix_pg_sorted.bam'],
                                      normal_bam['normal_dna_fix_pg_sorted.bam'],
                                      rna_bam['rna_genome_sorted.bam'],
                                      radia_options['genome_fasta']))
        filter_radia = radia.addChildJobFn(run_filter_radia, bams, radia.rv(), univ_options,
                                           radia_options, chrom, memory='6G',
                                           disk=PromisedRequirement(
                                               radia_disk, tumor_bam['tumor_dna_fix_pg_sorted.bam'],
                                               normal_bam['normal_dna_fix_pg_sorted.bam'],
                                               rna_bam['rna_genome_sorted.bam'],
                                               radia_options['genome_fasta']))
        perchrom_radia[chrom] = filter_radia.rv()

    job.fileStore.logToMaster('Ran spawn_radia on %s successfully' % univ_options['patient'])
    return perchrom_radia
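PromisedRequirement defers the disk computation until the input file sizes are known at run time: it wraps a sizing function plus its arguments and is evaluated when the child job is scheduled. radia_disk itself is imported from elsewhere in ProTECT and is not shown in this row; a sketch of the shape such a function could take (the formula is an assumption):

def radia_disk(tumor_bam, normal_bam, rna_bam, fasta):
    # Each argument is a Toil FileID, which carries the stored file's size
    # in bytes; reserve room for the inputs plus headroom for the fasta.
    return int(tumor_bam.size + normal_bam.size + rna_bam.size + 2 * fasta.size)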
BD2KGenomics/protect | src/protect/mutation_calling/radia.py | run_radia_perchrom | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/radia.py#L129-L178

def run_radia_perchrom(job, bams, univ_options, radia_options, chrom):
    """
    Run the RADIA call on a single chromosome in the input bams.

    :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom,
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaTumorFasta=', input_files['genome.fa']]),
                  '-f', input_files['genome.fa'],
                  '-o', docker_path(radia_output),
                  '-i', univ_options['ref'],
                  '-m', input_files['genome.fa'],
                  '-d', '[email protected]',
                  '-q', 'Illumina',
                  '--disease', 'CANCER',
                  '-l', 'INFO',
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters,
                work_dir=work_dir, dockerhub=univ_options['dockerhub'],
                tool_version=radia_options['version'])
    output_file = job.fileStore.writeGlobalFile(radia_output)
    job.fileStore.logToMaster('Ran radia on %s:%s successfully' % (univ_options['patient'], chrom))
    return output_file
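docker_call is ProTECT's wrapper around docker run: conceptually it mounts the working directory into the container (docker_path maps host paths to that mount) and invokes the tool image at a pinned version. A rough, assumed equivalent; the image naming scheme and the /data mount point are inferences, not confirmed by this row:

import subprocess

def docker_call_sketch(tool, tool_parameters, work_dir, dockerhub, tool_version):
    # Assumed behavior of ProTECT's docker_call helper; details may differ.
    image = '%s/%s:%s' % (dockerhub, tool, tool_version)
    subprocess.check_call(['docker', 'run', '--rm',
                           '-v', '%s:/data' % work_dir,  # host work_dir visible as /data
                           image] + tool_parameters)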
BD2KGenomics/protect | src/protect/mutation_calling/radia.py | run_filter_radia | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/radia.py#L181-L246

def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
    """
    Run filterradia on the RADIA output.

    :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
    :param toil.fileStore.FileID radia_file: The vcf from running RADIA
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :param str chrom: Chromosome to process
    :return: fsID for the filtered chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'radia.vcf': radia_file,
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai'],
        'cosmic_beds': radia_options['cosmic_beds'],
        'dbsnp_beds': radia_options['dbsnp_beds'],
        'retrogene_beds': radia_options['retrogene_beds'],
        'pseudogene_beds': radia_options['pseudogene_beds'],
        'gencode_beds': radia_options['gencode_beds']
    }
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    for key in ('cosmic_beds', 'dbsnp_beds', 'retrogene_beds', 'pseudogene_beds', 'gencode_beds'):
        input_files[key] = untargz(input_files[key], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom.lstrip('chr'),
                  input_files['radia.vcf'],
                  '/data',
                  '/home/radia/scripts',
                  '-d', input_files['dbsnp_beds'],
                  '-r', input_files['retrogene_beds'],
                  '-p', input_files['pseudogene_beds'],
                  '-c', input_files['cosmic_beds'],
                  '-t', input_files['gencode_beds'],
                  '--noSnpEff',
                  '--noBlacklist',
                  '--noTargets',
                  '--noRnaBlacklist',
                  '-f', input_files['genome.fa'],
                  '--log=INFO',
                  '-g', docker_path(filterradia_log)]
    docker_call(tool='filterradia',
                tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'],
                tool_version=radia_options['version'])

    output_file = ''.join([work_dir, '/', chrom, '.vcf'])
    os.rename(''.join([work_dir, '/', univ_options['patient'], '_', chrom, '.vcf']), output_file)
    output_fsid = job.fileStore.writeGlobalFile(output_file)

    export_results(job, output_fsid, output_file, univ_options, subfolder='mutations/radia')

    job.fileStore.logToMaster('Ran filter-radia on %s:%s successfully'
                              % (univ_options['patient'], chrom))
    return output_fsid
BD2KGenomics/protect | src/protect/alignment/common.py | index_bamfile | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/common.py#L33-L74

def index_bamfile(job, bamfile, sample_type, univ_options, samtools_options, sample_info=None,
                  export=True):
    """
    Index `bamfile` using samtools.

    :param toil.fileStore.FileID bamfile: fsID for the bam file
    :param str sample_type: Description of the sample to inject into the filename
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict samtools_options: Options specific to samtools
    :param str sample_info: Information regarding the sample that will be injected into the
           filename as `sample_type`_`sample_info`.bam(.bai)
    :param bool export: Should the bam and bai be exported to the output directory?
    :return: Dict containing the input bam and the generated index (.bam.bai)
             output_files:
                 |- '<sample_type>(_<sample_info>).bam': fsID
                 +- '<sample_type>(_<sample_info>).bam.bai': fsID
    :rtype: dict
    """
    work_dir = os.getcwd()
    in_bamfile = sample_type
    if sample_info is not None:
        assert isinstance(sample_info, str)
        in_bamfile = '_'.join([in_bamfile, sample_info])
    in_bamfile += '.bam'
    input_files = {
        in_bamfile: bamfile}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
    parameters = ['index',
                  input_files[in_bamfile]]
    docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
    out_bai = '/'.join([work_dir, in_bamfile + '.bai'])
    output_files = {in_bamfile: bamfile,
                    in_bamfile + '.bai': job.fileStore.writeGlobalFile(out_bai)}
    if export:
        export_results(job, bamfile, os.path.splitext(out_bai)[0], univ_options,
                       subfolder='alignments')
        export_results(job, output_files[in_bamfile + '.bai'], out_bai, univ_options,
                       subfolder='alignments')

    job.fileStore.logToMaster('Ran samtools-index on %s:%s successfully'
                              % (univ_options['patient'], sample_type))
    return output_files
BD2KGenomics/protect | src/protect/alignment/common.py | sort_bamfile | python | train
sha: 06310682c50dcf8917b912c8e551299ff7ee41ce | url: https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/common.py#L77-L105

def sort_bamfile(job, bamfile, sample_type, univ_options, samtools_options):
    """
    Sort `bamfile` using samtools.

    :param toil.fileStore.FileID bamfile: fsID for the bam file
    :param str sample_type: Description of the sample to inject into the filename
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict samtools_options: Options specific to samtools
    :return: fsID for the sorted bamfile
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    in_bamfile = ''.join([sample_type, '.bam'])
    out_bamfile = '_'.join([sample_type, 'sorted.bam'])
    input_files = {
        in_bamfile: bamfile}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
    parameters = ['sort',
                  '-o', docker_path(out_bamfile),
                  '-O', 'bam',
                  '-T', 'temp_sorted',
                  '-@', str(samtools_options['n']),
                  input_files[in_bamfile]]
    docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version'])
    job.fileStore.deleteGlobalFile(bamfile)
    job.fileStore.logToMaster('Ran samtools-sort on %s:%s successfully'
                              % (univ_options['patient'], sample_type))
    return job.fileStore.writeGlobalFile(out_bamfile)
] | Sort `bamfile` using samtools
:param toil.fileStore.FileID bamfile: fsID for the bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:return: fsID for the sorted bamfile
:rtype: toil.fileStore.FileID | [
"Sort",
"bamfile",
"using",
"samtools"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/alignment/common.py#L77-L105 | train |
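
A minimal standalone sketch of the samtools command line that sort_bamfile assembles above (plain strings, no Toil or Docker; the sample label and thread count are illustrative assumptions):

sample_type = 'tumor_dna'  # hypothetical sample label
n_threads = 4              # stands in for samtools_options['n']

in_bamfile = ''.join([sample_type, '.bam'])
out_bamfile = '_'.join([sample_type, 'sorted.bam'])

parameters = ['sort',
              '-o', out_bamfile,    # output file
              '-O', 'bam',          # output format
              '-T', 'temp_sorted',  # prefix for samtools' temporary files
              '-@', str(n_threads),
              in_bamfile]

print('samtools ' + ' '.join(parameters))
# samtools sort -o tumor_dna_sorted.bam -O bam -T temp_sorted -@ 4 tumor_dna.bam
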
inveniosoftware/invenio-access | invenio_access/utils.py | get_identity | def get_identity(user):
"""Create an identity for a given user instance.
Primarily useful for testing.
"""
identity = Identity(user.id)
if hasattr(user, 'id'):
identity.provides.add(UserNeed(user.id))
for role in getattr(user, 'roles', []):
identity.provides.add(RoleNeed(role.name))
identity.user = user
return identity | python | def get_identity(user):
"""Create an identity for a given user instance.
Primarily useful for testing.
"""
identity = Identity(user.id)
if hasattr(user, 'id'):
identity.provides.add(UserNeed(user.id))
for role in getattr(user, 'roles', []):
identity.provides.add(RoleNeed(role.name))
identity.user = user
return identity | [
"def",
"get_identity",
"(",
"user",
")",
":",
"identity",
"=",
"Identity",
"(",
"user",
".",
"id",
")",
"if",
"hasattr",
"(",
"user",
",",
"'id'",
")",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"UserNeed",
"(",
"user",
".",
"id",
")",
")",
"for",
"role",
"in",
"getattr",
"(",
"user",
",",
"'roles'",
",",
"[",
"]",
")",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"RoleNeed",
"(",
"role",
".",
"name",
")",
")",
"identity",
".",
"user",
"=",
"user",
"return",
"identity"
] | Create an identity for a given user instance.
Primarily useful for testing. | [
"Create",
"an",
"identity",
"for",
"a",
"given",
"user",
"instance",
"."
] | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/utils.py#L16-L30 | train |
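
A minimal usage sketch for get_identity with a stub user object in place of a real account model (the import path is inferred from the file path above; flask_principal supplies Identity, UserNeed and RoleNeed):

from types import SimpleNamespace

from invenio_access.utils import get_identity  # import path assumed from the file path

admin_role = SimpleNamespace(name='admin')
user = SimpleNamespace(id=42, roles=[admin_role])  # any object with id/roles works

identity = get_identity(user)
print(identity.id)        # 42
print(identity.provides)  # contains Need(method='id', value=42) and Need(method='role', value='admin')
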
adfinis-sygroup/freeze | freeze/xfreeze.py | object_to_items | def object_to_items(data_structure):
"""Converts an object to an items list, also respecting slots.
Use dict(object_to_items(obj)) to get a dictionary."""
items = []
# Get all items from dict
try:
items = list(data_structure.__dict__.items())
except:
pass
# Get all slots
hierarchy = [data_structure]
try:
hierarchy += inspect.getmro(data_structure)
except:
pass
slots = []
try:
for b in hierarchy:
try:
slots += b.__slots__
except: # pragma: no cover
pass
except: # pragma: no cover
pass
# Get attrs from slots
for x in slots:
items.append((x, getattr(data_structure, x)))
return items | python | def object_to_items(data_structure):
"""Converts an object to an items list, also respecting slots.
Use dict(object_to_items(obj)) to get a dictionary."""
items = []
# Get all items from dict
try:
items = list(data_structure.__dict__.items())
except:
pass
# Get all slots
hierarchy = [data_structure]
try:
hierarchy += inspect.getmro(data_structure)
except:
pass
slots = []
try:
for b in hierarchy:
try:
slots += b.__slots__
except: # pragma: no cover
pass
except: # pragma: no cover
pass
# Get attrs from slots
for x in slots:
items.append((x, getattr(data_structure, x)))
return items | [
"def",
"object_to_items",
"(",
"data_structure",
")",
":",
"items",
"=",
"[",
"]",
"try",
":",
"items",
"=",
"list",
"(",
"data_structure",
".",
"__dict__",
".",
"items",
"(",
")",
")",
"except",
":",
"pass",
"hierarchy",
"=",
"[",
"data_structure",
"]",
"try",
":",
"hierarchy",
"+=",
"inspect",
".",
"getmro",
"(",
"data_structure",
")",
"except",
":",
"pass",
"slots",
"=",
"[",
"]",
"try",
":",
"for",
"b",
"in",
"hierarchy",
":",
"try",
":",
"slots",
"+=",
"b",
".",
"__slots__",
"except",
":",
"pass",
"except",
":",
"pass",
"for",
"x",
"in",
"slots",
":",
"items",
".",
"append",
"(",
"(",
"x",
",",
"getattr",
"(",
"data_structure",
",",
"x",
")",
")",
")",
"return",
"items"
] | Converts an object to an items list, also respecting slots.
Use dict(object_to_items(obj)) to get a dictionary. | [
"Converts",
"an",
"object",
"to",
"an",
"items",
"list",
"also",
"respecting",
"slots",
"."
] | 61b4fab8a90ed76d685448723baaa57e2bbd5ef9 | https://github.com/adfinis-sygroup/freeze/blob/61b4fab8a90ed76d685448723baaa57e2bbd5ef9/freeze/xfreeze.py#L251-L279 | train |
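
A quick sketch of the dict(object_to_items(obj)) pattern from the docstring, mixing __slots__ attributes with a plain __dict__ attribute (import path assumed from the file path above):

from freeze.xfreeze import object_to_items  # import path assumed

class Slotted(object):
    __slots__ = ('x', 'y')
    def __init__(self):
        self.x = 1
        self.y = 2

class Mixed(Slotted):
    # No __slots__ here, so Mixed instances regain a __dict__.
    def __init__(self):
        super(Mixed, self).__init__()
        self.z = 3  # stored in __dict__, not in a slot

print(dict(object_to_items(Mixed())))  # {'z': 3, 'x': 1, 'y': 2}
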
adfinis-sygroup/freeze | freeze/xfreeze.py | recursive_sort | def recursive_sort(data_structure):
"""Sort a recursive data_structure.
:param data_structure: The structure to convert.
data_structure must be already sortable or you must use freeze() or dump().
The function will work with many kinds of input. Dictionaries will be
converted to lists of tuples.
>>> _py2_to_py3(vformat(recursive_sort(dump(
... [3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}]
... ))))
(["<class 'dict'>",
(('a',
'b'),
('b',
'a'),
('c',
'c'))],
1,
3)
>>> recursive_sort([3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}])
((('a', 'b'), ('b', 'a'), ('c', 'c')), 1, 3)
>>> recursive_sort(_TestClass())
(('a', 'huhu'),)
"""
# We don't sort primitive types
if not isinstance(data_structure, _primitive_types):
is_meta = isinstance(data_structure, Meta)
was_dict = isinstance(data_structure, WasDict)
if not (is_meta or was_dict):
was_dict = isinstance(data_structure, dict)
if not was_dict:
# Dictize if possible (support objects)
try:
data_structure = data_structure.__dict__
was_dict = True
except:
pass
# Itemize if possible
try:
data_structure = data_structure.items()
except:
pass
tlen = -1
# If the item has a length we sort it
try:
tlen = len(data_structure)
except: # pragma: no cover
pass
if tlen != -1:
# Well there are classes out in the wild that answer to len
# but have no indexer.
try:
if was_dict:
return tuple(sorted(
[
(
recursive_sort(x[0]),
recursive_sort(x[1]),
)
for x in data_structure
],
key=TraversalBasedReprCompare
))
elif is_meta:
return data_structure[0:-1] + [
recursive_sort(
data_structure[-1]
)
]
else:
return tuple(sorted(
[recursive_sort(
x,
) for x in data_structure],
key=TraversalBasedReprCompare,
))
except: # pragma: no cover
pass
return data_structure | python | def recursive_sort(data_structure):
"""Sort a recursive data_structure.
:param data_structure: The structure to convert.
data_structure must be already sortable or you must use freeze() or dump().
The function will work with many kinds of input. Dictionaries will be
converted to lists of tuples.
>>> _py2_to_py3(vformat(recursive_sort(dump(
... [3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}]
... ))))
(["<class 'dict'>",
(('a',
'b'),
('b',
'a'),
('c',
'c'))],
1,
3)
>>> recursive_sort([3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}])
((('a', 'b'), ('b', 'a'), ('c', 'c')), 1, 3)
>>> recursive_sort(_TestClass())
(('a', 'huhu'),)
"""
# We don't sort primitive types
if not isinstance(data_structure, _primitive_types):
is_meta = isinstance(data_structure, Meta)
was_dict = isinstance(data_structure, WasDict)
if not (is_meta or was_dict):
was_dict = isinstance(data_structure, dict)
if not was_dict:
# Dictize if possible (support objects)
try:
data_structure = data_structure.__dict__
was_dict = True
except:
pass
# Itemize if possible
try:
data_structure = data_structure.items()
except:
pass
tlen = -1
# If the item has a length we sort it
try:
tlen = len(data_structure)
except: # pragma: no cover
pass
if tlen != -1:
# Well there are classes out in the wild that answer to len
# but have no indexer.
try:
if was_dict:
return tuple(sorted(
[
(
recursive_sort(x[0]),
recursive_sort(x[1]),
)
for x in data_structure
],
key=TraversalBasedReprCompare
))
elif is_meta:
return data_structure[0:-1] + [
recursive_sort(
data_structure[-1]
)
]
else:
return tuple(sorted(
[recursive_sort(
x,
) for x in data_structure],
key=TraversalBasedReprCompare,
))
except: # pragma: no cover
pass
return data_structure | [
"def",
"recursive_sort",
"(",
"data_structure",
")",
":",
"if",
"not",
"isinstance",
"(",
"data_structure",
",",
"_primitive_types",
")",
":",
"is_meta",
"=",
"isinstance",
"(",
"data_structure",
",",
"Meta",
")",
"was_dict",
"=",
"isinstance",
"(",
"data_structure",
",",
"WasDict",
")",
"if",
"not",
"(",
"is_meta",
"or",
"was_dict",
")",
":",
"was_dict",
"=",
"isinstance",
"(",
"data_structure",
",",
"dict",
")",
"if",
"not",
"was_dict",
":",
"try",
":",
"data_structure",
"=",
"data_structure",
".",
"__dict__",
"was_dict",
"=",
"True",
"except",
":",
"pass",
"try",
":",
"data_structure",
"=",
"data_structure",
".",
"items",
"(",
")",
"except",
":",
"pass",
"tlen",
"=",
"-",
"1",
"try",
":",
"tlen",
"=",
"len",
"(",
"data_structure",
")",
"except",
":",
"pass",
"if",
"tlen",
"!=",
"-",
"1",
":",
"try",
":",
"if",
"was_dict",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"[",
"(",
"recursive_sort",
"(",
"x",
"[",
"0",
"]",
")",
",",
"recursive_sort",
"(",
"x",
"[",
"1",
"]",
")",
",",
")",
"for",
"x",
"in",
"data_structure",
"]",
",",
"key",
"=",
"TraversalBasedReprCompare",
")",
")",
"elif",
"is_meta",
":",
"return",
"data_structure",
"[",
"0",
":",
"-",
"1",
"]",
"+",
"[",
"recursive_sort",
"(",
"data_structure",
"[",
"-",
"1",
"]",
")",
"]",
"else",
":",
"return",
"tuple",
"(",
"sorted",
"(",
"[",
"recursive_sort",
"(",
"x",
",",
")",
"for",
"x",
"in",
"data_structure",
"]",
",",
"key",
"=",
"TraversalBasedReprCompare",
",",
")",
")",
"except",
":",
"pass",
"return",
"data_structure"
] | Sort a recursive data_structure.
:param data_structure: The structure to convert.
data_structure must be already sortable or you must use freeze() or dump().
The function will work with many kinds of input. Dictionaries will be
converted to lists of tuples.
>>> _py2_to_py3(vformat(recursive_sort(dump(
... [3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}]
... ))))
(["<class 'dict'>",
(('a',
'b'),
('b',
'a'),
('c',
'c'))],
1,
3)
>>> recursive_sort([3, 1, {'c' : 'c', 'a' : 'b', 'b' : 'a'}])
((('a', 'b'), ('b', 'a'), ('c', 'c')), 1, 3)
>>> recursive_sort(_TestClass())
(('a', 'huhu'),) | [
"Sort",
"a",
"recursive",
"data_structure",
"."
] | 61b4fab8a90ed76d685448723baaa57e2bbd5ef9 | https://github.com/adfinis-sygroup/freeze/blob/61b4fab8a90ed76d685448723baaa57e2bbd5ef9/freeze/xfreeze.py#L531-L611 | train |
adfinis-sygroup/freeze | freeze/xfreeze.py | traverse_frozen_data | def traverse_frozen_data(data_structure):
"""Yields the leaves of the frozen data-structure pre-order.
It will produce the same order as one would write the data-structure."""
parent_stack = [data_structure]
while parent_stack:
node = parent_stack.pop(0)
# We don't iterate strings
tlen = -1
if not isinstance(node, _string_types):
# If item has a length we freeze it
try:
tlen = len(node)
except:
pass
if tlen == -1:
yield node
else:
parent_stack = list(node) + parent_stack | python | def traverse_frozen_data(data_structure):
"""Yields the leaves of the frozen data-structure pre-order.
It will produce the same order as one would write the data-structure."""
parent_stack = [data_structure]
while parent_stack:
node = parent_stack.pop(0)
# We don't iterate strings
tlen = -1
if not isinstance(node, _string_types):
# If the item has a length we descend into it
try:
tlen = len(node)
except:
pass
if tlen == -1:
yield node
else:
parent_stack = list(node) + parent_stack | [
"def",
"traverse_frozen_data",
"(",
"data_structure",
")",
":",
"parent_stack",
"=",
"[",
"data_structure",
"]",
"while",
"parent_stack",
":",
"node",
"=",
"parent_stack",
".",
"pop",
"(",
"0",
")",
"tlen",
"=",
"-",
"1",
"if",
"not",
"isinstance",
"(",
"node",
",",
"_string_types",
")",
":",
"try",
":",
"tlen",
"=",
"len",
"(",
"node",
")",
"except",
":",
"pass",
"if",
"tlen",
"==",
"-",
"1",
":",
"yield",
"node",
"else",
":",
"parent_stack",
"=",
"list",
"(",
"node",
")",
"+",
"parent_stack"
] | Yields the leaves of the frozen data-structure pre-order.
It will produce the same order as one would write the data-structure. | [
"Yields",
"the",
"leaves",
"of",
"the",
"frozen",
"data",
"-",
"structure",
"pre",
"-",
"order",
"."
] | 61b4fab8a90ed76d685448723baaa57e2bbd5ef9 | https://github.com/adfinis-sygroup/freeze/blob/61b4fab8a90ed76d685448723baaa57e2bbd5ef9/freeze/xfreeze.py#L665-L683 | train |
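
A standalone sketch of the same pre-order traversal over an already-frozen (tuple-based) structure, re-implemented so it runs without the rest of the module:

def traverse_leaves(frozen):
    """Yield the leaves of a nested tuple structure in pre-order."""
    stack = [frozen]
    while stack:
        node = stack.pop(0)
        if isinstance(node, tuple):
            stack = list(node) + stack  # children go to the front of the queue
        else:
            yield node

frozen = ('a', (3, 4), (('b', (5,)),))
print(list(traverse_leaves(frozen)))  # ['a', 3, 4, 'b', 5]
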
adfinis-sygroup/freeze | freeze/xfreeze.py | tree_diff | def tree_diff(a, b, n=5, sort=False):
"""Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like the structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithm's "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
"""
a = dump(a)
b = dump(b)
if not sort:
a = vformat(a).split("\n")
b = vformat(b).split("\n")
else:
a = vformat(recursive_sort(a)).split("\n")
b = vformat(recursive_sort(b)).split("\n")
return "\n".join(difflib.unified_diff(a, b, n=n, lineterm="")) | python | def tree_diff(a, b, n=5, sort=False):
"""Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like the structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithm's "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
"""
a = dump(a)
b = dump(b)
if not sort:
a = vformat(a).split("\n")
b = vformat(b).split("\n")
else:
a = vformat(recursive_sort(a)).split("\n")
b = vformat(recursive_sort(b)).split("\n")
return "\n".join(difflib.unified_diff(a, b, n=n, lineterm="")) | [
"def",
"tree_diff",
"(",
"a",
",",
"b",
",",
"n",
"=",
"5",
",",
"sort",
"=",
"False",
")",
":",
"a",
"=",
"dump",
"(",
"a",
")",
"b",
"=",
"dump",
"(",
"b",
")",
"if",
"not",
"sort",
":",
"a",
"=",
"vformat",
"(",
"a",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"b",
"=",
"vformat",
"(",
"b",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"else",
":",
"a",
"=",
"vformat",
"(",
"recursive_sort",
"(",
"a",
")",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"b",
"=",
"vformat",
"(",
"recursive_sort",
"(",
"b",
")",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"difflib",
".",
"unified_diff",
"(",
"a",
",",
"b",
",",
"n",
"=",
"n",
",",
"lineterm",
"=",
"\"\"",
")",
")"
] | Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like the structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithm's "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7)) | [
"Dump",
"any",
"data",
"-",
"structure",
"or",
"object",
"traverse",
"it",
"depth",
"-",
"first",
"in",
"-",
"order",
"and",
"apply",
"a",
"unified",
"diff",
"."
] | 61b4fab8a90ed76d685448723baaa57e2bbd5ef9 | https://github.com/adfinis-sygroup/freeze/blob/61b4fab8a90ed76d685448723baaa57e2bbd5ef9/freeze/xfreeze.py#L752-L823 | train |
idlesign/steampak | steampak/libsteam/resources/groups.py | Group.stats | def stats(self):
"""Basic group statistics.
Returned dict has the following keys:
'online' - users online count
'ingame' - users currently in game count
'chatting' - users chatting count
:return: dict
"""
stats_online = CRef.cint()
stats_ingame = CRef.cint()
stats_chatting = CRef.cint()
self._iface.get_clan_stats(
self.group_id,
stats_online,
stats_ingame,
stats_chatting,
)
return {
'online': int(stats_online),
'ingame': int(stats_ingame),
'chatting': int(stats_chatting),
} | python | def stats(self):
"""Basic group statistics.
Returned dict has the following keys:
'online' - users online count
'ingame' - users currently in game count
'chatting' - users chatting count
:return: dict
"""
stats_online = CRef.cint()
stats_ingame = CRef.cint()
stats_chatting = CRef.cint()
self._iface.get_clan_stats(
self.group_id,
stats_online,
stats_ingame,
stats_chatting,
)
return {
'online': int(stats_online),
'ingame': int(stats_ingame),
'chatting': int(stats_chatting),
} | [
"def",
"stats",
"(",
"self",
")",
":",
"stats_online",
"=",
"CRef",
".",
"cint",
"(",
")",
"stats_ingame",
"=",
"CRef",
".",
"cint",
"(",
")",
"stats_chatting",
"=",
"CRef",
".",
"cint",
"(",
")",
"self",
".",
"_iface",
".",
"get_clan_stats",
"(",
"self",
".",
"group_id",
",",
"stats_online",
",",
"stats_ingame",
",",
"stats_chatting",
",",
")",
"return",
"{",
"'online'",
":",
"int",
"(",
"stats_online",
")",
",",
"'ingame'",
":",
"int",
"(",
"stats_ingame",
")",
",",
"'chatting'",
":",
"int",
"(",
"stats_chatting",
")",
",",
"}"
] | Basic group statistics.
Returned dict has the following keys:
'online' - users online count
'ingame' - users currently in game count
'chatting' - users chatting count
:return: dict | [
"Basic",
"group",
"statistics",
"."
] | cb3f2c737e272b0360802d947e388df7e34f50f3 | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/groups.py#L24-L50 | train |
budacom/trading-bots | trading_bots/core/management/__init__.py | startproject | def startproject(name, directory, verbosity):
"""
Creates a Trading-Bots project directory structure for the given project
NAME in the current directory or optionally in the given DIRECTORY.
"""
handle_template('project', name, target=directory, verbosity=verbosity)
click.echo(f"Success: '{name}' project was successfully created on '{directory}'") | python | def startproject(name, directory, verbosity):
"""
Creates a Trading-Bots project directory structure for the given project
NAME in the current directory or optionally in the given DIRECTORY.
"""
handle_template('project', name, target=directory, verbosity=verbosity)
click.echo(f"Success: '{name}' project was successfully created on '{directory}'") | [
"def",
"startproject",
"(",
"name",
",",
"directory",
",",
"verbosity",
")",
":",
"handle_template",
"(",
"'project'",
",",
"name",
",",
"target",
"=",
"directory",
",",
"verbosity",
"=",
"verbosity",
")",
"click",
".",
"echo",
"(",
"f\"Success: '{name}' project was successfully created on '{directory}'\"",
")"
] | Creates a Trading-Bots project directory structure for the given project
NAME in the current directory or optionally in the given DIRECTORY. | [
"Creates",
"a",
"Trading",
"-",
"Bots",
"project",
"directory",
"structure",
"for",
"the",
"given",
"project",
"NAME",
"in",
"the",
"current",
"directory",
"or",
"optionally",
"in",
"the",
"given",
"DIRECTORY",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/core/management/__init__.py#L101-L107 | train |
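
Because startproject is a click command it can be exercised in-process with click's test runner. A minimal sketch, assuming the import path from the file path above and that NAME and DIRECTORY are positional arguments as the docstring suggests:

from click.testing import CliRunner

from trading_bots.core.management import startproject  # assumed import path

runner = CliRunner()
with runner.isolated_filesystem():  # work inside a throwaway temp dir
    result = runner.invoke(startproject, ['my_project', '.'])
    print(result.output)
    # Success: 'my_project' project was successfully created on '.'
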
budacom/trading-bots | trading_bots/core/management/__init__.py | createbot | def createbot(name, directory, verbosity):
"""
Creates a Bot's directory structure for the given bot NAME in
the current directory or optionally in the given DIRECTORY.
"""
handle_template('bot', name, target=directory, verbosity=verbosity)
click.echo(f"Success: '{name}' bot was successfully created on '{directory}'") | python | def createbot(name, directory, verbosity):
"""
Creates a Bot's directory structure for the given bot NAME in
the current directory or optionally in the given DIRECTORY.
"""
handle_template('bot', name, target=directory, verbosity=verbosity)
click.echo(f"Success: '{name}' bot was successfully created on '{directory}'") | [
"def",
"createbot",
"(",
"name",
",",
"directory",
",",
"verbosity",
")",
":",
"handle_template",
"(",
"'bot'",
",",
"name",
",",
"target",
"=",
"directory",
",",
"verbosity",
"=",
"verbosity",
")",
"click",
".",
"echo",
"(",
"f\"Success: '{name}' bot was successfully created on '{directory}'\"",
")"
] | Creates a Bot's directory structure for the given bot NAME in
the current directory or optionally in the given DIRECTORY. | [
"Creates",
"a",
"Bot",
"s",
"directory",
"structure",
"for",
"the",
"given",
"bot",
"NAME",
"in",
"the",
"current",
"directory",
"or",
"optionally",
"in",
"the",
"given",
"DIRECTORY",
"."
] | 8cb68bb8d0b5f822108db1cc5dae336e3d3c3452 | https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/core/management/__init__.py#L114-L120 | train |
idlesign/steampak | steampak/libsteam/resources/user.py | User.get_state | def get_state(self, as_str=False):
"""Returns user state. See ``UserState``.
:param bool as_str: Return human-friendly state name instead of an ID.
:rtype: int|str
"""
uid = self.user_id
if self._iface_user.get_id() == uid:
result = self._iface.get_my_state()
else:
result = self._iface.get_state(uid)
if as_str:
return UserState.get_alias(result)
return result | python | def get_state(self, as_str=False):
"""Returns user state. See ``UserState``.
:param bool as_str: Return human-friendly state name instead of an ID.
:rtype: int|str
"""
uid = self.user_id
if self._iface_user.get_id() == uid:
result = self._iface.get_my_state()
else:
result = self._iface.get_state(uid)
if as_str:
return UserState.get_alias(result)
return result | [
"def",
"get_state",
"(",
"self",
",",
"as_str",
"=",
"False",
")",
":",
"uid",
"=",
"self",
".",
"user_id",
"if",
"self",
".",
"_iface_user",
".",
"get_id",
"(",
")",
"==",
"uid",
":",
"result",
"=",
"self",
".",
"_iface",
".",
"get_my_state",
"(",
")",
"else",
":",
"result",
"=",
"self",
".",
"_iface",
".",
"get_state",
"(",
"uid",
")",
"if",
"as_str",
":",
"return",
"UserState",
".",
"get_alias",
"(",
"result",
")",
"return",
"result"
] | Returns user state. See ``UserState``.
:param bool as_str: Return human-friendly state name instead of an ID.
:rtype: int|str | [
"Returns",
"user",
"state",
".",
"See",
"UserState",
"."
] | cb3f2c737e272b0360802d947e388df7e34f50f3 | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/user.py#L95-L113 | train |
inveniosoftware/invenio-access | invenio_access/loaders.py | load_permissions_on_identity_loaded | def load_permissions_on_identity_loaded(sender, identity):
"""Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users additionally get the **authenticated_user** Need.
"""
identity.provides.add(
any_user
)
# if the user is not anonymous
if current_user.is_authenticated:
# Add the need provided to authenticated users
identity.provides.add(
authenticated_user
) | python | def load_permissions_on_identity_loaded(sender, identity):
"""Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users additionally get the **authenticated_user** Need.
"""
identity.provides.add(
any_user
)
# if the user is not anonymous
if current_user.is_authenticated:
# Add the need provided to authenticated users
identity.provides.add(
authenticated_user
) | [
"def",
"load_permissions_on_identity_loaded",
"(",
"sender",
",",
"identity",
")",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"any_user",
")",
"if",
"current_user",
".",
"is_authenticated",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"authenticated_user",
")"
] | Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users additionally get the **authenticated_user** Need. | [
"Add",
"system",
"roles",
"Needs",
"to",
"users",
"identities",
"."
] | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/loaders.py#L15-L29 | train |
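
The handler is meant to be connected to flask-principal's identity_loaded signal. A minimal wiring sketch (the bare Flask app is an assumption; inside Invenio this hookup is normally performed by the package itself):

from flask import Flask
from flask_principal import Principal, identity_loaded

from invenio_access.loaders import load_permissions_on_identity_loaded  # path from above

app = Flask(__name__)
Principal(app)

# On every identity load for this app, add the system-role Needs.
identity_loaded.connect_via(app)(load_permissions_on_identity_loaded)
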
HEPData/hepdata-validator | hepdata_validator/__init__.py | Validator.print_errors | def print_errors(self, file_name):
"""
Prints the errors observed for a file
"""
for error in self.get_messages(file_name):
print('\t', error.__unicode__()) | python | def print_errors(self, file_name):
"""
Prints the errors observed for a file
"""
for error in self.get_messages(file_name):
print('\t', error.__unicode__()) | [
"def",
"print_errors",
"(",
"self",
",",
"file_name",
")",
":",
"for",
"error",
"in",
"self",
".",
"get_messages",
"(",
"file_name",
")",
":",
"print",
"(",
"'\\t'",
",",
"error",
".",
"__unicode__",
"(",
")",
")"
] | Prints the errors observed for a file | [
"Prints",
"the",
"errors",
"observed",
"for",
"a",
"file"
] | d0b0cab742a009c8f0e8aac9f8c8e434a524d43c | https://github.com/HEPData/hepdata-validator/blob/d0b0cab742a009c8f0e8aac9f8c8e434a524d43c/hepdata_validator/__init__.py#L98-L103 | train |
bkg/django-spillway | spillway/forms/forms.py | RasterQueryForm.clean | def clean(self):
"""Return cleaned fields as a dict, determining which geom takes
precedence.
"""
data = super(RasterQueryForm, self).clean()
geom = data.pop('upload', None) or data.pop('bbox', None)
if geom:
data['g'] = geom
return data | python | def clean(self):
"""Return cleaned fields as a dict, determining which geom takes
precedence.
"""
data = super(RasterQueryForm, self).clean()
geom = data.pop('upload', None) or data.pop('bbox', None)
if geom:
data['g'] = geom
return data | [
"def",
"clean",
"(",
"self",
")",
":",
"data",
"=",
"super",
"(",
"RasterQueryForm",
",",
"self",
")",
".",
"clean",
"(",
")",
"geom",
"=",
"data",
".",
"pop",
"(",
"'upload'",
",",
"None",
")",
"or",
"data",
".",
"pop",
"(",
"'bbox'",
",",
"None",
")",
"if",
"geom",
":",
"data",
"[",
"'g'",
"]",
"=",
"geom",
"return",
"data"
] | Return cleaned fields as a dict, determining which geom takes
precedence. | [
"Return",
"cleaned",
"fields",
"as",
"a",
"dict",
"determining",
"which",
"geom",
"takes",
"precedence",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/forms/forms.py#L125-L133 | train |
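
The precedence rule reduces to a short-circuiting `or` over the two popped keys. A tiny sketch with plain dicts (no Django form machinery):

def pick_geom(data):
    # 'upload' wins when present; otherwise fall back to 'bbox'.
    geom = data.pop('upload', None) or data.pop('bbox', None)
    if geom:
        data['g'] = geom
    return data

print(pick_geom({'upload': 'POLY', 'bbox': (0, 0, 1, 1)}))
# {'bbox': (0, 0, 1, 1), 'g': 'POLY'} -- `or` short-circuits, so bbox is never popped
print(pick_geom({'bbox': (0, 0, 1, 1)}))
# {'g': (0, 0, 1, 1)}
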
drslump/pyshould | pyshould/matchers.py | register | def register(matcher, *aliases):
""" Register a matcher associated to one or more aliases. Each alias
given is also normalized.
"""
docstr = matcher.__doc__ if matcher.__doc__ is not None else ''
helpmatchers[matcher] = docstr.strip()
for alias in aliases:
matchers[alias] = matcher
# Map a normalized version of the alias
norm = normalize(alias)
normalized[norm] = alias
# Map a version without snake case
norm = norm.replace('_', '')
normalized[norm] = alias | python | def register(matcher, *aliases):
""" Register a matcher associated to one or more aliases. Each alias
given is also normalized.
"""
docstr = matcher.__doc__ if matcher.__doc__ is not None else ''
helpmatchers[matcher] = docstr.strip()
for alias in aliases:
matchers[alias] = matcher
# Map a normalized version of the alias
norm = normalize(alias)
normalized[norm] = alias
# Map a version without snake case
norm = norm.replace('_', '')
normalized[norm] = alias | [
"def",
"register",
"(",
"matcher",
",",
"*",
"aliases",
")",
":",
"docstr",
"=",
"matcher",
".",
"__doc__",
"if",
"matcher",
".",
"__doc__",
"is",
"not",
"None",
"else",
"''",
"helpmatchers",
"[",
"matcher",
"]",
"=",
"docstr",
".",
"strip",
"(",
")",
"for",
"alias",
"in",
"aliases",
":",
"matchers",
"[",
"alias",
"]",
"=",
"matcher",
"norm",
"=",
"normalize",
"(",
"alias",
")",
"normalized",
"[",
"norm",
"]",
"=",
"alias",
"norm",
"=",
"norm",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
"normalized",
"[",
"norm",
"]",
"=",
"alias"
] | Register a matcher associated to one or more aliases. Each alias
given is also normalized. | [
"Register",
"a",
"matcher",
"associated",
"to",
"one",
"or",
"more",
"aliases",
".",
"Each",
"alias",
"given",
"is",
"also",
"normalized",
"."
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/matchers.py#L54-L68 | train |
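
A usage sketch of the registry (import path assumed from the file path above; the matcher is a stand-in callable rather than a real hamcrest factory):

from pyshould.matchers import register, lookup  # assumed import path

def truthy():  # stand-in matcher factory
    return 'truthy-matcher'

register(truthy, 'be_truthy')

print(lookup('be_truthy') is truthy)  # True -- exact alias hit
print(lookup('BeTruthy') is truthy)   # True -- resolved via the normalized alias
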
drslump/pyshould | pyshould/matchers.py | normalize | def normalize(alias):
""" Normalizes an alias by removing adverbs defined in IGNORED_WORDS
"""
# Convert from CamelCase to snake_case
alias = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias)
# Ignore words
words = alias.lower().split('_')
words = filter(lambda w: w not in IGNORED_WORDS, words)
return '_'.join(words) | python | def normalize(alias):
""" Normalizes an alias by removing adverbs defined in IGNORED_WORDS
"""
# Convert from CamelCase to snake_case
alias = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias)
# Ignore words
words = alias.lower().split('_')
words = filter(lambda w: w not in IGNORED_WORDS, words)
return '_'.join(words) | [
"def",
"normalize",
"(",
"alias",
")",
":",
"alias",
"=",
"re",
".",
"sub",
"(",
"r'([a-z])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"alias",
")",
"words",
"=",
"alias",
".",
"lower",
"(",
")",
".",
"split",
"(",
"'_'",
")",
"words",
"=",
"filter",
"(",
"lambda",
"w",
":",
"w",
"not",
"in",
"IGNORED_WORDS",
",",
"words",
")",
"return",
"'_'",
".",
"join",
"(",
"words",
")"
] | Normalizes an alias by removing adverbs defined in IGNORED_WORDS | [
"Normalizes",
"an",
"alias",
"by",
"removing",
"adverbs",
"defined",
"in",
"IGNORED_WORDS"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/matchers.py#L94-L102 | train |
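
A standalone replica showing the two steps -- CamelCase splitting, then word filtering. The IGNORED_WORDS list here is an illustrative guess, not the module's actual contents:

import re

IGNORED_WORDS = ('be', 'to', 'the')  # illustrative only

def normalize(alias):
    alias = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias)  # CamelCase -> snake_case
    words = alias.lower().split('_')
    return '_'.join(w for w in words if w not in IGNORED_WORDS)

print(normalize('BeEqualTo'))        # 'equal'
print(normalize('raise_the_error'))  # 'raise_error'
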
drslump/pyshould | pyshould/matchers.py | lookup | def lookup(alias):
""" Tries to find a matcher callable associated to the given alias. If
an exact match does not exist it will try normalizing it and even
removing underscores to find one.
"""
if alias in matchers:
return matchers[alias]
else:
norm = normalize(alias)
if norm in normalized:
alias = normalized[norm]
return matchers[alias]
# Check without snake case
if -1 != alias.find('_'):
norm = normalize(alias).replace('_', '')
return lookup(norm)
return None | python | def lookup(alias):
""" Tries to find a matcher callable associated to the given alias. If
an exact match does not exist it will try normalizing it and even
removing underscores to find one.
"""
if alias in matchers:
return matchers[alias]
else:
norm = normalize(alias)
if norm in normalized:
alias = normalized[norm]
return matchers[alias]
# Check without snake case
if -1 != alias.find('_'):
norm = normalize(alias).replace('_', '')
return lookup(norm)
return None | [
"def",
"lookup",
"(",
"alias",
")",
":",
"if",
"alias",
"in",
"matchers",
":",
"return",
"matchers",
"[",
"alias",
"]",
"else",
":",
"norm",
"=",
"normalize",
"(",
"alias",
")",
"if",
"norm",
"in",
"normalized",
":",
"alias",
"=",
"normalized",
"[",
"norm",
"]",
"return",
"matchers",
"[",
"alias",
"]",
"if",
"-",
"1",
"!=",
"alias",
".",
"find",
"(",
"'_'",
")",
":",
"norm",
"=",
"normalize",
"(",
"alias",
")",
".",
"replace",
"(",
"'_'",
",",
"''",
")",
"return",
"lookup",
"(",
"norm",
")",
"return",
"None"
] | Tries to find a matcher callable associated to the given alias. If
an exact match does not exist it will try normalizing it and even
removing underscores to find one. | [
"Tries",
"to",
"find",
"a",
"matcher",
"callable",
"associated",
"to",
"the",
"given",
"alias",
".",
"If",
"an",
"exact",
"match",
"does",
"not",
"exist",
"it",
"will",
"try",
"normalizing",
"it",
"and",
"even",
"removing",
"underscores",
"to",
"find",
"one",
"."
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/matchers.py#L105-L124 | train |
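
The resolution cascade is: exact alias, then normalized alias, then (for names containing underscores) retry with the underscores stripped. A compact self-contained sketch of that cascade, with str.lower standing in for the real normalize():

matchers = {'equal': 'EQ-MATCHER'}
normalized = {'equal': 'equal'}

def resolve(alias):
    if alias in matchers:                  # 1. exact alias
        return matchers[alias]
    norm = alias.lower()                   # stand-in for normalize()
    if norm in normalized:                 # 2. normalized alias
        return matchers[normalized[norm]]
    if '_' in alias:                       # 3. drop underscores and retry
        return resolve(norm.replace('_', ''))
    return None

print(resolve('equal'))   # 'EQ-MATCHER' (step 1)
print(resolve('EQUAL'))   # 'EQ-MATCHER' (step 2)
print(resolve('EQ_UAL'))  # 'EQ-MATCHER' (step 3: 'eq_ual' -> 'equal')
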
drslump/pyshould | pyshould/matchers.py | suggest | def suggest(alias, max=3, cutoff=0.5):
""" Suggest a list of aliases which are similar enough
"""
aliases = matchers.keys()
similar = get_close_matches(alias, aliases, n=max, cutoff=cutoff)
return similar | python | def suggest(alias, max=3, cutoff=0.5):
""" Suggest a list of aliases which are similar enough
"""
aliases = matchers.keys()
similar = get_close_matches(alias, aliases, n=max, cutoff=cutoff)
return similar | [
"def",
"suggest",
"(",
"alias",
",",
"max",
"=",
"3",
",",
"cutoff",
"=",
"0.5",
")",
":",
"aliases",
"=",
"matchers",
".",
"keys",
"(",
")",
"similar",
"=",
"get_close_matches",
"(",
"alias",
",",
"aliases",
",",
"n",
"=",
"max",
",",
"cutoff",
"=",
"cutoff",
")",
"return",
"similar"
] | Suggest a list of aliases which are similar enough | [
"Suggest",
"a",
"list",
"of",
"aliases",
"which",
"are",
"similar",
"enough"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/matchers.py#L127-L134 | train |
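
suggest is a thin wrapper over the standard library's fuzzy matcher; the same behaviour is available directly from difflib (the alias list is made up):

from difflib import get_close_matches

aliases = ['be_equal_to', 'be_greater_than', 'be_empty', 'contain']
print(get_close_matches('be_equals_to', aliases, n=3, cutoff=0.5))
# e.g. ['be_equal_to'] -- closest aliases first
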
BD2KGenomics/protect | src/protect/mutation_calling/common.py | sample_chromosomes | def sample_chromosomes(job, genome_fai_file):
"""
Get a list of chromosomes in the input data.
:param toil.fileStore.FileID genome_fai_file: Job store file ID for the genome fai file
:return: Chromosomes in the sample
:rtype: list[str]
"""
work_dir = os.getcwd()
genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
return chromosomes_from_fai(genome_fai) | python | def sample_chromosomes(job, genome_fai_file):
"""
Get a list of chromosomes in the input data.
:param toil.fileStore.FileID genome_fai_file: Job store file ID for the genome fai file
:return: Chromosomes in the sample
:rtype: list[str]
"""
work_dir = os.getcwd()
genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
return chromosomes_from_fai(genome_fai) | [
"def",
"sample_chromosomes",
"(",
"job",
",",
"genome_fai_file",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"genome_fai",
"=",
"untargz",
"(",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"genome_fai_file",
")",
",",
"work_dir",
")",
"return",
"chromosomes_from_fai",
"(",
"genome_fai",
")"
] | Get a list of chromosomes in the input data.
:param toil.fileStore.FileID genome_fai_file: Job store file ID for the genome fai file
:return: Chromosomes in the sample
:rtype: list[str] | [
"Get",
"a",
"list",
"of",
"chromosomes",
"in",
"the",
"input",
"data",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L25-L35 | train |
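
A .fai index is tab-separated with the sequence name in the first column, so the chromosomes_from_fai step reduces to collecting that column. A standalone sketch (the index values are illustrative):

import io

fai_text = ('chr1\t248956422\t112\t70\t71\n'
            'chr2\t242193529\t252513167\t70\t71\n'
            'chrM\t16569\t3099750647\t70\t71\n')

def chromosomes_from_fai_text(handle):
    return [line.split('\t')[0] for line in handle if line.strip()]

print(chromosomes_from_fai_text(io.StringIO(fai_text)))  # ['chr1', 'chr2', 'chrM']
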
BD2KGenomics/protect | src/protect/mutation_calling/common.py | run_mutation_aggregator | def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Set up an input data structure for the merge function
out = {}
for chrom in mutation_results['mutect'].keys():
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results,
univ_options).rv()
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv() | python | def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Set up an input data structure for the merge function
out = {}
for chrom in mutation_results['mutect'].keys():
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results,
univ_options).rv()
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv() | [
"def",
"run_mutation_aggregator",
"(",
"job",
",",
"mutation_results",
",",
"univ_options",
")",
":",
"out",
"=",
"{",
"}",
"for",
"chrom",
"in",
"mutation_results",
"[",
"'mutect'",
"]",
".",
"keys",
"(",
")",
":",
"out",
"[",
"chrom",
"]",
"=",
"job",
".",
"addChildJobFn",
"(",
"merge_perchrom_mutations",
",",
"chrom",
",",
"mutation_results",
",",
"univ_options",
")",
".",
"rv",
"(",
")",
"merged_snvs",
"=",
"job",
".",
"addFollowOnJobFn",
"(",
"merge_perchrom_vcfs",
",",
"out",
",",
"'merged'",
",",
"univ_options",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Aggregated mutations for %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"merged_snvs",
".",
"rv",
"(",
")"
] | Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID | [
"Aggregate",
"all",
"the",
"called",
"mutations",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L54-L71 | train |
BD2KGenomics/protect | src/protect/mutation_calling/common.py | merge_perchrom_mutations | def merge_perchrom_mutations(job, chrom, mutations, univ_options):
"""
Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts with the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
from protect.mutation_calling.muse import process_muse_vcf
from protect.mutation_calling.mutect import process_mutect_vcf
from protect.mutation_calling.radia import process_radia_vcf
from protect.mutation_calling.somaticsniper import process_somaticsniper_vcf
from protect.mutation_calling.strelka import process_strelka_vcf
mutations.pop('indels')
mutations['strelka_indels'] = mutations['strelka']['indels']
mutations['strelka_snvs'] = mutations['strelka']['snvs']
vcf_processor = {'snvs': {'mutect': process_mutect_vcf,
'muse': process_muse_vcf,
'radia': process_radia_vcf,
'somaticsniper': process_somaticsniper_vcf,
'strelka_snvs': process_strelka_vcf
},
'indels': {'strelka_indels': process_strelka_vcf
}
}
# 'fusions': lambda x: None,
# 'indels': lambda x: None}
# For now, let's just say 2 out of n need to call it.
# num_preds = len(mutations)
# majority = int((num_preds + 0.5) / 2)
majority = {'snvs': 2,
'indels': 1}
accepted_hits = defaultdict(dict)
for mut_type in vcf_processor.keys():
# Get input files
perchrom_mutations = {caller: vcf_processor[mut_type][caller](job, mutations[caller][chrom],
work_dir, univ_options)
for caller in vcf_processor[mut_type]}
# Process the strelka key
perchrom_mutations['strelka'] = perchrom_mutations['strelka_' + mut_type]
perchrom_mutations.pop('strelka_' + mut_type)
# Read in each file to a dict
vcf_lists = {caller: read_vcf(vcf_file) for caller, vcf_file in perchrom_mutations.items()}
all_positions = list(set(itertools.chain(*vcf_lists.values())))
for position in sorted(all_positions):
hits = {caller: position in vcf_lists[caller] for caller in perchrom_mutations.keys()}
if sum(hits.values()) >= majority[mut_type]:
callers = ','.join([caller for caller, hit in hits.items() if hit])
assert position[1] not in accepted_hits[position[0]]
accepted_hits[position[0]][position[1]] = (position[2], position[3], callers)
with open(''.join([work_dir, '/', chrom, '.vcf']), 'w') as outfile:
print('##fileformat=VCFv4.0', file=outfile)
print('##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.',
file=outfile)
print('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO', file=outfile)
for chrom in chrom_sorted(accepted_hits.keys()):
for position in sorted(accepted_hits[chrom]):
print(chrom, position, '.', accepted_hits[chrom][position][0],
accepted_hits[chrom][position][1], '.', 'PASS',
'callers=' + accepted_hits[chrom][position][2], sep='\t', file=outfile)
fsid = job.fileStore.writeGlobalFile(outfile.name)
export_results(job, fsid, outfile.name, univ_options, subfolder='mutations/merged')
return fsid | python | def merge_perchrom_mutations(job, chrom, mutations, univ_options):
"""
Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts with the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
from protect.mutation_calling.muse import process_muse_vcf
from protect.mutation_calling.mutect import process_mutect_vcf
from protect.mutation_calling.radia import process_radia_vcf
from protect.mutation_calling.somaticsniper import process_somaticsniper_vcf
from protect.mutation_calling.strelka import process_strelka_vcf
mutations.pop('indels')
mutations['strelka_indels'] = mutations['strelka']['indels']
mutations['strelka_snvs'] = mutations['strelka']['snvs']
vcf_processor = {'snvs': {'mutect': process_mutect_vcf,
'muse': process_muse_vcf,
'radia': process_radia_vcf,
'somaticsniper': process_somaticsniper_vcf,
'strelka_snvs': process_strelka_vcf
},
'indels': {'strelka_indels': process_strelka_vcf
}
}
# 'fusions': lambda x: None,
# 'indels': lambda x: None}
# For now, let's just say 2 out of n need to call it.
# num_preds = len(mutations)
# majority = int((num_preds + 0.5) / 2)
majority = {'snvs': 2,
'indels': 1}
accepted_hits = defaultdict(dict)
for mut_type in vcf_processor.keys():
# Get input files
perchrom_mutations = {caller: vcf_processor[mut_type][caller](job, mutations[caller][chrom],
work_dir, univ_options)
for caller in vcf_processor[mut_type]}
# Process the strelka key
perchrom_mutations['strelka'] = perchrom_mutations['strelka_' + mut_type]
perchrom_mutations.pop('strelka_' + mut_type)
# Read in each file to a dict
vcf_lists = {caller: read_vcf(vcf_file) for caller, vcf_file in perchrom_mutations.items()}
all_positions = list(set(itertools.chain(*vcf_lists.values())))
for position in sorted(all_positions):
hits = {caller: position in vcf_lists[caller] for caller in perchrom_mutations.keys()}
if sum(hits.values()) >= majority[mut_type]:
callers = ','.join([caller for caller, hit in hits.items() if hit])
assert position[1] not in accepted_hits[position[0]]
accepted_hits[position[0]][position[1]] = (position[2], position[3], callers)
with open(''.join([work_dir, '/', chrom, '.vcf']), 'w') as outfile:
print('##fileformat=VCFv4.0', file=outfile)
print('##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.',
file=outfile)
print('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO', file=outfile)
for chrom in chrom_sorted(accepted_hits.keys()):
for position in sorted(accepted_hits[chrom]):
print(chrom, position, '.', accepted_hits[chrom][position][0],
accepted_hits[chrom][position][1], '.', 'PASS',
'callers=' + accepted_hits[chrom][position][2], sep='\t', file=outfile)
fsid = job.fileStore.writeGlobalFile(outfile.name)
export_results(job, fsid, outfile.name, univ_options, subfolder='mutations/merged')
return fsid | [
"def",
"merge_perchrom_mutations",
"(",
"job",
",",
"chrom",
",",
"mutations",
",",
"univ_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"from",
"protect",
".",
"mutation_calling",
".",
"muse",
"import",
"process_muse_vcf",
"from",
"protect",
".",
"mutation_calling",
".",
"mutect",
"import",
"process_mutect_vcf",
"from",
"protect",
".",
"mutation_calling",
".",
"radia",
"import",
"process_radia_vcf",
"from",
"protect",
".",
"mutation_calling",
".",
"somaticsniper",
"import",
"process_somaticsniper_vcf",
"from",
"protect",
".",
"mutation_calling",
".",
"strelka",
"import",
"process_strelka_vcf",
"mutations",
".",
"pop",
"(",
"'indels'",
")",
"mutations",
"[",
"'strelka_indels'",
"]",
"=",
"mutations",
"[",
"'strelka'",
"]",
"[",
"'indels'",
"]",
"mutations",
"[",
"'strelka_snvs'",
"]",
"=",
"mutations",
"[",
"'strelka'",
"]",
"[",
"'snvs'",
"]",
"vcf_processor",
"=",
"{",
"'snvs'",
":",
"{",
"'mutect'",
":",
"process_mutect_vcf",
",",
"'muse'",
":",
"process_muse_vcf",
",",
"'radia'",
":",
"process_radia_vcf",
",",
"'somaticsniper'",
":",
"process_somaticsniper_vcf",
",",
"'strelka_snvs'",
":",
"process_strelka_vcf",
"}",
",",
"'indels'",
":",
"{",
"'strelka_indels'",
":",
"process_strelka_vcf",
"}",
"}",
"majority",
"=",
"{",
"'snvs'",
":",
"2",
",",
"'indels'",
":",
"1",
"}",
"accepted_hits",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"mut_type",
"in",
"vcf_processor",
".",
"keys",
"(",
")",
":",
"perchrom_mutations",
"=",
"{",
"caller",
":",
"vcf_processor",
"[",
"mut_type",
"]",
"[",
"caller",
"]",
"(",
"job",
",",
"mutations",
"[",
"caller",
"]",
"[",
"chrom",
"]",
",",
"work_dir",
",",
"univ_options",
")",
"for",
"caller",
"in",
"vcf_processor",
"[",
"mut_type",
"]",
"}",
"perchrom_mutations",
"[",
"'strelka'",
"]",
"=",
"perchrom_mutations",
"[",
"'strelka_'",
"+",
"mut_type",
"]",
"perchrom_mutations",
".",
"pop",
"(",
"'strelka_'",
"+",
"mut_type",
")",
"vcf_lists",
"=",
"{",
"caller",
":",
"read_vcf",
"(",
"vcf_file",
")",
"for",
"caller",
",",
"vcf_file",
"in",
"perchrom_mutations",
".",
"items",
"(",
")",
"}",
"all_positions",
"=",
"list",
"(",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"vcf_lists",
".",
"values",
"(",
")",
")",
")",
")",
"for",
"position",
"in",
"sorted",
"(",
"all_positions",
")",
":",
"hits",
"=",
"{",
"caller",
":",
"position",
"in",
"vcf_lists",
"[",
"caller",
"]",
"for",
"caller",
"in",
"perchrom_mutations",
".",
"keys",
"(",
")",
"}",
"if",
"sum",
"(",
"hits",
".",
"values",
"(",
")",
")",
">=",
"majority",
"[",
"mut_type",
"]",
":",
"callers",
"=",
"','",
".",
"join",
"(",
"[",
"caller",
"for",
"caller",
",",
"hit",
"in",
"hits",
".",
"items",
"(",
")",
"if",
"hit",
"]",
")",
"assert",
"position",
"[",
"1",
"]",
"not",
"in",
"accepted_hits",
"[",
"position",
"[",
"0",
"]",
"]",
"accepted_hits",
"[",
"position",
"[",
"0",
"]",
"]",
"[",
"position",
"[",
"1",
"]",
"]",
"=",
"(",
"position",
"[",
"2",
"]",
",",
"position",
"[",
"3",
"]",
",",
"callers",
")",
"with",
"open",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"chrom",
",",
"'.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"outfile",
":",
"print",
"(",
"'##fileformat=VCFv4.0'",
",",
"file",
"=",
"outfile",
")",
"print",
"(",
"'##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.'",
",",
"file",
"=",
"outfile",
")",
"print",
"(",
"'#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO'",
",",
"file",
"=",
"outfile",
")",
"for",
"chrom",
"in",
"chrom_sorted",
"(",
"accepted_hits",
".",
"keys",
"(",
")",
")",
":",
"for",
"position",
"in",
"sorted",
"(",
"accepted_hits",
"[",
"chrom",
"]",
")",
":",
"print",
"(",
"chrom",
",",
"position",
",",
"'.'",
",",
"accepted_hits",
"[",
"chrom",
"]",
"[",
"position",
"]",
"[",
"0",
"]",
",",
"accepted_hits",
"[",
"chrom",
"]",
"[",
"position",
"]",
"[",
"1",
"]",
",",
"'.'",
",",
"'PASS'",
",",
"'callers='",
"+",
"accepted_hits",
"[",
"chrom",
"]",
"[",
"position",
"]",
"[",
"2",
"]",
",",
"sep",
"=",
"'\\t'",
",",
"file",
"=",
"outfile",
")",
"fsid",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"outfile",
".",
"name",
")",
"export_results",
"(",
"job",
",",
"fsid",
",",
"outfile",
".",
"name",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/merged'",
")",
"return",
"fsid"
] | Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts with the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID | [
"Merge",
"the",
"mutation",
"calls",
"for",
"a",
"single",
"chromosome",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L74-L143 | train |
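
The consensus rule keeps a position once at least majority[mut_type] callers report it. A toy sketch of that vote over in-memory call sets (caller names and positions are made up):

import itertools

calls = {
    'mutect':        {('chr1', 100, 'A', 'T'), ('chr1', 250, 'G', 'C')},
    'muse':          {('chr1', 100, 'A', 'T')},
    'somaticsniper': {('chr1', 250, 'G', 'C'), ('chr1', 300, 'T', 'G')},
}
majority = 2  # snvs need two supporting callers

for pos in sorted(set(itertools.chain(*calls.values()))):
    hits = sorted(caller for caller, calls_ in calls.items() if pos in calls_)
    if len(hits) >= majority:
        print(pos, ','.join(hits))
# ('chr1', 100, 'A', 'T') muse,mutect
# ('chr1', 250, 'G', 'C') mutect,somaticsniper
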
BD2KGenomics/protect | src/protect/mutation_calling/common.py | read_vcf | def read_vcf(vcf_file):
"""
Read a vcf file into a list of tuples.
:param str vcf_file: Path to a vcf file.
:return: list of tuples of vcf records
:rtype: list
"""
vcf_dict = []
with open(vcf_file, 'r') as invcf:
for line in invcf:
if line.startswith('#'):
continue
line = line.strip().split()
vcf_dict.append((line[0], line[1], line[3], line[4]))
return vcf_dict | python | def read_vcf(vcf_file):
"""
Read a vcf file into a list of tuples.
:param str vcf_file: Path to a vcf file.
:return: list of tuples of vcf records
:rtype: list
"""
vcf_dict = []
with open(vcf_file, 'r') as invcf:
for line in invcf:
if line.startswith('#'):
continue
line = line.strip().split()
vcf_dict.append((line[0], line[1], line[3], line[4]))
return vcf_dict | [
"def",
"read_vcf",
"(",
"vcf_file",
")",
":",
"vcf_dict",
"=",
"[",
"]",
"with",
"open",
"(",
"vcf_file",
",",
"'r'",
")",
"as",
"invcf",
":",
"for",
"line",
"in",
"invcf",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"vcf_dict",
".",
"append",
"(",
"(",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
"]",
",",
"line",
"[",
"3",
"]",
",",
"line",
"[",
"4",
"]",
")",
")",
"return",
"vcf_dict"
] | Read a vcf file into a list of tuples.
:param str vcf_file: Path to a vcf file.
:return: list of tuples of vcf records
:rtype: list | [
"Read",
"a",
"vcf",
"file",
"into",
"a",
"list",
"of",
"tuples",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L146-L161 | train |
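
The same parsing logic applied to an in-memory VCF fragment -- header lines are skipped and only CHROM, POS, REF and ALT are kept:

import io

vcf_text = ('##fileformat=VCFv4.0\n'
            '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
            'chr1\t100\t.\tA\tT\t.\tPASS\t.\n'
            'chr2\t250\t.\tG\tC\t.\tPASS\t.\n')

records = []
for line in io.StringIO(vcf_text):
    if line.startswith('#'):
        continue
    fields = line.strip().split()
    records.append((fields[0], fields[1], fields[3], fields[4]))

print(records)  # [('chr1', '100', 'A', 'T'), ('chr2', '250', 'G', 'C')]
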
BD2KGenomics/protect | src/protect/mutation_calling/common.py | merge_perchrom_vcfs | def merge_perchrom_vcfs(job, perchrom_vcfs, tool_name, univ_options):
"""
Merge per-chromosome vcf files into a single genome level vcf.
:param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID of the corresponding
vcf as value
:param str tool_name: Name of the tool that generated the vcfs
:returns: fsID for the merged vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {''.join([chrom, '.vcf']): jsid for chrom, jsid in perchrom_vcfs.items()}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
first = True
with open(''.join([work_dir, '/', 'all_merged.vcf']), 'w') as outvcf:
for chromvcfname in chrom_sorted([x.rstrip('.vcf') for x in input_files.keys()]):
with open(input_files[chromvcfname + '.vcf'], 'r') as infile:
for line in infile:
line = line.strip()
if line.startswith('#'):
if first:
print(line, file=outvcf)
continue
first = False
print(line, file=outvcf)
output_file = job.fileStore.writeGlobalFile(outvcf.name)
export_results(job, output_file, outvcf.name, univ_options, subfolder='mutations/' + tool_name)
job.fileStore.logToMaster('Ran merge_perchrom_vcfs for %s successfully' % tool_name)
return output_file | python | def merge_perchrom_vcfs(job, perchrom_vcfs, tool_name, univ_options):
"""
Merge per-chromosome vcf files into a single genome level vcf.
:param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID of the corresponding
vcf as value
:param str tool_name: Name of the tool that generated the vcfs
:returns: fsID for the merged vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {''.join([chrom, '.vcf']): jsid for chrom, jsid in perchrom_vcfs.items()}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
first = True
with open(''.join([work_dir, '/', 'all_merged.vcf']), 'w') as outvcf:
for chromvcfname in chrom_sorted([x.rstrip('.vcf') for x in input_files.keys()]):
with open(input_files[chromvcfname + '.vcf'], 'r') as infile:
for line in infile:
line = line.strip()
if line.startswith('#'):
if first:
print(line, file=outvcf)
continue
first = False
print(line, file=outvcf)
output_file = job.fileStore.writeGlobalFile(outvcf.name)
export_results(job, output_file, outvcf.name, univ_options, subfolder='mutations/' + tool_name)
job.fileStore.logToMaster('Ran merge_perchrom_vcfs for %s successfully' % tool_name)
return output_file | [
"def",
"merge_perchrom_vcfs",
"(",
"job",
",",
"perchrom_vcfs",
",",
"tool_name",
",",
"univ_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"''",
".",
"join",
"(",
"[",
"chrom",
",",
"'.vcf'",
"]",
")",
":",
"jsid",
"for",
"chrom",
",",
"jsid",
"in",
"perchrom_vcfs",
".",
"items",
"(",
")",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"first",
"=",
"True",
"with",
"open",
"(",
"''",
".",
"join",
"(",
"[",
"work_dir",
",",
"'/'",
",",
"'all_merged.vcf'",
"]",
")",
",",
"'w'",
")",
"as",
"outvcf",
":",
"for",
"chromvcfname",
"in",
"chrom_sorted",
"(",
"[",
"x",
".",
"rstrip",
"(",
"'.vcf'",
")",
"for",
"x",
"in",
"input_files",
".",
"keys",
"(",
")",
"]",
")",
":",
"with",
"open",
"(",
"input_files",
"[",
"chromvcfname",
"+",
"'.vcf'",
"]",
",",
"'r'",
")",
"as",
"infile",
":",
"for",
"line",
"in",
"infile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"if",
"first",
":",
"print",
"(",
"line",
",",
"file",
"=",
"outvcf",
")",
"continue",
"first",
"=",
"False",
"print",
"(",
"line",
",",
"file",
"=",
"outvcf",
")",
"output_file",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"outvcf",
".",
"name",
")",
"export_results",
"(",
"job",
",",
"output_file",
",",
"outvcf",
".",
"name",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/'",
"+",
"tool_name",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran merge_perchrom_vcfs for %s successfully'",
"%",
"tool_name",
")",
"return",
"output_file"
] | Merge per-chromosome vcf files into a single genome level vcf.
:param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID of the corresponding
vcf as value
:param str tool_name: Name of the tool that generated the vcfs
:returns: fsID for the merged vcf
:rtype: toil.fileStore.FileID | [
"Merge",
"per",
"-",
"chromosome",
"vcf",
"files",
"into",
"a",
"single",
"genome",
"level",
"vcf",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L164-L192 | train |
BD2KGenomics/protect | src/protect/mutation_calling/common.py | unmerge | def unmerge(job, input_vcf, tool_name, chromosomes, tool_options, univ_options):
"""
Un-merge a vcf file into per-chromosome vcfs.
:param str input_vcf: Input vcf
:param str tool_name: The name of the mutation caller
:param list chromosomes: List of chromosomes to retain
:param dict tool_options: Options specific to the mutation caller
:param dict univ_options: Dict of universal options used by almost all tools
:return: dict of fsIDs, one for each chromosomal vcf
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'input.vcf': input_vcf,
'genome.fa.fai.tar.gz': tool_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['genome.fa.fai'] = untargz(input_files['genome.fa.fai.tar.gz'], work_dir)
read_chromosomes = defaultdict()
with open(input_files['input.vcf'], 'r') as in_vcf:
header = []
for line in in_vcf:
if line.startswith('#'):
header.append(line)
continue
line = line.strip()
chrom = line.split()[0]
if chrom in read_chromosomes:
print(line, file=read_chromosomes[chrom])
else:
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
print(line, file=read_chromosomes[chrom])
# Process chromosomes that had no mutations
for chrom in set(chromosomes).difference(set(read_chromosomes.keys())):
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
outdict = {}
chroms = set(chromosomes).intersection(set(read_chromosomes.keys()))
for chrom, chromvcf in read_chromosomes.items():
chromvcf.close()
if chrom not in chroms:
continue
outdict[chrom] = job.fileStore.writeGlobalFile(chromvcf.name)
export_results(job, outdict[chrom], chromvcf.name, univ_options,
subfolder='mutations/' + tool_name)
return outdict | python | def unmerge(job, input_vcf, tool_name, chromosomes, tool_options, univ_options):
"""
Un-merge a vcf file into per-chromosome vcfs.
:param str input_vcf: Input vcf
:param str tool_name: The name of the mutation caller
:param list chromosomes: List of chromosomes to retain
:param dict tool_options: Options specific to the mutation caller
:param dict univ_options: Dict of universal options used by almost all tools
:return: dict of fsIDs, one for each chromosomal vcf
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'input.vcf': input_vcf,
'genome.fa.fai.tar.gz': tool_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['genome.fa.fai'] = untargz(input_files['genome.fa.fai.tar.gz'], work_dir)
read_chromosomes = defaultdict()
with open(input_files['input.vcf'], 'r') as in_vcf:
header = []
for line in in_vcf:
if line.startswith('#'):
header.append(line)
continue
line = line.strip()
chrom = line.split()[0]
if chrom in read_chromosomes:
print(line, file=read_chromosomes[chrom])
else:
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
print(line, file=read_chromosomes[chrom])
# Process chromosomes that had no mutations
for chrom in set(chromosomes).difference(set(read_chromosomes.keys())):
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
outdict = {}
chroms = set(chromosomes).intersection(set(read_chromosomes.keys()))
for chrom, chromvcf in read_chromosomes.items():
chromvcf.close()
if chrom not in chroms:
continue
outdict[chrom] = job.fileStore.writeGlobalFile(chromvcf.name)
export_results(job, outdict[chrom], chromvcf.name, univ_options,
subfolder='mutations/' + tool_name)
return outdict | [
"def",
"unmerge",
"(",
"job",
",",
"input_vcf",
",",
"tool_name",
",",
"chromosomes",
",",
"tool_options",
",",
"univ_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'input.vcf'",
":",
"input_vcf",
",",
"'genome.fa.fai.tar.gz'",
":",
"tool_options",
"[",
"'genome_fai'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"input_files",
"[",
"'genome.fa.fai'",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"'genome.fa.fai.tar.gz'",
"]",
",",
"work_dir",
")",
"read_chromosomes",
"=",
"defaultdict",
"(",
")",
"with",
"open",
"(",
"input_files",
"[",
"'input.vcf'",
"]",
",",
"'r'",
")",
"as",
"in_vcf",
":",
"header",
"=",
"[",
"]",
"for",
"line",
"in",
"in_vcf",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"header",
".",
"append",
"(",
"line",
")",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"chrom",
"=",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"chrom",
"in",
"read_chromosomes",
":",
"print",
"(",
"line",
",",
"file",
"=",
"read_chromosomes",
"[",
"chrom",
"]",
")",
"else",
":",
"read_chromosomes",
"[",
"chrom",
"]",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"chrom",
"+",
"'.vcf'",
")",
",",
"'w'",
")",
"print",
"(",
"''",
".",
"join",
"(",
"header",
")",
",",
"file",
"=",
"read_chromosomes",
"[",
"chrom",
"]",
",",
"end",
"=",
"''",
")",
"print",
"(",
"line",
",",
"file",
"=",
"read_chromosomes",
"[",
"chrom",
"]",
")",
"for",
"chrom",
"in",
"set",
"(",
"chromosomes",
")",
".",
"difference",
"(",
"set",
"(",
"read_chromosomes",
".",
"keys",
"(",
")",
")",
")",
":",
"read_chromosomes",
"[",
"chrom",
"]",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"chrom",
"+",
"'.vcf'",
")",
",",
"'w'",
")",
"print",
"(",
"''",
".",
"join",
"(",
"header",
")",
",",
"file",
"=",
"read_chromosomes",
"[",
"chrom",
"]",
",",
"end",
"=",
"''",
")",
"outdict",
"=",
"{",
"}",
"chroms",
"=",
"set",
"(",
"chromosomes",
")",
".",
"intersection",
"(",
"set",
"(",
"read_chromosomes",
".",
"keys",
"(",
")",
")",
")",
"for",
"chrom",
",",
"chromvcf",
"in",
"read_chromosomes",
".",
"items",
"(",
")",
":",
"chromvcf",
".",
"close",
"(",
")",
"if",
"chrom",
"not",
"in",
"chroms",
":",
"continue",
"outdict",
"[",
"chrom",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"chromvcf",
".",
"name",
")",
"export_results",
"(",
"job",
",",
"outdict",
"[",
"chrom",
"]",
",",
"chromvcf",
".",
"name",
",",
"univ_options",
",",
"subfolder",
"=",
"'mutations/'",
"+",
"tool_name",
")",
"return",
"outdict"
] | Un-merge a vcf file into per-chromosome vcfs.
:param str input_vcf: Input vcf
:param str tool_name: The name of the mutation caller
:param list chromosomes: List of chromosomes to retain
:param dict tool_options: Options specific to the mutation caller
:param dict univ_options: Dict of universal options used by almost all tools
:return: dict of fsIDs, one for each chromosomal vcf
:rtype: dict | [
"Un",
"-",
"merge",
"a",
"vcf",
"file",
"into",
"per",
"-",
"chromosome",
"vcfs",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/common.py#L195-L243 | train |
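Going the other way, a sketch of splitting a genome-level vcf back into per-chromosome files; strelka_vcf and strelka_options are placeholder names, and the tool_options dict must carry the 'genome_fai' fsID read above.
# Hedged sketch: schedule unmerge as a child job (names are illustrative).
perchrom = job.addChildJobFn(unmerge, strelka_vcf, 'strelka', chromosomes,
                             strelka_options, univ_options).rv()  # {chrom: fsID}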
bkg/django-spillway | spillway/collections.py | as_feature | def as_feature(data):
"""Returns a Feature or FeatureCollection.
Arguments:
data -- Sequence or Mapping of Feature-like or FeatureCollection-like data
"""
if not isinstance(data, (Feature, FeatureCollection)):
if is_featurelike(data):
data = Feature(**data)
elif has_features(data):
data = FeatureCollection(**data)
elif isinstance(data, collections.Sequence):
data = FeatureCollection(features=data)
elif has_layer(data):
data = LayerCollection(data)
elif has_coordinates(data):
data = Feature(geometry=data)
elif isinstance(data, collections.Mapping) and not data:
data = Feature()
return data | python | def as_feature(data):
"""Returns a Feature or FeatureCollection.
Arguments:
data -- Sequence or Mapping of Feature-like or FeatureCollection-like data
"""
if not isinstance(data, (Feature, FeatureCollection)):
if is_featurelike(data):
data = Feature(**data)
elif has_features(data):
data = FeatureCollection(**data)
elif isinstance(data, collections.Sequence):
data = FeatureCollection(features=data)
elif has_layer(data):
data = LayerCollection(data)
elif has_coordinates(data):
data = Feature(geometry=data)
elif isinstance(data, collections.Mapping) and not data:
data = Feature()
return data | [
"def",
"as_feature",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"Feature",
",",
"FeatureCollection",
")",
")",
":",
"if",
"is_featurelike",
"(",
"data",
")",
":",
"data",
"=",
"Feature",
"(",
"**",
"data",
")",
"elif",
"has_features",
"(",
"data",
")",
":",
"data",
"=",
"FeatureCollection",
"(",
"**",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"collections",
".",
"Sequence",
")",
":",
"data",
"=",
"FeatureCollection",
"(",
"features",
"=",
"data",
")",
"elif",
"has_layer",
"(",
"data",
")",
":",
"data",
"=",
"LayerCollection",
"(",
"data",
")",
"elif",
"has_coordinates",
"(",
"data",
")",
":",
"data",
"=",
"Feature",
"(",
"geometry",
"=",
"data",
")",
"elif",
"isinstance",
"(",
"data",
",",
"collections",
".",
"Mapping",
")",
"and",
"not",
"data",
":",
"data",
"=",
"Feature",
"(",
")",
"return",
"data"
] | Returns a Feature or FeatureCollection.
Arguments:
data -- Sequence or Mapping of Feature-like or FeatureCollection-like data | [
"Returns",
"a",
"Feature",
"or",
"FeatureCollection",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/collections.py#L9-L28 | train |
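Illustrative coercions for as_feature, assuming has_coordinates keys off a top-level 'coordinates' entry; the point geometry is a placeholder.
from spillway.collections import as_feature
pt = as_feature({'type': 'Point', 'coordinates': [0, 0]})  # bare geometry -> Feature(geometry=...)
coll = as_feature([pt])                                    # a Sequence -> FeatureCollection
empty = as_feature({})                                     # empty Mapping -> plain Feature()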
bkg/django-spillway | spillway/collections.py | has_layer | def has_layer(fcollection):
"""Returns true for a multi-layer dict of FeatureCollections."""
for val in six.viewvalues(fcollection):
if has_features(val):
return True
return False | python | def has_layer(fcollection):
"""Returns true for a multi-layer dict of FeatureCollections."""
for val in six.viewvalues(fcollection):
if has_features(val):
return True
return False | [
"def",
"has_layer",
"(",
"fcollection",
")",
":",
"for",
"val",
"in",
"six",
".",
"viewvalues",
"(",
"fcollection",
")",
":",
"if",
"has_features",
"(",
"val",
")",
":",
"return",
"True",
"return",
"False"
] | Returns true for a multi-layer dict of FeatureCollections. | [
"Returns",
"true",
"for",
"a",
"multi",
"-",
"layer",
"dict",
"of",
"FeatureCollections",
"."
] | c488a62642430b005f1e0d4a19e160d8d5964b67 | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/collections.py#L51-L56 | train |
BD2KGenomics/protect | src/protect/expression_profiling/rsem.py | wrap_rsem | def wrap_rsem(job, star_bams, univ_options, rsem_options):
"""
A wrapper for run_rsem using the results from run_star as input.
:param dict star_bams: dict of results from star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict
"""
rsem = job.addChildJobFn(run_rsem, star_bams['rna_transcriptome.bam'],
univ_options, rsem_options, cores=rsem_options['n'],
disk=PromisedRequirement(rsem_disk, star_bams,
rsem_options['index']))
return rsem.rv() | python | def wrap_rsem(job, star_bams, univ_options, rsem_options):
"""
A wrapper for run_rsem using the results from run_star as input.
:param dict star_bams: dict of results from star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict
"""
rsem = job.addChildJobFn(run_rsem, star_bams['rna_transcriptome.bam'],
univ_options, rsem_options, cores=rsem_options['n'],
disk=PromisedRequirement(rsem_disk, star_bams,
rsem_options['index']))
return rsem.rv() | [
"def",
"wrap_rsem",
"(",
"job",
",",
"star_bams",
",",
"univ_options",
",",
"rsem_options",
")",
":",
"rsem",
"=",
"job",
".",
"addChildJobFn",
"(",
"run_rsem",
",",
"star_bams",
"[",
"'rna_transcriptome.bam'",
"]",
",",
"univ_options",
",",
"rsem_options",
",",
"cores",
"=",
"rsem_options",
"[",
"'n'",
"]",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"rsem_disk",
",",
"star_bams",
",",
"rsem_options",
"[",
"'index'",
"]",
")",
")",
"return",
"rsem",
".",
"rv",
"(",
")"
] | A wrapper for run_rsem using the results from run_star as input.
:param dict star_bams: dict of results from star
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict | [
"A",
"wrapper",
"for",
"run_rsem",
"using",
"the",
"results",
"from",
"run_star",
"as",
"input",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/expression_profiling/rsem.py#L36-L54 | train |
BD2KGenomics/protect | src/protect/expression_profiling/rsem.py | run_rsem | def run_rsem(job, rna_bam, univ_options, rsem_options):
"""
Run rsem on the input RNA bam.
ARGUMENTS
:param toil.fileStore.FileID rna_bam: fsID of a transcriptome bam generated by STAR
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'star_transcriptome.bam': rna_bam,
'rsem_index.tar.gz': rsem_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['rsem_index'] = untargz(input_files['rsem_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['--paired-end',
'-p', str(rsem_options['n']),
'--bam',
input_files['star_transcriptome.bam'],
'--no-bam-output',
'/'.join([input_files['rsem_index'], univ_options['ref']]),
'rsem']
docker_call(tool='rsem', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=rsem_options['version'])
output_files = {}
for filename in ('rsem.genes.results', 'rsem.isoforms.results'):
output_files[filename] = job.fileStore.writeGlobalFile('/'.join([work_dir, filename]))
export_results(job, output_files[filename], '/'.join([work_dir, filename]), univ_options,
subfolder='expression')
job.fileStore.logToMaster('Ran rsem on %s successfully' % univ_options['patient'])
return output_files | python | def run_rsem(job, rna_bam, univ_options, rsem_options):
"""
Run rsem on the input RNA bam.
ARGUMENTS
:param toil.fileStore.FileID rna_bam: fsID of a transcriptome bam generated by STAR
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'star_transcriptome.bam': rna_bam,
'rsem_index.tar.gz': rsem_options['index']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['rsem_index'] = untargz(input_files['rsem_index.tar.gz'], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['--paired-end',
'-p', str(rsem_options['n']),
'--bam',
input_files['star_transcriptome.bam'],
'--no-bam-output',
'/'.join([input_files['rsem_index'], univ_options['ref']]),
'rsem']
docker_call(tool='rsem', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=rsem_options['version'])
output_files = {}
for filename in ('rsem.genes.results', 'rsem.isoforms.results'):
output_files[filename] = job.fileStore.writeGlobalFile('/'.join([work_dir, filename]))
export_results(job, output_files[filename], '/'.join([work_dir, filename]), univ_options,
subfolder='expression')
job.fileStore.logToMaster('Ran rsem on %s successfully' % univ_options['patient'])
return output_files | [
"def",
"run_rsem",
"(",
"job",
",",
"rna_bam",
",",
"univ_options",
",",
"rsem_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'star_transcriptome.bam'",
":",
"rna_bam",
",",
"'rsem_index.tar.gz'",
":",
"rsem_options",
"[",
"'index'",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"input_files",
"[",
"'rsem_index'",
"]",
"=",
"untargz",
"(",
"input_files",
"[",
"'rsem_index.tar.gz'",
"]",
",",
"work_dir",
")",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'--paired-end'",
",",
"'-p'",
",",
"str",
"(",
"rsem_options",
"[",
"'n'",
"]",
")",
",",
"'--bam'",
",",
"input_files",
"[",
"'star_transcriptome.bam'",
"]",
",",
"'--no-bam-output'",
",",
"'/'",
".",
"join",
"(",
"[",
"input_files",
"[",
"'rsem_index'",
"]",
",",
"univ_options",
"[",
"'ref'",
"]",
"]",
")",
",",
"'rsem'",
"]",
"docker_call",
"(",
"tool",
"=",
"'rsem'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"rsem_options",
"[",
"'version'",
"]",
")",
"output_files",
"=",
"{",
"}",
"for",
"filename",
"in",
"(",
"'rsem.genes.results'",
",",
"'rsem.isoforms.results'",
")",
":",
"output_files",
"[",
"filename",
"]",
"=",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"filename",
"]",
")",
")",
"export_results",
"(",
"job",
",",
"output_files",
"[",
"filename",
"]",
",",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"filename",
"]",
")",
",",
"univ_options",
",",
"subfolder",
"=",
"'expression'",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran rsem on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"output_files"
] | Run rsem on the input RNA bam.
ARGUMENTS
:param toil.fileStore.FileID rna_bam: fsID of a transcriptome bam generated by STAR
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rsem_options: Options specific to rsem
:return: Dict of gene- and isoform-level expression calls
output_files:
|- 'rsem.genes.results': fsID
+- 'rsem.isoforms.results': fsID
:rtype: dict | [
"Run",
"rsem",
"on",
"the",
"input",
"RNA",
"bam",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/expression_profiling/rsem.py#L57-L95 | train |
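The options dicts above are only ever indexed, so their minimal shape can be read off the calls; a sketch with placeholder values (the fsID, docker tag, and univ_options entries are assumptions).
rsem_options = {
    'n': 4,                     # worker cores, forwarded to rsem as -p
    'index': rsem_index_fsid,   # fsID of rsem_index.tar.gz (hypothetical)
    'version': '1.2.0',         # docker tag handed to docker_call (placeholder)
}
univ_options = {'dockerhub': 'myhub', 'ref': 'hg19', 'patient': 'TEST'}  # placeholders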
idlesign/steampak | steampak/libsteam/resources/overlay.py | Overlay.activate | def activate(self, page=None):
"""Activates overlay with browser, optionally opened at a given page.
:param str page: Overlay page alias (see OVERLAY_PAGE_*)
or a custom URL.
"""
page = page or ''
if '://' in page:
self._iface.activate_overlay_url(page)
else:
self._iface.activate_overlay_game(page) | python | def activate(self, page=None):
"""Activates overlay with browser, optionally opened at a given page.
:param str page: Overlay page alias (see OVERLAY_PAGE_*)
or a custom URL.
"""
page = page or ''
if '://' in page:
self._iface.activate_overlay_url(page)
else:
self._iface.activate_overlay_game(page) | [
"def",
"activate",
"(",
"self",
",",
"page",
"=",
"None",
")",
":",
"page",
"=",
"page",
"or",
"''",
"if",
"'://'",
"in",
"page",
":",
"self",
".",
"_iface",
".",
"activate_overlay_url",
"(",
"page",
")",
"else",
":",
"self",
".",
"_iface",
".",
"activate_overlay_game",
"(",
"page",
")"
] | Activates overlay with browser, optionally opened at a given page.
:param str page: Overlay page alias (see OVERLAY_PAGE_*)
or a custom URL. | [
"Activates",
"overlay",
"with",
"browser",
"optionally",
"opened",
"at",
"a",
"given",
"page",
"."
] | cb3f2c737e272b0360802d947e388df7e34f50f3 | https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/overlay.py#L30-L43 | train |
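A usage sketch for Overlay.activate; the api.overlay attribute path and the OVERLAY_PAGE_FRIENDS constant name are assumptions about how the wrapper is exposed.
api.overlay.activate()                        # bare overlay
api.overlay.activate('https://example.com')   # contains '://', so activate_overlay_url is used
api.overlay.activate(OVERLAY_PAGE_FRIENDS)    # page alias, so activate_overlay_game is used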
drslump/pyshould | pyshould/dsl.py | any_of | def any_of(value, *args):
""" At least one of the items in value should match """
if len(args):
value = (value,) + args
return ExpectationAny(value) | python | def any_of(value, *args):
""" At least one of the items in value should match """
if len(args):
value = (value,) + args
return ExpectationAny(value) | [
"def",
"any_of",
"(",
"value",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"value",
"=",
"(",
"value",
",",
")",
"+",
"args",
"return",
"ExpectationAny",
"(",
"value",
")"
] | At least one of the items in value should match | [
"At",
"least",
"one",
"of",
"the",
"items",
"in",
"value",
"should",
"match"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/dsl.py#L33-L39 | train |
drslump/pyshould | pyshould/dsl.py | all_of | def all_of(value, *args):
""" All the items in value should match """
if len(args):
value = (value,) + args
return ExpectationAll(value) | python | def all_of(value, *args):
""" All the items in value should match """
if len(args):
value = (value,) + args
return ExpectationAll(value) | [
"def",
"all_of",
"(",
"value",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"value",
"=",
"(",
"value",
",",
")",
"+",
"args",
"return",
"ExpectationAll",
"(",
"value",
")"
] | All the items in value should match | [
"All",
"the",
"items",
"in",
"value",
"should",
"match"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/dsl.py#L42-L48 | train |
drslump/pyshould | pyshould/dsl.py | none_of | def none_of(value, *args):
""" None of the items in value should match """
if len(args):
value = (value,) + args
return ExpectationNone(value) | python | def none_of(value, *args):
""" None of the items in value should match """
if len(args):
value = (value,) + args
return ExpectationNone(value) | [
"def",
"none_of",
"(",
"value",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"value",
"=",
"(",
"value",
",",
")",
"+",
"args",
"return",
"ExpectationNone",
"(",
"value",
")"
] | None of the items in value should match | [
"None",
"of",
"the",
"items",
"in",
"value",
"should",
"match"
] | 7210859d4c84cfbaa64f91b30c2a541aea788ddf | https://github.com/drslump/pyshould/blob/7210859d4c84cfbaa64f91b30c2a541aea788ddf/pyshould/dsl.py#L51-L57 | train |
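The three DSL helpers above differ only in the Expectation wrapper they build, and each accepts either a single iterable or varargs. A quick sketch:
from pyshould.dsl import any_of, all_of, none_of
any_of(1, 2, 3)      # ExpectationAny over (1, 2, 3)
all_of([1, 2, 3])    # ExpectationAll over the given list
none_of('a', 'b')    # ExpectationNone over ('a', 'b')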
BD2KGenomics/protect | src/protect/qc/rna.py | run_cutadapt | def run_cutadapt(job, fastqs, univ_options, cutadapt_options):
"""
Runs cutadapt on the input RNA fastq files.
:param list fastqs: List of fsIDs for an input RNA-Seq fastq pair
:param dict univ_options: Dict of universal options used by almost all tools
:param dict cutadapt_options: Options specific to cutadapt
:return: List of fsIDs of cutadapted fastqs
:rtype: list[toil.fileStore.FileID]
"""
work_dir = os.getcwd()
input_files = {
'rna_1.fastq': fastqs[0],
'rna_2.fastq': fastqs[1]}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['rna_1.fastq']) else ''
if gz:
for read_file in 'rna_1.fastq', 'rna_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['-a', cutadapt_options['a'], # Fwd read 3' adapter
'-A', cutadapt_options['A'], # Rev read 3' adapter
'-m', '35', # Minimum size of read
'-o', docker_path('rna_cutadapt_1.fastq.gz'), # Output for R1
'-p', docker_path('rna_cutadapt_2.fastq.gz'), # Output for R2
input_files['rna_1.fastq' + gz],
input_files['rna_2.fastq' + gz]]
docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=cutadapt_options['version'])
output_files = []
for fastq_file in ['rna_cutadapt_1.fastq.gz', 'rna_cutadapt_2.fastq.gz']:
output_files.append(job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file])))
job.fileStore.logToMaster('Ran cutadapt on %s successfully' % univ_options['patient'])
return output_files | python | def run_cutadapt(job, fastqs, univ_options, cutadapt_options):
"""
Runs cutadapt on the input RNA fastq files.
:param list fastqs: List of fsIDs for an input RNA-Seq fastq pair
:param dict univ_options: Dict of universal options used by almost all tools
:param dict cutadapt_options: Options specific to cutadapt
:return: List of fsIDs of cutadapted fastqs
:rtype: list[toil.fileStore.FileID]
"""
work_dir = os.getcwd()
input_files = {
'rna_1.fastq': fastqs[0],
'rna_2.fastq': fastqs[1]}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
# Handle gzipped file
gz = '.gz' if is_gzipfile(input_files['rna_1.fastq']) else ''
if gz:
for read_file in 'rna_1.fastq', 'rna_2.fastq':
os.symlink(read_file, read_file + gz)
input_files[read_file + gz] = input_files[read_file] + gz
input_files = {key: docker_path(path) for key, path in input_files.items()}
parameters = ['-a', cutadapt_options['a'], # Fwd read 3' adapter
'-A', cutadapt_options['A'], # Rev read 3' adapter
'-m', '35', # Minimum size of read
'-o', docker_path('rna_cutadapt_1.fastq.gz'), # Output for R1
'-p', docker_path('rna_cutadapt_2.fastq.gz'), # Output for R2
input_files['rna_1.fastq' + gz],
input_files['rna_2.fastq' + gz]]
docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=cutadapt_options['version'])
output_files = []
for fastq_file in ['rna_cutadapt_1.fastq.gz', 'rna_cutadapt_2.fastq.gz']:
output_files.append(job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file])))
job.fileStore.logToMaster('Ran cutadapt on %s successfully' % univ_options['patient'])
return output_files | [
"def",
"run_cutadapt",
"(",
"job",
",",
"fastqs",
",",
"univ_options",
",",
"cutadapt_options",
")",
":",
"work_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"input_files",
"=",
"{",
"'rna_1.fastq'",
":",
"fastqs",
"[",
"0",
"]",
",",
"'rna_2.fastq'",
":",
"fastqs",
"[",
"1",
"]",
"}",
"input_files",
"=",
"get_files_from_filestore",
"(",
"job",
",",
"input_files",
",",
"work_dir",
",",
"docker",
"=",
"False",
")",
"gz",
"=",
"'.gz'",
"if",
"is_gzipfile",
"(",
"input_files",
"[",
"'rna_1.fastq'",
"]",
")",
"else",
"''",
"if",
"gz",
":",
"for",
"read_file",
"in",
"'rna_1.fastq'",
",",
"'rna_2.fastq'",
":",
"os",
".",
"symlink",
"(",
"read_file",
",",
"read_file",
"+",
"gz",
")",
"input_files",
"[",
"read_file",
"+",
"gz",
"]",
"=",
"input_files",
"[",
"read_file",
"]",
"+",
"gz",
"input_files",
"=",
"{",
"key",
":",
"docker_path",
"(",
"path",
")",
"for",
"key",
",",
"path",
"in",
"input_files",
".",
"items",
"(",
")",
"}",
"parameters",
"=",
"[",
"'-a'",
",",
"cutadapt_options",
"[",
"'a'",
"]",
",",
"'-A'",
",",
"cutadapt_options",
"[",
"'A'",
"]",
",",
"'-m'",
",",
"'35'",
",",
"'-o'",
",",
"docker_path",
"(",
"'rna_cutadapt_1.fastq.gz'",
")",
",",
"'-p'",
",",
"docker_path",
"(",
"'rna_cutadapt_2.fastq.gz'",
")",
",",
"input_files",
"[",
"'rna_1.fastq'",
"+",
"gz",
"]",
",",
"input_files",
"[",
"'rna_2.fastq'",
"+",
"gz",
"]",
"]",
"docker_call",
"(",
"tool",
"=",
"'cutadapt'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
",",
"tool_version",
"=",
"cutadapt_options",
"[",
"'version'",
"]",
")",
"output_files",
"=",
"[",
"]",
"for",
"fastq_file",
"in",
"[",
"'rna_cutadapt_1.fastq.gz'",
",",
"'rna_cutadapt_2.fastq.gz'",
"]",
":",
"output_files",
".",
"append",
"(",
"job",
".",
"fileStore",
".",
"writeGlobalFile",
"(",
"'/'",
".",
"join",
"(",
"[",
"work_dir",
",",
"fastq_file",
"]",
")",
")",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Ran cutadapt on %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"output_files"
] | Runs cutadapt on the input RNA fastq files.
:param list fastqs: List of fsIDs for an input RNA-Seq fastq pair
:param dict univ_options: Dict of universal options used by almost all tools
:param dict cutadapt_options: Options specific to cutadapt
:return: List of fsIDs of cutadapted fastqs
:rtype: list[toil.fileStore.FileID] | [
"Runs",
"cutadapt",
"on",
"the",
"input",
"RNA",
"fastq",
"files",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/qc/rna.py#L29-L64 | train |
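A sketch of the inputs run_cutadapt expects, inferred from the indexing above; the adapter sequences, fsIDs, and docker tag are placeholders.
cutadapt_options = {
    'a': 'AGATCGGAAGAGC',   # fwd-read 3' adapter (placeholder sequence)
    'A': 'AGATCGGAAGAGC',   # rev-read 3' adapter (placeholder sequence)
    'version': '1.9.1',     # docker tag for the cutadapt image (placeholder)
}
fastqs = [rna_1_fsid, rna_2_fsid]  # hypothetical fsIDs; gzipped or plain fastqs both work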
inveniosoftware/invenio-access | examples/app.py | index | def index():
"""Basic test view."""
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
if current_user.is_anonymous:
return render_template("invenio_access/open.html",
actions=actions,
identity=identity)
else:
return render_template("invenio_access/limited.html",
message='',
actions=actions,
identity=identity) | python | def index():
"""Basic test view."""
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
if current_user.is_anonymous:
return render_template("invenio_access/open.html",
actions=actions,
identity=identity)
else:
return render_template("invenio_access/limited.html",
message='',
actions=actions,
identity=identity) | [
"def",
"index",
"(",
")",
":",
"identity",
"=",
"g",
".",
"identity",
"actions",
"=",
"{",
"}",
"for",
"action",
"in",
"access",
".",
"actions",
".",
"values",
"(",
")",
":",
"actions",
"[",
"action",
".",
"value",
"]",
"=",
"DynamicPermission",
"(",
"action",
")",
".",
"allows",
"(",
"identity",
")",
"if",
"current_user",
".",
"is_anonymous",
":",
"return",
"render_template",
"(",
"\"invenio_access/open.html\"",
",",
"actions",
"=",
"actions",
",",
"identity",
"=",
"identity",
")",
"else",
":",
"return",
"render_template",
"(",
"\"invenio_access/limited.html\"",
",",
"message",
"=",
"''",
",",
"actions",
"=",
"actions",
",",
"identity",
"=",
"identity",
")"
] | Basic test view. | [
"Basic",
"test",
"view",
"."
] | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/examples/app.py#L109-L124 | train |
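The same permission probe can gate logic outside a template; a hedged sketch reusing only names already bound in the module above.
action = list(access.actions.values())[0]                # any registered action
allowed = DynamicPermission(action).allows(g.identity)   # True if the identity may act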
inveniosoftware/invenio-access | examples/app.py | role_admin | def role_admin():
"""View only allowed to admin role."""
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
message = 'You are opening a page requiring the "admin-access" permission'
return render_template("invenio_access/limited.html",
message=message,
actions=actions,
identity=identity) | python | def role_admin():
"""View only allowed to admin role."""
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
message = 'You are opening a page requiring the "admin-access" permission'
return render_template("invenio_access/limited.html",
message=message,
actions=actions,
identity=identity) | [
"def",
"role_admin",
"(",
")",
":",
"identity",
"=",
"g",
".",
"identity",
"actions",
"=",
"{",
"}",
"for",
"action",
"in",
"access",
".",
"actions",
".",
"values",
"(",
")",
":",
"actions",
"[",
"action",
".",
"value",
"]",
"=",
"DynamicPermission",
"(",
"action",
")",
".",
"allows",
"(",
"identity",
")",
"message",
"=",
"'You are opening a page requiring the \"admin-access\" permission'",
"return",
"render_template",
"(",
"\"invenio_access/limited.html\"",
",",
"message",
"=",
"message",
",",
"actions",
"=",
"actions",
",",
"identity",
"=",
"identity",
")"
] | View only allowed to the admin role. | [
"View",
"only",
"allowed",
"to",
"admin",
"role",
"."
] | 3b033a4bdc110eb2f7e9f08f0744a780884bfc80 | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/examples/app.py#L134-L145 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | read_fastas | def read_fastas(input_files):
"""
Read the tumor and normal fastas into a joint dict.
:param dict input_files: A dict containing filename: filepath for T_ and N_ transgened files.
:return: The read fastas in a dictionary of tuples
:rtype: dict
"""
tumor_file = [y for x, y in input_files.items() if x.startswith('T')][0]
normal_file = [y for x, y in input_files.items() if x.startswith('N')][0]
output_files = defaultdict(list)
output_files = _read_fasta(tumor_file, output_files)
num_entries = len(output_files)
output_files = _read_fasta(normal_file, output_files)
assert len(output_files) == num_entries
return output_files | python | def read_fastas(input_files):
"""
Read the tumor and normal fastas into a joint dict.
:param dict input_files: A dict containing filename: filepath for T_ and N_ transgened files.
:return: The read fastas in a dictionary of tuples
:rtype: dict
"""
tumor_file = [y for x, y in input_files.items() if x.startswith('T')][0]
normal_file = [y for x, y in input_files.items() if x.startswith('N')][0]
output_files = defaultdict(list)
output_files = _read_fasta(tumor_file, output_files)
num_entries = len(output_files)
output_files = _read_fasta(normal_file, output_files)
assert len(output_files) == num_entries
return output_files | [
"def",
"read_fastas",
"(",
"input_files",
")",
":",
"tumor_file",
"=",
"[",
"y",
"for",
"x",
",",
"y",
"in",
"input_files",
".",
"items",
"(",
")",
"if",
"x",
".",
"startswith",
"(",
"'T'",
")",
"]",
"[",
"0",
"]",
"normal_file",
"=",
"[",
"y",
"for",
"x",
",",
"y",
"in",
"input_files",
".",
"items",
"(",
")",
"if",
"x",
".",
"startswith",
"(",
"'N'",
")",
"]",
"[",
"0",
"]",
"output_files",
"=",
"defaultdict",
"(",
"list",
")",
"output_files",
"=",
"_read_fasta",
"(",
"tumor_file",
",",
"output_files",
")",
"num_entries",
"=",
"len",
"(",
"output_files",
")",
"output_files",
"=",
"_read_fasta",
"(",
"normal_file",
",",
"output_files",
")",
"assert",
"len",
"(",
"output_files",
")",
"==",
"num_entries",
"return",
"output_files"
] | Read the tumor and normal fastas into a joint dict.
:param dict input_files: A dict containing filename: filepath for T_ and N_ transgened files.
:return: The read fastas in a dictionary of tuples
:rtype: dict | [
"Read",
"the",
"tumor",
"and",
"normal",
"fastas",
"into",
"a",
"joint",
"dict",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L140-L155 | train |
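A calling sketch for read_fastas; the filenames are illustrative, and only the T_/N_ prefixes matter for picking the tumor and normal files.
input_files = {'T_pepts.fa': '/data/T_pepts.fa', 'N_pepts.fa': '/data/N_pepts.fa'}
peptides = read_fastas(input_files)  # {read_name: [tumor seq line(s), then normal seq line(s)]}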
BD2KGenomics/protect | src/protect/binding_prediction/common.py | _read_fasta | def _read_fasta(fasta_file, output_dict):
"""
Read the peptide fasta into an existing dict.
:param str fasta_file: The peptide file
:param dict output_dict: The dict to append results to.
:return: output_dict
:rtype: dict
"""
read_name = None
with open(fasta_file, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('>'):
read_name = line.lstrip('>')
else:
assert read_name is not None, line
output_dict[read_name].append(line.strip())
return output_dict | python | def _read_fasta(fasta_file, output_dict):
"""
Read the peptide fasta into an existing dict.
:param str fasta_file: The peptide file
:param dict output_dict: The dict to append results to.
:return: output_dict
:rtype: dict
"""
read_name = None
with open(fasta_file, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('>'):
read_name = line.lstrip('>')
else:
assert read_name is not None, line
output_dict[read_name].append(line.strip())
return output_dict | [
"def",
"_read_fasta",
"(",
"fasta_file",
",",
"output_dict",
")",
":",
"read_name",
"=",
"None",
"with",
"open",
"(",
"fasta_file",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'>'",
")",
":",
"read_name",
"=",
"line",
".",
"lstrip",
"(",
"'>'",
")",
"else",
":",
"assert",
"read_name",
"is",
"not",
"None",
",",
"line",
"output_dict",
"[",
"read_name",
"]",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"return",
"output_dict"
] | Read the peptide fasta into an existing dict.
:param str fasta_file: The peptide file
:param dict output_dict: The dict to append results to.
:return: output_dict
:rtype: dict | [
"Read",
"the",
"peptide",
"fasta",
"into",
"an",
"existing",
"dict",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L158-L178 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | _process_consensus_mhcii | def _process_consensus_mhcii(mhc_file, normal=False):
"""
Process the results from running IEDB MHCII binding predictions using the consensus method into
a pandas dataframe.
:param str mhc_file: Output file containing consensus mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
core_col = None # Variable to hold the column number with the core
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])
with open(mhc_file, 'r') as mf:
peptides = set()
for line in mf:
# Skip header lines
if not line.startswith('HLA'):
continue
line = line.strip().split('\t')
allele = line[0]
pept = line[4]
pred = line[6]
if core_col:
core = line[core_col]
else:
methods = line[5].lstrip('Consensus(').rstrip(')')
methods = methods.split(',')
if 'NN' in methods:
core_col = 13
elif 'netMHCIIpan' in methods:
core_col = 17
elif 'Sturniolo' in methods:
core_col = 19
elif 'SMM' in methods:
core_col = 10
core = line[core_col] if core_col else 'NOCORE'
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, core]
results.drop_duplicates(inplace=True)
return results | python | def _process_consensus_mhcii(mhc_file, normal=False):
"""
Process the results from running IEDB MHCII binding predictions using the consensus method into
a pandas dataframe.
:param str mhc_file: Output file containing consensus mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
core_col = None # Variable to hold the column number with the core
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])
with open(mhc_file, 'r') as mf:
peptides = set()
for line in mf:
# Skip header lines
if not line.startswith('HLA'):
continue
line = line.strip().split('\t')
allele = line[0]
pept = line[4]
pred = line[6]
if core_col:
core = line[core_col]
else:
methods = line[5].lstrip('Consensus(').rstrip(')')
methods = methods.split(',')
if 'NN' in methods:
core_col = 13
elif 'netMHCIIpan' in methods:
core_col = 17
elif 'Sturniolo' in methods:
core_col = 19
elif 'SMM' in methods:
core_col = 10
core = line[core_col] if core_col else 'NOCORE'
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, core]
results.drop_duplicates(inplace=True)
return results | [
"def",
"_process_consensus_mhcii",
"(",
"mhc_file",
",",
"normal",
"=",
"False",
")",
":",
"core_col",
"=",
"None",
"results",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'allele'",
",",
"'pept'",
",",
"'tumor_pred'",
",",
"'core'",
"]",
")",
"with",
"open",
"(",
"mhc_file",
",",
"'r'",
")",
"as",
"mf",
":",
"peptides",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"mf",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'HLA'",
")",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"allele",
"=",
"line",
"[",
"0",
"]",
"pept",
"=",
"line",
"[",
"4",
"]",
"pred",
"=",
"line",
"[",
"6",
"]",
"if",
"core_col",
":",
"core",
"=",
"line",
"[",
"core_col",
"]",
"else",
":",
"methods",
"=",
"line",
"[",
"5",
"]",
".",
"lstrip",
"(",
"'Consensus('",
")",
".",
"rstrip",
"(",
"')'",
")",
"methods",
"=",
"methods",
".",
"split",
"(",
"','",
")",
"if",
"'NN'",
"in",
"methods",
":",
"core_col",
"=",
"13",
"elif",
"'netMHCIIpan'",
"in",
"methods",
":",
"core_col",
"=",
"17",
"elif",
"'Sturniolo'",
"in",
"methods",
":",
"core_col",
"=",
"19",
"elif",
"'SMM'",
"in",
"methods",
":",
"core_col",
"=",
"10",
"core",
"=",
"line",
"[",
"core_col",
"]",
"if",
"core_col",
"else",
"'NOCORE'",
"if",
"float",
"(",
"pred",
")",
">",
"5.00",
"and",
"not",
"normal",
":",
"continue",
"results",
".",
"loc",
"[",
"len",
"(",
"results",
")",
"]",
"=",
"[",
"allele",
",",
"pept",
",",
"pred",
",",
"core",
"]",
"results",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"return",
"results"
] | Process the results from running IEDB MHCII binding predictions using the consensus method into
a pandas dataframe.
:param str mhc_file: Output file containing consensus mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame | [
"Process",
"the",
"results",
"from",
"running",
"IEDB",
"MHCII",
"binding",
"predictions",
"using",
"the",
"consensus",
"method",
"into",
"a",
"pandas",
"dataframe",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L181-L221 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | _process_net_mhcii | def _process_net_mhcii(mhc_file, normal=False):
"""
Process the results from running NetMHCIIpan binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing netmhciipan mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core', 'peptide_name'])
with open(mhc_file, 'r') as mf:
peptides = set()
# Get the allele from the first line and skip the second line
allele = re.sub('-DQB', '/DQB', mf.readline().strip())
_ = mf.readline()
for line in mf:
line = line.strip().split('\t')
pept = line[1]
pred = line[5]
core = 'NOCORE'
peptide_name = line[2]
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, core, peptide_name]
results.drop_duplicates(inplace=True)
return results | python | def _process_net_mhcii(mhc_file, normal=False):
"""
Process the results from running NetMHCIIpan binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing netmhciipan mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core', 'peptide_name'])
with open(mhc_file, 'r') as mf:
peptides = set()
# Get the allele from the first line and skip the second line
allele = re.sub('-DQB', '/DQB', mf.readline().strip())
_ = mf.readline()
for line in mf:
line = line.strip().split('\t')
pept = line[1]
pred = line[5]
core = 'NOCORE'
peptide_name = line[2]
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, core, peptide_name]
results.drop_duplicates(inplace=True)
return results | [
"def",
"_process_net_mhcii",
"(",
"mhc_file",
",",
"normal",
"=",
"False",
")",
":",
"results",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'allele'",
",",
"'pept'",
",",
"'tumor_pred'",
",",
"'core'",
",",
"'peptide_name'",
"]",
")",
"with",
"open",
"(",
"mhc_file",
",",
"'r'",
")",
"as",
"mf",
":",
"peptides",
"=",
"set",
"(",
")",
"allele",
"=",
"re",
".",
"sub",
"(",
"'-DQB'",
",",
"'/DQB'",
",",
"mf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"_",
"=",
"mf",
".",
"readline",
"(",
")",
"for",
"line",
"in",
"mf",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"pept",
"=",
"line",
"[",
"1",
"]",
"pred",
"=",
"line",
"[",
"5",
"]",
"core",
"=",
"'NOCORE'",
"peptide_name",
"=",
"line",
"[",
"2",
"]",
"if",
"float",
"(",
"pred",
")",
">",
"5.00",
"and",
"not",
"normal",
":",
"continue",
"results",
".",
"loc",
"[",
"len",
"(",
"results",
")",
"]",
"=",
"[",
"allele",
",",
"pept",
",",
"pred",
",",
"core",
",",
"peptide_name",
"]",
"results",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"return",
"results"
] | Process the results from running NetMHCIIpan binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing netmhciipan mhcii:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame | [
"Process",
"the",
"results",
"from",
"running",
"NetMHCIIpan",
"binding",
"predictions",
"into",
"a",
"pandas",
"dataframe",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L253-L278 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | _process_mhci | def _process_mhci(mhc_file, normal=False):
"""
Process the results from running IEDB MHCI binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing IEDB mhci:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])
with open(mhc_file, 'r') as mf:
peptides = set()
for line in mf:
# Skip header lines
if not line.startswith('HLA'):
continue
line = line.strip().split('\t')
allele = line[0]
pept = line[5]
pred = line[7]
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, pept]
results.drop_duplicates(inplace=True)
return results | python | def _process_mhci(mhc_file, normal=False):
"""
Process the results from running IEDB MHCI binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing IEDB mhci:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame
"""
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])
with open(mhc_file, 'r') as mf:
peptides = set()
for line in mf:
# Skip header lines
if not line.startswith('HLA'):
continue
line = line.strip().split('\t')
allele = line[0]
pept = line[5]
pred = line[7]
if float(pred) > 5.00 and not normal:
continue
results.loc[len(results)] = [allele, pept, pred, pept]
results.drop_duplicates(inplace=True)
return results | [
"def",
"_process_mhci",
"(",
"mhc_file",
",",
"normal",
"=",
"False",
")",
":",
"results",
"=",
"pandas",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'allele'",
",",
"'pept'",
",",
"'tumor_pred'",
",",
"'core'",
"]",
")",
"with",
"open",
"(",
"mhc_file",
",",
"'r'",
")",
"as",
"mf",
":",
"peptides",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"mf",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'HLA'",
")",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"allele",
"=",
"line",
"[",
"0",
"]",
"pept",
"=",
"line",
"[",
"5",
"]",
"pred",
"=",
"line",
"[",
"7",
"]",
"if",
"float",
"(",
"pred",
")",
">",
"5.00",
"and",
"not",
"normal",
":",
"continue",
"results",
".",
"loc",
"[",
"len",
"(",
"results",
")",
"]",
"=",
"[",
"allele",
",",
"pept",
",",
"pred",
",",
"pept",
"]",
"results",
".",
"drop_duplicates",
"(",
"inplace",
"=",
"True",
")",
"return",
"results"
] | Process the results from running IEDB MHCI binding predictions into a pandas dataframe.
:param str mhc_file: Output file containing IEDB mhci:peptide binding predictions
:param bool normal: Is this processing the results of a normal?
:return: Results in a tabular format
:rtype: pandas.DataFrame | [
"Process",
"the",
"results",
"from",
"running",
"IEDB",
"MHCI",
"binding",
"predictions",
"into",
"a",
"pandas",
"dataframe",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L281-L305 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | pept_diff | def pept_diff(p1, p2):
"""
Return the number of differences between 2 peptides
:param str p1: Peptide 1
:param str p2: Peptide 2
:return: The number of differences between the peptides
:rtype: int
>>> pept_diff('ABCDE', 'ABCDF')
1
>>> pept_diff('ABCDE', 'ABDFE')
2
>>> pept_diff('ABCDE', 'EDCBA')
4
>>> pept_diff('ABCDE', 'ABCDE')
0
"""
if len(p1) != len(p2):
return -1
else:
return sum([p1[i] != p2[i] for i in range(len(p1))]) | python | def pept_diff(p1, p2):
"""
Return the number of differences between 2 peptides
:param str p1: Peptide 1
:param str p2: Peptide 2
:return: The number of differences between the peptides
:rtype: int
>>> pept_diff('ABCDE', 'ABCDF')
1
>>> pept_diff('ABCDE', 'ABDFE')
2
>>> pept_diff('ABCDE', 'EDCBA')
4
>>> pept_diff('ABCDE', 'ABCDE')
0
"""
if len(p1) != len(p2):
return -1
else:
return sum([p1[i] != p2[i] for i in range(len(p1))]) | [
"def",
"pept_diff",
"(",
"p1",
",",
"p2",
")",
":",
"if",
"len",
"(",
"p1",
")",
"!=",
"len",
"(",
"p2",
")",
":",
"return",
"-",
"1",
"else",
":",
"return",
"sum",
"(",
"[",
"p1",
"[",
"i",
"]",
"!=",
"p2",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"p1",
")",
")",
"]",
")"
] | Return the number of differences between 2 peptides
:param str p1: Peptide 1
:param str p2: Peptide 2
:return: The number of differences between the peptides
:rtype: int
>>> pept_diff('ABCDE', 'ABCDF')
1
>>> pept_diff('ABCDE', 'ABDFE')
2
>>> pept_diff('ABCDE', 'EDCBA')
4
>>> pept_diff('ABCDE', 'ABCDE')
0 | [
"Return",
"the",
"number",
"of",
"differences",
"betweeen",
"2",
"peptides"
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L308-L329 | train |
BD2KGenomics/protect | src/protect/binding_prediction/common.py | print_mhc_peptide | def print_mhc_peptide(neoepitope_info, peptides, pepmap, outfile, netmhc=False):
"""
Accept data about one neoepitope from merge_mhc_peptide_calls and print it to outfile. This is
a generic module to reduce code redundancy.
:param pandas.core.frame neoepitope_info: object containing allele, pept, pred, core,
normal_pept, normal_pred
:param dict peptides: Dict of pepname: pep sequence for all IARS considered
:param dict pepmap: Dict containing the contents of the peptide map file.
:param file outfile: An open file descriptor to the output file
:param bool netmhc: Does this record correspond to a netmhcIIpan record? These are processed
differently.
"""
if netmhc:
peptide_names = [neoepitope_info.peptide_name]
else:
peptide_names = [x for x, y in peptides.items() if neoepitope_info.pept in y]
# Convert named tuple to dict so it can be modified
neoepitope_info = neoepitope_info._asdict()
# Handle fusion peptides (They are characterized by having all N's as the normal partner)
if neoepitope_info['normal_pept'] == 'N' * len(neoepitope_info['pept']):
neoepitope_info['normal_pept'] = neoepitope_info['normal_pred'] = 'NA'
# For each matching peptide name, print the full record along with its peptide map entry
for peptide_name in peptide_names:
print('{ni[allele]}\t'
'{ni[pept]}\t'
'{ni[normal_pept]}\t'
'{pname}\t'
'{ni[core]}\t'
'0\t'
'{ni[tumor_pred]}\t'
'{ni[normal_pred]}\t'
'{pmap}'.format(ni=neoepitope_info, pname=peptide_name,
pmap=pepmap[peptide_name]), file=outfile)
return None | python | def print_mhc_peptide(neoepitope_info, peptides, pepmap, outfile, netmhc=False):
"""
Accept data about one neoepitope from merge_mhc_peptide_calls and print it to outfile. This is
a generic module to reduce code redundancy.
:param pandas.core.frame neoepitope_info: object containing allele, pept, pred, core,
normal_pept, normal_pred
:param dict peptides: Dict of pepname: pep sequence for all IARS considered
:param dict pepmap: Dict containing the contents of the peptide map file.
:param file outfile: An open file descriptor to the output file
:param bool netmhc: Does this record correspond to a netmhcIIpan record? These are processed
differently.
"""
if netmhc:
peptide_names = [neoepitope_info.peptide_name]
else:
peptide_names = [x for x, y in peptides.items() if neoepitope_info.pept in y]
# Convert named tuple to dict so it can be modified
neoepitope_info = neoepitope_info._asdict()
# Handle fusion peptides (They are characterized by having all N's as the normal partner)
if neoepitope_info['normal_pept'] == 'N' * len(neoepitope_info['pept']):
neoepitope_info['normal_pept'] = neoepitope_info['normal_pred'] = 'NA'
# For each matching peptide name, print the full record along with its peptide map entry
for peptide_name in peptide_names:
print('{ni[allele]}\t'
'{ni[pept]}\t'
'{ni[normal_pept]}\t'
'{pname}\t'
'{ni[core]}\t'
'0\t'
'{ni[tumor_pred]}\t'
'{ni[normal_pred]}\t'
'{pmap}'.format(ni=neoepitope_info, pname=peptide_name,
pmap=pepmap[peptide_name]), file=outfile)
return None | [
"def",
"print_mhc_peptide",
"(",
"neoepitope_info",
",",
"peptides",
",",
"pepmap",
",",
"outfile",
",",
"netmhc",
"=",
"False",
")",
":",
"if",
"netmhc",
":",
"peptide_names",
"=",
"[",
"neoepitope_info",
".",
"peptide_name",
"]",
"else",
":",
"peptide_names",
"=",
"[",
"x",
"for",
"x",
",",
"y",
"in",
"peptides",
".",
"items",
"(",
")",
"if",
"neoepitope_info",
".",
"pept",
"in",
"y",
"]",
"neoepitope_info",
"=",
"neoepitope_info",
".",
"_asdict",
"(",
")",
"if",
"neoepitope_info",
"[",
"'normal_pept'",
"]",
"==",
"'N'",
"*",
"len",
"(",
"neoepitope_info",
"[",
"'pept'",
"]",
")",
":",
"neoepitope_info",
"[",
"'normal_pept'",
"]",
"=",
"neoepitope_info",
"[",
"'normal_pred'",
"]",
"=",
"'NA'",
"for",
"peptide_name",
"in",
"peptide_names",
":",
"print",
"(",
"'{ni[allele]}\\t'",
"'{ni[pept]}\\t'",
"'{ni[normal_pept]}\\t'",
"'{pname}\\t'",
"'{ni[core]}\\t'",
"'0\\t'",
"'{ni[tumor_pred]}\\t'",
"'{ni[normal_pred]}\\t'",
"'{pmap}'",
".",
"format",
"(",
"ni",
"=",
"neoepitope_info",
",",
"pname",
"=",
"peptide_name",
",",
"pmap",
"=",
"pepmap",
"[",
"peptide_name",
"]",
")",
",",
"file",
"=",
"outfile",
")",
"return",
"None"
] | Accept data about one neoepitope from merge_mhc_peptide_calls and print it to outfile. This is
a generic module to reduce code redundancy.
:param pandas.core.frame neoepitope_info: object containing allele, pept, pred, core,
normal_pept, normal_pred
:param dict peptides: Dict of pepname: pep sequence for all IARS considered
:param dict pepmap: Dict containing the contents of the peptide map file.
:param file outfile: An open file descriptor to the output file
:param bool netmhc: Does this record correspond to a netmhcIIpan record? These are processed
differently. | [
"Accept",
"data",
"about",
"one",
"neoepitope",
"from",
"merge_mhc_peptide_calls",
"and",
"print",
"it",
"to",
"outfile",
".",
"This",
"is",
"a",
"generic",
"module",
"to",
"reduce",
"code",
"redundancy",
"."
] | 06310682c50dcf8917b912c8e551299ff7ee41ce | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/binding_prediction/common.py#L581-L615 | train |
rs/domcheck | domcheck/__init__.py | check | def check(domain, prefix, code, strategies='*'):
"""
Check the ownership of a domain by going through a series of strategies.
If at least one strategy succeeds, the domain is considered verified,
and this method returns True.
The prefix is a fixed DNS-safe string like "yourservice-domain-verification"
and the code is a random value associated with this domain. It is advised to
prefix the code with a fixed value that is unique to your service, like
"yourservice2k3dWdk9dwz".
By default all available strategies are tested. You can limit the check
to a subset of strategies by passing a comma-separated list of
strategy names like "dns_txt,dns_cname". See the "strategies" module
for a full list of available strategies.
"""
if strategies == '*' or 'dns_txt' in strategies:
if check_dns_txt(domain, prefix, code):
return True
if strategies == '*' or 'dns_cname' in strategies:
if check_dns_cname(domain, prefix, code):
return True
if strategies == '*' or 'meta_tag' in strategies:
if check_meta_tag(domain, prefix, code):
return True
if strategies == '*' or 'html_file' in strategies:
if check_html_file(domain, prefix, code):
return True
return False | python | def check(domain, prefix, code, strategies='*'):
"""
Check the ownership of a domain by going through a series of strategies.
If at least one strategy succeeds, the domain is considered verified,
and this method returns true.
The prefix is a fixed DNS safe string like "yourservice-domain-verification"
and the code is a random value associated to this domain. It is advised to
prefix the code by a fixed value that is unique to your service like
"yourservice2k3dWdk9dwz".
By default all available strategies are tested. You can limit the check
to a limited set of strategies by passing a comma separated list of
strategy names like "dns_txt,dns_cname". See the "strategies" module
for a full list of available strategies.
"""
if strategies == '*' or 'dns_txt' in strategies:
if check_dns_txt(domain, prefix, code):
return True
if strategies == '*' or 'dns_cname' in strategies:
if check_dns_cname(domain, prefix, code):
return True
if strategies == '*' or 'meta_tag' in strategies:
if check_meta_tag(domain, prefix, code):
return True
if strategies == '*' or 'html_file' in strategies:
if check_html_file(domain, prefix, code):
return True
return False | [
"def",
"check",
"(",
"domain",
",",
"prefix",
",",
"code",
",",
"strategies",
"=",
"'*'",
")",
":",
"if",
"strategies",
"==",
"'*'",
"or",
"'dns_txt'",
"in",
"strategies",
":",
"if",
"check_dns_txt",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"return",
"True",
"if",
"strategies",
"==",
"'*'",
"or",
"'dns_cname'",
"in",
"strategies",
":",
"if",
"check_dns_cname",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"return",
"True",
"if",
"strategies",
"==",
"'*'",
"or",
"'meta_tag'",
"in",
"strategies",
":",
"if",
"check_meta_tag",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"return",
"True",
"if",
"strategies",
"==",
"'*'",
"or",
"'html_file'",
"in",
"strategies",
":",
"if",
"check_html_file",
"(",
"domain",
",",
"prefix",
",",
"code",
")",
":",
"return",
"True",
"return",
"False"
] | Check the ownership of a domain by going through a series of strategies.
If at least one strategy succeeds, the domain is considered verified,
and this method returns true.
The prefix is a fixed DNS safe string like "yourservice-domain-verification"
and the code is a random value associated to this domain. It is advised to
prefix the code by a fixed value that is unique to your service like
"yourservice2k3dWdk9dwz".
By default all available strategies are tested. You can limit the check
to a limited set of strategies by passing a comma separated list of
strategy names like "dns_txt,dns_cname". See the "strategies" module
for a full list of available strategies. | [
"Check",
"the",
"ownership",
"of",
"a",
"domain",
"by",
"going",
"thru",
"a",
"serie",
"of",
"strategies",
".",
"If",
"at",
"least",
"one",
"strategy",
"succeed",
"the",
"domain",
"is",
"considered",
"verified",
"and",
"this",
"methods",
"returns",
"true",
"."
] | 43e10c345320564a1236778e8577e2b8ef825925 | https://github.com/rs/domcheck/blob/43e10c345320564a1236778e8577e2b8ef825925/domcheck/__init__.py#L6-L34 | train |
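A minimal usage sketch for check(); the domain, prefix, and code values below are hypothetical placeholders, and a real call performs live DNS/HTTP lookups:

from domcheck import check

# Hypothetical values: the prefix is a fixed DNS-safe label and the code is
# a random per-domain token issued by your service.
verified = check('example.com',
                 'myservice-domain-verification',
                 'myservice2k3dWdk9dwz',
                 strategies='dns_txt,dns_cname')  # restrict to the DNS strategies
if verified:
    print('domain ownership confirmed')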
daxlab/Flask-Cache-Buster | flask_cache_buster/__init__.py | CacheBuster.register_cache_buster | def register_cache_buster(self, app, config=None):
"""
Register `app` in cache buster so that `url_for` adds a unique prefix
to URLs generated for the `'static'` endpoint. Also make the app able
to serve cache-busted static files.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL.
"""
if not (config is None or isinstance(config, dict)):
raise ValueError("`config` must be an instance of dict or None")
bust_map = {} # map from an unbusted filename to a busted one
unbust_map = {} # map from a busted filename to an unbusted one
# http://flask.pocoo.org/docs/0.12/api/#flask.Flask.static_folder
app.logger.debug('Starting computing hashes for static assets')
# compute (un)bust tables.
for dirpath, dirnames, filenames in os.walk(app.static_folder):
for filename in filenames:
# compute version component
rooted_filename = os.path.join(dirpath, filename)
if not self.__is_file_to_be_busted(rooted_filename):
continue
app.logger.debug(f'Computing hashes for {rooted_filename}')
with open(rooted_filename, 'rb') as f:
version = hashlib.md5(
f.read()
).hexdigest()[:self.hash_size]
# add version
unbusted = os.path.relpath(rooted_filename, app.static_folder)
# busted = os.path.join(version, unbusted)
busted = f"{unbusted}?q={version}"
# save computation to map
bust_map[unbusted] = busted
unbust_map[busted] = unbusted
app.logger.debug('Finished computing hashes for static assets')
def bust_filename(file):
return bust_map.get(file, file)
def unbust_filename(file):
return unbust_map.get(file, file)
@app.url_defaults
def reverse_to_cache_busted_url(endpoint, values):
"""
Make `url_for` produce busted filenames when using the 'static'
endpoint.
"""
if endpoint == 'static':
values['filename'] = bust_filename(values['filename'])
def debusting_static_view(*args, **kwargs):
"""
Serve a request for a static file having a busted name.
"""
kwargs['filename'] = unbust_filename(kwargs.get('filename'))
return original_static_view(*args, **kwargs)
# Replace the default static file view with our debusting view.
original_static_view = app.view_functions['static']
app.view_functions['static'] = debusting_static_view | python | def register_cache_buster(self, app, config=None):
"""
Register `app` in cache buster so that `url_for` adds a unique prefix
to URLs generated for the `'static'` endpoint. Also make the app able
to serve cache-busted static files.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL.
"""
if not (config is None or isinstance(config, dict)):
raise ValueError("`config` must be an instance of dict or None")
bust_map = {} # map from an unbusted filename to a busted one
unbust_map = {} # map from a busted filename to an unbusted one
# http://flask.pocoo.org/docs/0.12/api/#flask.Flask.static_folder
app.logger.debug('Starting computing hashes for static assets')
# compute (un)bust tables.
for dirpath, dirnames, filenames in os.walk(app.static_folder):
for filename in filenames:
# compute version component
rooted_filename = os.path.join(dirpath, filename)
if not self.__is_file_to_be_busted(rooted_filename):
continue
app.logger.debug(f'Computing hashes for {rooted_filename}')
with open(rooted_filename, 'rb') as f:
version = hashlib.md5(
f.read()
).hexdigest()[:self.hash_size]
# add version
unbusted = os.path.relpath(rooted_filename, app.static_folder)
# busted = os.path.join(version, unbusted)
busted = f"{unbusted}?q={version}"
# save computation to map
bust_map[unbusted] = busted
unbust_map[busted] = unbusted
app.logger.debug('Finished computing hashes for static assets')
def bust_filename(file):
return bust_map.get(file, file)
def unbust_filename(file):
return unbust_map.get(file, file)
@app.url_defaults
def reverse_to_cache_busted_url(endpoint, values):
"""
Make `url_for` produce busted filenames when using the 'static'
endpoint.
"""
if endpoint == 'static':
values['filename'] = bust_filename(values['filename'])
def debusting_static_view(*args, **kwargs):
"""
Serve a request for a static file having a busted name.
"""
kwargs['filename'] = unbust_filename(kwargs.get('filename'))
return original_static_view(*args, **kwargs)
# Replace the default static file view with our debusting view.
original_static_view = app.view_functions['static']
app.view_functions['static'] = debusting_static_view | [
"def",
"register_cache_buster",
"(",
"self",
",",
"app",
",",
"config",
"=",
"None",
")",
":",
"if",
"not",
"(",
"config",
"is",
"None",
"or",
"isinstance",
"(",
"config",
",",
"dict",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"`config` must be an instance of dict or None\"",
")",
"bust_map",
"=",
"{",
"}",
"unbust_map",
"=",
"{",
"}",
"app",
".",
"logger",
".",
"debug",
"(",
"'Starting computing hashes for static assets'",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"app",
".",
"static_folder",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"rooted_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"filename",
")",
"if",
"not",
"self",
".",
"__is_file_to_be_busted",
"(",
"rooted_filename",
")",
":",
"continue",
"app",
".",
"logger",
".",
"debug",
"(",
"f'Computing hashes for {rooted_filename}'",
")",
"with",
"open",
"(",
"rooted_filename",
",",
"'rb'",
")",
"as",
"f",
":",
"version",
"=",
"hashlib",
".",
"md5",
"(",
"f",
".",
"read",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"self",
".",
"hash_size",
"]",
"unbusted",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"rooted_filename",
",",
"app",
".",
"static_folder",
")",
"busted",
"=",
"f\"{unbusted}?q={version}\"",
"bust_map",
"[",
"unbusted",
"]",
"=",
"busted",
"unbust_map",
"[",
"busted",
"]",
"=",
"unbusted",
"app",
".",
"logger",
".",
"debug",
"(",
"'Finished Starting computing hashes for static assets'",
")",
"def",
"bust_filename",
"(",
"file",
")",
":",
"return",
"bust_map",
".",
"get",
"(",
"file",
",",
"file",
")",
"def",
"unbust_filename",
"(",
"file",
")",
":",
"return",
"unbust_map",
".",
"get",
"(",
"file",
",",
"file",
")",
"@",
"app",
".",
"url_defaults",
"def",
"reverse_to_cache_busted_url",
"(",
"endpoint",
",",
"values",
")",
":",
"if",
"endpoint",
"==",
"'static'",
":",
"values",
"[",
"'filename'",
"]",
"=",
"bust_filename",
"(",
"values",
"[",
"'filename'",
"]",
")",
"def",
"debusting_static_view",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'filename'",
"]",
"=",
"unbust_filename",
"(",
"kwargs",
".",
"get",
"(",
"'filename'",
")",
")",
"return",
"original_static_view",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"original_static_view",
"=",
"app",
".",
"view_functions",
"[",
"'static'",
"]",
"app",
".",
"view_functions",
"[",
"'static'",
"]",
"=",
"debusting_static_view"
] | Register `app` in cache buster so that `url_for` adds a unique prefix
to URLs generated for the `'static'` endpoint. Also make the app able
to serve cache-busted static files.
This allows setting long cache expiration values on static resources
because whenever the resource changes, so does its URL. | [
"Register",
"app",
"in",
"cache",
"buster",
"so",
"that",
"url_for",
"adds",
"a",
"unique",
"prefix",
"to",
"URLs",
"generated",
"for",
"the",
"static",
"endpoint",
".",
"Also",
"make",
"the",
"app",
"able",
"to",
"serve",
"cache",
"-",
"busted",
"static",
"files",
"."
] | 4c10bed9ab46020904df565a9c0014a7f2e4f6b3 | https://github.com/daxlab/Flask-Cache-Buster/blob/4c10bed9ab46020904df565a9c0014a7f2e4f6b3/flask_cache_buster/__init__.py#L29-L93 | train |
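A sketch of wiring register_cache_buster into a Flask app. It assumes CacheBuster can be constructed without arguments; the real constructor (which must set hash_size and the file filter) is not shown here:

from flask import Flask
from flask_cache_buster import CacheBuster

app = Flask(__name__)
buster = CacheBuster()  # assumption: defaults exist for hash_size and the file filter
buster.register_cache_buster(app)

# After registration, url_for('static', filename='css/site.css') yields a
# busted name like 'css/site.css?q=<md5 prefix>' via the url_defaults hook.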
theherk/figgypy | figgypy/utils.py | env_or_default | def env_or_default(var, default=None):
"""Get environment variable or provide default.
Args:
var (str): environment variable to search for
default (optional(str)): default to return
"""
if var in os.environ:
return os.environ[var]
return default | python | def env_or_default(var, default=None):
"""Get environment variable or provide default.
Args:
var (str): environment variable to search for
default (optional(str)): default to return
"""
if var in os.environ:
return os.environ[var]
return default | [
"def",
"env_or_default",
"(",
"var",
",",
"default",
"=",
"None",
")",
":",
"if",
"var",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"var",
"]",
"return",
"default"
] | Get environment variable or provide default.
Args:
var (str): environment variable to search for
default (optional(str)): default to return | [
"Get",
"environment",
"variable",
"or",
"provide",
"default",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/utils.py#L11-L20 | train |
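A quick usage sketch:

import os
from figgypy.utils import env_or_default

os.environ['APP_MODE'] = 'production'
assert env_or_default('APP_MODE') == 'production'            # variable is set
assert env_or_default('NOT_SET', 'fallback') == 'fallback'   # default is returned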
theherk/figgypy | figgypy/utils.py | kms_encrypt | def kms_encrypt(value, key, aws_config=None):
"""Encrypt and value with KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text
"""
aws_config = aws_config or {}
aws = boto3.session.Session(**aws_config)
client = aws.client('kms')
enc_res = client.encrypt(KeyId=key,
Plaintext=value)
return n(b64encode(enc_res['CiphertextBlob'])) | python | def kms_encrypt(value, key, aws_config=None):
"""Encrypt and value with KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text
"""
aws_config = aws_config or {}
aws = boto3.session.Session(**aws_config)
client = aws.client('kms')
enc_res = client.encrypt(KeyId=key,
Plaintext=value)
return n(b64encode(enc_res['CiphertextBlob'])) | [
"def",
"kms_encrypt",
"(",
"value",
",",
"key",
",",
"aws_config",
"=",
"None",
")",
":",
"aws_config",
"=",
"aws_config",
"or",
"{",
"}",
"aws",
"=",
"boto3",
".",
"session",
".",
"Session",
"(",
"**",
"aws_config",
")",
"client",
"=",
"aws",
".",
"client",
"(",
"'kms'",
")",
"enc_res",
"=",
"client",
".",
"encrypt",
"(",
"KeyId",
"=",
"key",
",",
"Plaintext",
"=",
"value",
")",
"return",
"n",
"(",
"b64encode",
"(",
"enc_res",
"[",
"'CiphertextBlob'",
"]",
")",
")"
] | Encrypt a value with a KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text | [
"Encrypt",
"and",
"value",
"with",
"KMS",
"key",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/utils.py#L23-L44 | train |
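A hedged usage sketch; the credentials and key alias are placeholders, and the call only succeeds against a real AWS account with access to that KMS key:

from figgypy.utils import kms_encrypt

aws_creds = {'aws_access_key_id': 'AKIA...',       # placeholder
             'aws_secret_access_key': 'secret',    # placeholder
             'region_name': 'us-east-1'}
cipher_text = kms_encrypt('db-password', 'alias/my-app-key', aws_config=aws_creds)
# cipher_text is a base64-encoded string suitable for storing in a config file.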
theherk/figgypy | figgypy/__init__.py | get_value | def get_value(*args, **kwargs):
"""Get from config object by exposing Config.get_value method.
dict.get() method on Config.values
"""
global _config
if _config is None:
raise ValueError('configuration not set; must run figgypy.set_config first')
return _config.get_value(*args, **kwargs) | python | def get_value(*args, **kwargs):
"""Get from config object by exposing Config.get_value method.
dict.get() method on Config.values
"""
global _config
if _config is None:
raise ValueError('configuration not set; must run figgypy.set_config first')
return _config.get_value(*args, **kwargs) | [
"def",
"get_value",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"global",
"_config",
"if",
"_config",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'configuration not set; must run figgypy.set_config first'",
")",
"return",
"_config",
".",
"get_value",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | Get from config object by exposing Config.get_value method.
dict.get() method on Config.values | [
"Get",
"from",
"config",
"object",
"by",
"exposing",
"Config",
".",
"get_value",
"method",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/__init__.py#L27-L35 | train |
theherk/figgypy | figgypy/__init__.py | set_value | def set_value(*args, **kwargs):
"""Set value in the global Config object."""
global _config
if _config is None:
raise ValueError('configuration not set; must run figgypy.set_config first')
return _config.set_value(*args, **kwargs) | python | def set_value(*args, **kwargs):
"""Set value in the global Config object."""
global _config
if _config is None:
raise ValueError('configuration not set; must run figgypy.set_config first')
return _config.set_value(*args, **kwargs) | [
"def",
"set_value",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"global",
"_config",
"if",
"_config",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'configuration not set; must run figgypy.set_config first'",
")",
"return",
"_config",
".",
"set_value",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | Set value in the global Config object. | [
"Set",
"value",
"in",
"the",
"global",
"Config",
"object",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/__init__.py#L73-L78 | train |
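A sketch of the module-level API. figgypy.set_config is referenced by the error messages above but not shown here, so the config-file argument is an assumption:

import figgypy

figgypy.set_config('config.yaml')   # assumption: set_config accepts a config path
figgypy.set_value('debug', True)
print(figgypy.get_value('debug'))             # True
print(figgypy.get_value('missing', 'n/a'))    # dict.get-style default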
Grk0/python-libconf | libconf.py | decode_escapes | def decode_escapes(s):
'''Unescape libconfig string literals'''
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return ESCAPE_SEQUENCE_RE.sub(decode_match, s) | python | def decode_escapes(s):
'''Unescape libconfig string literals'''
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return ESCAPE_SEQUENCE_RE.sub(decode_match, s) | [
"def",
"decode_escapes",
"(",
"s",
")",
":",
"def",
"decode_match",
"(",
"match",
")",
":",
"return",
"codecs",
".",
"decode",
"(",
"match",
".",
"group",
"(",
"0",
")",
",",
"'unicode-escape'",
")",
"return",
"ESCAPE_SEQUENCE_RE",
".",
"sub",
"(",
"decode_match",
",",
"s",
")"
] | Unescape libconfig string literals | [
"Unescape",
"libconfig",
"string",
"literals"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L50-L55 | train |
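For example, the common backslash escapes decode as in Python's unicode-escape codec (a sketch; the exact set of recognized sequences is defined by ESCAPE_SEQUENCE_RE, not shown here):

from libconf import decode_escapes

assert decode_escapes(r'line1\nline2') == 'line1\nline2'   # \n becomes a newline
assert decode_escapes(r'tab\there') == 'tab\there'         # \t becomes a tab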
Grk0/python-libconf | libconf.py | loads | def loads(string, filename=None, includedir=''):
'''Load the contents of ``string`` to a Python object
The returned object is a subclass of ``dict`` that exposes string keys as
attributes as well.
Example:
>>> config = libconf.loads('window: { title: "libconfig example"; };')
>>> config['window']['title']
'libconfig example'
>>> config.window.title
'libconfig example'
'''
try:
f = io.StringIO(string)
except TypeError:
raise TypeError("libconf.loads() input string must by unicode")
return load(f, filename=filename, includedir=includedir) | python | def loads(string, filename=None, includedir=''):
'''Load the contents of ``string`` to a Python object
The returned object is a subclass of ``dict`` that exposes string keys as
attributes as well.
Example:
>>> config = libconf.loads('window: { title: "libconfig example"; };')
>>> config['window']['title']
'libconfig example'
>>> config.window.title
'libconfig example'
'''
try:
f = io.StringIO(string)
except TypeError:
raise TypeError("libconf.loads() input string must by unicode")
return load(f, filename=filename, includedir=includedir) | [
"def",
"loads",
"(",
"string",
",",
"filename",
"=",
"None",
",",
"includedir",
"=",
"''",
")",
":",
"try",
":",
"f",
"=",
"io",
".",
"StringIO",
"(",
"string",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"libconf.loads() input string must by unicode\"",
")",
"return",
"load",
"(",
"f",
",",
"filename",
"=",
"filename",
",",
"includedir",
"=",
"includedir",
")"
] | Load the contents of ``string`` to a Python object
The returned object is a subclass of ``dict`` that exposes string keys as
attributes as well.
Example:
>>> config = libconf.loads('window: { title: "libconfig example"; };')
>>> config['window']['title']
'libconfig example'
>>> config.window.title
'libconfig example' | [
"Load",
"the",
"contents",
"of",
"string",
"to",
"a",
"Python",
"object"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L501-L521 | train |
Grk0/python-libconf | libconf.py | dump_string | def dump_string(s):
'''Stringize ``s``, adding double quotes and escaping as necessary
Backslash escape backslashes, double quotes, ``\f``, ``\n``, ``\r``, and
``\t``. Escape all remaining unprintable characters in ``\xFF``-style.
The returned string will be surrounded by double quotes.
'''
s = (s.replace('\\', '\\\\')
.replace('"', '\\"')
.replace('\f', r'\f')
.replace('\n', r'\n')
.replace('\r', r'\r')
.replace('\t', r'\t'))
s = UNPRINTABLE_CHARACTER_RE.sub(
lambda m: r'\x{:02x}'.format(ord(m.group(0))),
s)
return '"' + s + '"' | python | def dump_string(s):
'''Stringize ``s``, adding double quotes and escaping as necessary
Backslash escape backslashes, double quotes, ``\f``, ``\n``, ``\r``, and
``\t``. Escape all remaining unprintable characters in ``\xFF``-style.
The returned string will be surrounded by double quotes.
'''
s = (s.replace('\\', '\\\\')
.replace('"', '\\"')
.replace('\f', r'\f')
.replace('\n', r'\n')
.replace('\r', r'\r')
.replace('\t', r'\t'))
s = UNPRINTABLE_CHARACTER_RE.sub(
lambda m: r'\x{:02x}'.format(ord(m.group(0))),
s)
return '"' + s + '"' | [
"def",
"dump_string",
"(",
"s",
")",
":",
"s",
"=",
"(",
"s",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
".",
"replace",
"(",
"'\\f'",
",",
"r'\\f'",
")",
".",
"replace",
"(",
"'\\n'",
",",
"r'\\n'",
")",
".",
"replace",
"(",
"'\\r'",
",",
"r'\\r'",
")",
".",
"replace",
"(",
"'\\t'",
",",
"r'\\t'",
")",
")",
"s",
"=",
"UNPRINTABLE_CHARACTER_RE",
".",
"sub",
"(",
"lambda",
"m",
":",
"r'\\x{:02x}'",
".",
"format",
"(",
"ord",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
")",
",",
"s",
")",
"return",
"'\"'",
"+",
"s",
"+",
"'\"'"
] | Stringize ``s``, adding double quotes and escaping as necessary
Backslash escape backslashes, double quotes, ``\f``, ``\n``, ``\r``, and
``\t``. Escape all remaining unprintable characters in ``\xFF``-style.
The returned string will be surrounded by double quotes. | [
"Stringize",
"s",
"adding",
"double",
"quotes",
"and",
"escaping",
"as",
"necessary"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L555-L572 | train |
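For example:

from libconf import dump_string

print(dump_string('say "hi"\n'))   # prints: "say \"hi\"\n"
print(dump_string('tab\there'))    # prints: "tab\there"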
Grk0/python-libconf | libconf.py | get_dump_type | def get_dump_type(value):
'''Get the libconfig datatype of a value
Return values: ``'d'`` (dict), ``'l'`` (list), ``'a'`` (array),
``'i'`` (integer), ``'i64'`` (long integer), ``'b'`` (bool),
``'f'`` (float), or ``'s'`` (string).
Produces the proper type for LibconfList, LibconfArray, LibconfInt64
instances.
'''
if isinstance(value, dict):
return 'd'
if isinstance(value, tuple):
return 'l'
if isinstance(value, list):
return 'a'
# Test bool before int since isinstance(True, int) == True.
if isinstance(value, bool):
return 'b'
if isint(value):
if is_long_int(value):
return 'i64'
else:
return 'i'
if isinstance(value, float):
return 'f'
if isstr(value):
return 's'
return None | python | def get_dump_type(value):
'''Get the libconfig datatype of a value
Return values: ``'d'`` (dict), ``'l'`` (list), ``'a'`` (array),
``'i'`` (integer), ``'i64'`` (long integer), ``'b'`` (bool),
``'f'`` (float), or ``'s'`` (string).
Produces the proper type for LibconfList, LibconfArray, LibconfInt64
instances.
'''
if isinstance(value, dict):
return 'd'
if isinstance(value, tuple):
return 'l'
if isinstance(value, list):
return 'a'
# Test bool before int since isinstance(True, int) == True.
if isinstance(value, bool):
return 'b'
if isint(value):
if is_long_int(value):
return 'i64'
else:
return 'i'
if isinstance(value, float):
return 'f'
if isstr(value):
return 's'
return None | [
"def",
"get_dump_type",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"'d'",
"if",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"return",
"'l'",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"'a'",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"'b'",
"if",
"isint",
"(",
"value",
")",
":",
"if",
"is_long_int",
"(",
"value",
")",
":",
"return",
"'i64'",
"else",
":",
"return",
"'i'",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"return",
"'f'",
"if",
"isstr",
"(",
"value",
")",
":",
"return",
"'s'",
"return",
"None"
] | Get the libconfig datatype of a value
Return values: ``'d'`` (dict), ``'l'`` (list), ``'a'`` (array),
``'i'`` (integer), ``'i64'`` (long integer), ``'b'`` (bool),
``'f'`` (float), or ``'s'`` (string).
Produces the proper type for LibconfList, LibconfArray, LibconfInt64
instances. | [
"Get",
"the",
"libconfig",
"datatype",
"of",
"a",
"value"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L575-L606 | train |
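A few illustrative mappings:

from libconf import get_dump_type

assert get_dump_type({'a': 1}) == 'd'    # dict  -> group
assert get_dump_type((1, 'x')) == 'l'    # tuple -> list
assert get_dump_type([1, 2]) == 'a'      # list  -> array
assert get_dump_type(True) == 'b'        # bool is tested before int
assert get_dump_type(7) == 'i'
assert get_dump_type(1.5) == 'f'
assert get_dump_type('text') == 's'
assert get_dump_type(object()) is None   # unsupported type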
Grk0/python-libconf | libconf.py | get_array_value_dtype | def get_array_value_dtype(lst):
'''Return array value type, raise ConfigSerializeError for invalid arrays
Libconfig arrays must only contain scalar values and all elements must be
of the same libconfig data type. Raises ConfigSerializeError if these
invariants are not met.
Returns the value type of the array. If an array contains both int and
long int data types, the return datatype will be ``'i64'``.
'''
array_value_type = None
for value in lst:
dtype = get_dump_type(value)
if dtype not in {'b', 'i', 'i64', 'f', 's'}:
raise ConfigSerializeError(
"Invalid datatype in array (may only contain scalars):"
"%r of type %s" % (value, type(value)))
if array_value_type is None:
array_value_type = dtype
continue
if array_value_type == dtype:
continue
if array_value_type == 'i' and dtype == 'i64':
array_value_type = 'i64'
continue
if array_value_type == 'i64' and dtype == 'i':
continue
raise ConfigSerializeError(
"Mixed types in array (all elements must have same type):"
"%r of type %s" % (value, type(value)))
return array_value_type | python | def get_array_value_dtype(lst):
'''Return array value type, raise ConfigSerializeError for invalid arrays
Libconfig arrays must only contain scalar values and all elements must be
of the same libconfig data type. Raises ConfigSerializeError if these
invariants are not met.
Returns the value type of the array. If an array contains both int and
long int data types, the return datatype will be ``'i64'``.
'''
array_value_type = None
for value in lst:
dtype = get_dump_type(value)
if dtype not in {'b', 'i', 'i64', 'f', 's'}:
raise ConfigSerializeError(
"Invalid datatype in array (may only contain scalars):"
"%r of type %s" % (value, type(value)))
if array_value_type is None:
array_value_type = dtype
continue
if array_value_type == dtype:
continue
if array_value_type == 'i' and dtype == 'i64':
array_value_type = 'i64'
continue
if array_value_type == 'i64' and dtype == 'i':
continue
raise ConfigSerializeError(
"Mixed types in array (all elements must have same type):"
"%r of type %s" % (value, type(value)))
return array_value_type | [
"def",
"get_array_value_dtype",
"(",
"lst",
")",
":",
"array_value_type",
"=",
"None",
"for",
"value",
"in",
"lst",
":",
"dtype",
"=",
"get_dump_type",
"(",
"value",
")",
"if",
"dtype",
"not",
"in",
"{",
"'b'",
",",
"'i'",
",",
"'i64'",
",",
"'f'",
",",
"'s'",
"}",
":",
"raise",
"ConfigSerializeError",
"(",
"\"Invalid datatype in array (may only contain scalars):\"",
"\"%r of type %s\"",
"%",
"(",
"value",
",",
"type",
"(",
"value",
")",
")",
")",
"if",
"array_value_type",
"is",
"None",
":",
"array_value_type",
"=",
"dtype",
"continue",
"if",
"array_value_type",
"==",
"dtype",
":",
"continue",
"if",
"array_value_type",
"==",
"'i'",
"and",
"dtype",
"==",
"'i64'",
":",
"array_value_type",
"=",
"'i64'",
"continue",
"if",
"array_value_type",
"==",
"'i64'",
"and",
"dtype",
"==",
"'i'",
":",
"continue",
"raise",
"ConfigSerializeError",
"(",
"\"Mixed types in array (all elements must have same type):\"",
"\"%r of type %s\"",
"%",
"(",
"value",
",",
"type",
"(",
"value",
")",
")",
")",
"return",
"array_value_type"
] | Return array value type, raise ConfigSerializeError for invalid arrays
Libconfig arrays must only contain scalar values and all elements must be
of the same libconfig data type. Raises ConfigSerializeError if these
invariants are not met.
Returns the value type of the array. If an array contains both int and
long int data types, the return datatype will be ``'i64'``. | [
"Return",
"array",
"value",
"type",
"raise",
"ConfigSerializeError",
"for",
"invalid",
"arrays"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L609-L646 | train |
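For example, assuming is_long_int() flags values outside the 32-bit signed range:

from libconf import ConfigSerializeError, get_array_value_dtype

assert get_array_value_dtype([1, 2, 3]) == 'i'
assert get_array_value_dtype([1, 2**40]) == 'i64'   # int mixed with long int promotes

try:
    get_array_value_dtype([1, 'x'])     # mixed scalar types are rejected
except ConfigSerializeError:
    pass
try:
    get_array_value_dtype([[1], [2]])   # non-scalar elements are rejected
except ConfigSerializeError:
    pass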
Grk0/python-libconf | libconf.py | dump_value | def dump_value(key, value, f, indent=0):
'''Save a value of any libconfig type
This function takes ``key`` and ``value`` and serializes them
into ``f``. If ``key`` is ``None``, a list-style output is produced.
Otherwise, output has ``key = value`` format.
'''
spaces = ' ' * indent
if key is None:
key_prefix = ''
key_prefix_nl = ''
else:
key_prefix = key + ' = '
key_prefix_nl = key + ' =\n' + spaces
dtype = get_dump_type(value)
if dtype == 'd':
f.write(u'{}{}{{\n'.format(spaces, key_prefix_nl))
dump_dict(value, f, indent + 4)
f.write(u'{}}}'.format(spaces))
elif dtype == 'l':
f.write(u'{}{}(\n'.format(spaces, key_prefix_nl))
dump_collection(value, f, indent + 4)
f.write(u'\n{})'.format(spaces))
elif dtype == 'a':
f.write(u'{}{}[\n'.format(spaces, key_prefix_nl))
value_dtype = get_array_value_dtype(value)
# If int array contains one or more Int64, promote all values to i64.
if value_dtype == 'i64':
value = [LibconfInt64(v) for v in value]
dump_collection(value, f, indent + 4)
f.write(u'\n{}]'.format(spaces))
elif dtype == 's':
f.write(u'{}{}{}'.format(spaces, key_prefix, dump_string(value)))
elif dtype == 'i' or dtype == 'i64':
f.write(u'{}{}{}'.format(spaces, key_prefix, dump_int(value)))
elif dtype == 'f' or dtype == 'b':
f.write(u'{}{}{}'.format(spaces, key_prefix, value))
else:
raise ConfigSerializeError("Can not serialize object %r of type %s" %
(value, type(value))) | python | def dump_value(key, value, f, indent=0):
'''Save a value of any libconfig type
This function takes ``key`` and ``value`` and serializes them
into ``f``. If ``key`` is ``None``, a list-style output is produced.
Otherwise, output has ``key = value`` format.
'''
spaces = ' ' * indent
if key is None:
key_prefix = ''
key_prefix_nl = ''
else:
key_prefix = key + ' = '
key_prefix_nl = key + ' =\n' + spaces
dtype = get_dump_type(value)
if dtype == 'd':
f.write(u'{}{}{{\n'.format(spaces, key_prefix_nl))
dump_dict(value, f, indent + 4)
f.write(u'{}}}'.format(spaces))
elif dtype == 'l':
f.write(u'{}{}(\n'.format(spaces, key_prefix_nl))
dump_collection(value, f, indent + 4)
f.write(u'\n{})'.format(spaces))
elif dtype == 'a':
f.write(u'{}{}[\n'.format(spaces, key_prefix_nl))
value_dtype = get_array_value_dtype(value)
# If int array contains one or more Int64, promote all values to i64.
if value_dtype == 'i64':
value = [LibconfInt64(v) for v in value]
dump_collection(value, f, indent + 4)
f.write(u'\n{}]'.format(spaces))
elif dtype == 's':
f.write(u'{}{}{}'.format(spaces, key_prefix, dump_string(value)))
elif dtype == 'i' or dtype == 'i64':
f.write(u'{}{}{}'.format(spaces, key_prefix, dump_int(value)))
elif dtype == 'f' or dtype == 'b':
f.write(u'{}{}{}'.format(spaces, key_prefix, value))
else:
raise ConfigSerializeError("Can not serialize object %r of type %s" %
(value, type(value))) | [
"def",
"dump_value",
"(",
"key",
",",
"value",
",",
"f",
",",
"indent",
"=",
"0",
")",
":",
"spaces",
"=",
"' '",
"*",
"indent",
"if",
"key",
"is",
"None",
":",
"key_prefix",
"=",
"''",
"key_prefix_nl",
"=",
"''",
"else",
":",
"key_prefix",
"=",
"key",
"+",
"' = '",
"key_prefix_nl",
"=",
"key",
"+",
"' =\\n'",
"+",
"spaces",
"dtype",
"=",
"get_dump_type",
"(",
"value",
")",
"if",
"dtype",
"==",
"'d'",
":",
"f",
".",
"write",
"(",
"u'{}{}{{\\n'",
".",
"format",
"(",
"spaces",
",",
"key_prefix_nl",
")",
")",
"dump_dict",
"(",
"value",
",",
"f",
",",
"indent",
"+",
"4",
")",
"f",
".",
"write",
"(",
"u'{}}}'",
".",
"format",
"(",
"spaces",
")",
")",
"elif",
"dtype",
"==",
"'l'",
":",
"f",
".",
"write",
"(",
"u'{}{}(\\n'",
".",
"format",
"(",
"spaces",
",",
"key_prefix_nl",
")",
")",
"dump_collection",
"(",
"value",
",",
"f",
",",
"indent",
"+",
"4",
")",
"f",
".",
"write",
"(",
"u'\\n{})'",
".",
"format",
"(",
"spaces",
")",
")",
"elif",
"dtype",
"==",
"'a'",
":",
"f",
".",
"write",
"(",
"u'{}{}[\\n'",
".",
"format",
"(",
"spaces",
",",
"key_prefix_nl",
")",
")",
"value_dtype",
"=",
"get_array_value_dtype",
"(",
"value",
")",
"if",
"value_dtype",
"==",
"'i64'",
":",
"value",
"=",
"[",
"LibconfInt64",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"dump_collection",
"(",
"value",
",",
"f",
",",
"indent",
"+",
"4",
")",
"f",
".",
"write",
"(",
"u'\\n{}]'",
".",
"format",
"(",
"spaces",
")",
")",
"elif",
"dtype",
"==",
"'s'",
":",
"f",
".",
"write",
"(",
"u'{}{}{}'",
".",
"format",
"(",
"spaces",
",",
"key_prefix",
",",
"dump_string",
"(",
"value",
")",
")",
")",
"elif",
"dtype",
"==",
"'i'",
"or",
"dtype",
"==",
"'i64'",
":",
"f",
".",
"write",
"(",
"u'{}{}{}'",
".",
"format",
"(",
"spaces",
",",
"key_prefix",
",",
"dump_int",
"(",
"value",
")",
")",
")",
"elif",
"dtype",
"==",
"'f'",
"or",
"dtype",
"==",
"'b'",
":",
"f",
".",
"write",
"(",
"u'{}{}{}'",
".",
"format",
"(",
"spaces",
",",
"key_prefix",
",",
"value",
")",
")",
"else",
":",
"raise",
"ConfigSerializeError",
"(",
"\"Can not serialize object %r of type %s\"",
"%",
"(",
"value",
",",
"type",
"(",
"value",
")",
")",
")"
] | Save a value of any libconfig type
This function takes ``key`` and ``value`` and serializes them
into ``f``. If ``key`` is ``None``, a list-style output is produced.
Otherwise, output has ``key = value`` format. | [
"Save",
"a",
"value",
"of",
"any",
"libconfig",
"type"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L649-L692 | train |
Grk0/python-libconf | libconf.py | dump_collection | def dump_collection(cfg, f, indent=0):
'''Save a collection of attributes'''
for i, value in enumerate(cfg):
dump_value(None, value, f, indent)
if i < len(cfg) - 1:
f.write(u',\n') | python | def dump_collection(cfg, f, indent=0):
'''Save a collection of attributes'''
for i, value in enumerate(cfg):
dump_value(None, value, f, indent)
if i < len(cfg) - 1:
f.write(u',\n') | [
"def",
"dump_collection",
"(",
"cfg",
",",
"f",
",",
"indent",
"=",
"0",
")",
":",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"cfg",
")",
":",
"dump_value",
"(",
"None",
",",
"value",
",",
"f",
",",
"indent",
")",
"if",
"i",
"<",
"len",
"(",
"cfg",
")",
"-",
"1",
":",
"f",
".",
"write",
"(",
"u',\\n'",
")"
] | Save a collection of attributes | [
"Save",
"a",
"collection",
"of",
"attributes"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L695-L701 | train |
Grk0/python-libconf | libconf.py | dump_dict | def dump_dict(cfg, f, indent=0):
'''Save a dictionary of attributes'''
for key in cfg:
if not isstr(key):
raise ConfigSerializeError("Dict keys must be strings: %r" %
(key,))
dump_value(key, cfg[key], f, indent)
f.write(u';\n') | python | def dump_dict(cfg, f, indent=0):
'''Save a dictionary of attributes'''
for key in cfg:
if not isstr(key):
raise ConfigSerializeError("Dict keys must be strings: %r" %
(key,))
dump_value(key, cfg[key], f, indent)
f.write(u';\n') | [
"def",
"dump_dict",
"(",
"cfg",
",",
"f",
",",
"indent",
"=",
"0",
")",
":",
"for",
"key",
"in",
"cfg",
":",
"if",
"not",
"isstr",
"(",
"key",
")",
":",
"raise",
"ConfigSerializeError",
"(",
"\"Dict keys must be strings: %r\"",
"%",
"(",
"key",
",",
")",
")",
"dump_value",
"(",
"key",
",",
"cfg",
"[",
"key",
"]",
",",
"f",
",",
"indent",
")",
"f",
".",
"write",
"(",
"u';\\n'",
")"
] | Save a dictionary of attributes | [
"Save",
"a",
"dictionary",
"of",
"attributes"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L704-L712 | train |
Grk0/python-libconf | libconf.py | dumps | def dumps(cfg):
'''Serialize ``cfg`` into a libconfig-formatted ``str``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
Returns the formatted string.
'''
str_file = io.StringIO()
dump(cfg, str_file)
return str_file.getvalue() | python | def dumps(cfg):
'''Serialize ``cfg`` into a libconfig-formatted ``str``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
Returns the formatted string.
'''
str_file = io.StringIO()
dump(cfg, str_file)
return str_file.getvalue() | [
"def",
"dumps",
"(",
"cfg",
")",
":",
"str_file",
"=",
"io",
".",
"StringIO",
"(",
")",
"dump",
"(",
"cfg",
",",
"str_file",
")",
"return",
"str_file",
".",
"getvalue",
"(",
")"
] | Serialize ``cfg`` into a libconfig-formatted ``str``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
Returns the formatted string. | [
"Serialize",
"cfg",
"into",
"a",
"libconfig",
"-",
"formatted",
"str"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L715-L726 | train |
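A small round trip with loads():

import libconf

cfg = {'window': {'title': 'libconfig example', 'size': [640, 480]}}
text = libconf.dumps(cfg)
assert libconf.loads(text)['window']['title'] == 'libconfig example'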
Grk0/python-libconf | libconf.py | dump | def dump(cfg, f):
'''Serialize ``cfg`` as a libconfig-formatted stream into ``f``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
``f`` must be a ``file``-like object with a ``write()`` method.
'''
if not isinstance(cfg, dict):
raise ConfigSerializeError(
'dump() requires a dict as input, not %r of type %r' %
(cfg, type(cfg)))
dump_dict(cfg, f, 0) | python | def dump(cfg, f):
'''Serialize ``cfg`` as a libconfig-formatted stream into ``f``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
``f`` must be a ``file``-like object with a ``write()`` method.
'''
if not isinstance(cfg, dict):
raise ConfigSerializeError(
'dump() requires a dict as input, not %r of type %r' %
(cfg, type(cfg)))
dump_dict(cfg, f, 0) | [
"def",
"dump",
"(",
"cfg",
",",
"f",
")",
":",
"if",
"not",
"isinstance",
"(",
"cfg",
",",
"dict",
")",
":",
"raise",
"ConfigSerializeError",
"(",
"'dump() requires a dict as input, not %r of type %r'",
"%",
"(",
"cfg",
",",
"type",
"(",
"cfg",
")",
")",
")",
"dump_dict",
"(",
"cfg",
",",
"f",
",",
"0",
")"
] | Serialize ``cfg`` as a libconfig-formatted stream into ``f``
``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
(numbers, strings, booleans, possibly nested dicts, lists, and tuples).
``f`` must be a ``file``-like object with a ``write()`` method. | [
"Serialize",
"cfg",
"as",
"a",
"libconfig",
"-",
"formatted",
"stream",
"into",
"f"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L729-L743 | train |
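And the file-oriented variant, written to an in-memory buffer:

import io
import libconf

buf = io.StringIO()
libconf.dump({'answer': 42}, buf)
print(buf.getvalue())   # answer = 42;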
Grk0/python-libconf | libconf.py | Tokenizer.tokenize | def tokenize(self, string):
'''Yield tokens from the input string or throw ConfigParseError'''
pos = 0
while pos < len(string):
m = SKIP_RE.match(string, pos=pos)
if m:
skip_lines = m.group(0).split('\n')
if len(skip_lines) > 1:
self.row += len(skip_lines) - 1
self.column = 1 + len(skip_lines[-1])
else:
self.column += len(skip_lines[0])
pos = m.end()
continue
for cls, type, regex in self.token_map:
m = regex.match(string, pos=pos)
if m:
yield cls(type, m.group(0),
self.filename, self.row, self.column)
self.column += len(m.group(0))
pos = m.end()
break
else:
raise ConfigParseError(
"Couldn't load config in %r row %d, column %d: %r" %
(self.filename, self.row, self.column,
string[pos:pos+20])) | python | def tokenize(self, string):
'''Yield tokens from the input string or throw ConfigParseError'''
pos = 0
while pos < len(string):
m = SKIP_RE.match(string, pos=pos)
if m:
skip_lines = m.group(0).split('\n')
if len(skip_lines) > 1:
self.row += len(skip_lines) - 1
self.column = 1 + len(skip_lines[-1])
else:
self.column += len(skip_lines[0])
pos = m.end()
continue
for cls, type, regex in self.token_map:
m = regex.match(string, pos=pos)
if m:
yield cls(type, m.group(0),
self.filename, self.row, self.column)
self.column += len(m.group(0))
pos = m.end()
break
else:
raise ConfigParseError(
"Couldn't load config in %r row %d, column %d: %r" %
(self.filename, self.row, self.column,
string[pos:pos+20])) | [
"def",
"tokenize",
"(",
"self",
",",
"string",
")",
":",
"pos",
"=",
"0",
"while",
"pos",
"<",
"len",
"(",
"string",
")",
":",
"m",
"=",
"SKIP_RE",
".",
"match",
"(",
"string",
",",
"pos",
"=",
"pos",
")",
"if",
"m",
":",
"skip_lines",
"=",
"m",
".",
"group",
"(",
"0",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"skip_lines",
")",
">",
"1",
":",
"self",
".",
"row",
"+=",
"len",
"(",
"skip_lines",
")",
"-",
"1",
"self",
".",
"column",
"=",
"1",
"+",
"len",
"(",
"skip_lines",
"[",
"-",
"1",
"]",
")",
"else",
":",
"self",
".",
"column",
"+=",
"len",
"(",
"skip_lines",
"[",
"0",
"]",
")",
"pos",
"=",
"m",
".",
"end",
"(",
")",
"continue",
"for",
"cls",
",",
"type",
",",
"regex",
"in",
"self",
".",
"token_map",
":",
"m",
"=",
"regex",
".",
"match",
"(",
"string",
",",
"pos",
"=",
"pos",
")",
"if",
"m",
":",
"yield",
"cls",
"(",
"type",
",",
"m",
".",
"group",
"(",
"0",
")",
",",
"self",
".",
"filename",
",",
"self",
".",
"row",
",",
"self",
".",
"column",
")",
"self",
".",
"column",
"+=",
"len",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
"pos",
"=",
"m",
".",
"end",
"(",
")",
"break",
"else",
":",
"raise",
"ConfigParseError",
"(",
"\"Couldn't load config in %r row %d, column %d: %r\"",
"%",
"(",
"self",
".",
"filename",
",",
"self",
".",
"row",
",",
"self",
".",
"column",
",",
"string",
"[",
"pos",
":",
"pos",
"+",
"20",
"]",
")",
")"
] | Yield tokens from the input string or throw ConfigParseError | [
"Yield",
"tokens",
"from",
"the",
"input",
"string",
"or",
"throw",
"ConfigParseError"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L178-L206 | train |
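A sketch of driving the tokenizer directly; Tokenizer is an internal class, and the printed token representation is illustrative only:

from libconf import Tokenizer

tokenizer = Tokenizer(filename='<string>')
for tok in tokenizer.tokenize('answer = 42;'):
    print(tok)   # name, '=', integer and ';' tokens, each carrying file/row/column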
Grk0/python-libconf | libconf.py | TokenStream.from_file | def from_file(cls, f, filename=None, includedir='', seenfiles=None):
'''Create a token stream by reading an input file
Read tokens from `f`. If an include directive ('@include "file.cfg"')
is found, read its contents as well.
The `filename` argument is used for error messages and to detect
circular imports. ``includedir`` sets the lookup directory for included
files. ``seenfiles`` is used internally to detect circular includes,
and should normally not be supplied by users of this function.
'''
if filename is None:
filename = getattr(f, 'name', '<unknown>')
if seenfiles is None:
seenfiles = set()
if filename in seenfiles:
raise ConfigParseError("Circular include: %r" % (filename,))
seenfiles = seenfiles | {filename} # Copy seenfiles, don't alter it.
tokenizer = Tokenizer(filename=filename)
lines = []
tokens = []
for line in f:
m = re.match(r'@include "(.*)"$', line.strip())
if m:
tokens.extend(tokenizer.tokenize(''.join(lines)))
lines = [re.sub(r'\S', ' ', line)]
includefilename = decode_escapes(m.group(1))
includefilename = os.path.join(includedir, includefilename)
try:
includefile = open(includefilename, "r")
except IOError:
raise ConfigParseError("Could not open include file %r" %
(includefilename,))
with includefile:
includestream = cls.from_file(includefile,
filename=includefilename,
includedir=includedir,
seenfiles=seenfiles)
tokens.extend(includestream.tokens)
else:
lines.append(line)
tokens.extend(tokenizer.tokenize(''.join(lines)))
return cls(tokens) | python | def from_file(cls, f, filename=None, includedir='', seenfiles=None):
'''Create a token stream by reading an input file
Read tokens from `f`. If an include directive ('@include "file.cfg"')
is found, read its contents as well.
The `filename` argument is used for error messages and to detect
circular imports. ``includedir`` sets the lookup directory for included
files. ``seenfiles`` is used internally to detect circular includes,
and should normally not be supplied by users of this function.
'''
if filename is None:
filename = getattr(f, 'name', '<unknown>')
if seenfiles is None:
seenfiles = set()
if filename in seenfiles:
raise ConfigParseError("Circular include: %r" % (filename,))
seenfiles = seenfiles | {filename} # Copy seenfiles, don't alter it.
tokenizer = Tokenizer(filename=filename)
lines = []
tokens = []
for line in f:
m = re.match(r'@include "(.*)"$', line.strip())
if m:
tokens.extend(tokenizer.tokenize(''.join(lines)))
lines = [re.sub(r'\S', ' ', line)]
includefilename = decode_escapes(m.group(1))
includefilename = os.path.join(includedir, includefilename)
try:
includefile = open(includefilename, "r")
except IOError:
raise ConfigParseError("Could not open include file %r" %
(includefilename,))
with includefile:
includestream = cls.from_file(includefile,
filename=includefilename,
includedir=includedir,
seenfiles=seenfiles)
tokens.extend(includestream.tokens)
else:
lines.append(line)
tokens.extend(tokenizer.tokenize(''.join(lines)))
return cls(tokens) | [
"def",
"from_file",
"(",
"cls",
",",
"f",
",",
"filename",
"=",
"None",
",",
"includedir",
"=",
"''",
",",
"seenfiles",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"getattr",
"(",
"f",
",",
"'name'",
",",
"'<unknown>'",
")",
"if",
"seenfiles",
"is",
"None",
":",
"seenfiles",
"=",
"set",
"(",
")",
"if",
"filename",
"in",
"seenfiles",
":",
"raise",
"ConfigParseError",
"(",
"\"Circular include: %r\"",
"%",
"(",
"filename",
",",
")",
")",
"seenfiles",
"=",
"seenfiles",
"|",
"{",
"filename",
"}",
"tokenizer",
"=",
"Tokenizer",
"(",
"filename",
"=",
"filename",
")",
"lines",
"=",
"[",
"]",
"tokens",
"=",
"[",
"]",
"for",
"line",
"in",
"f",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'@include \"(.*)\"$'",
",",
"line",
".",
"strip",
"(",
")",
")",
"if",
"m",
":",
"tokens",
".",
"extend",
"(",
"tokenizer",
".",
"tokenize",
"(",
"''",
".",
"join",
"(",
"lines",
")",
")",
")",
"lines",
"=",
"[",
"re",
".",
"sub",
"(",
"r'\\S'",
",",
"' '",
",",
"line",
")",
"]",
"includefilename",
"=",
"decode_escapes",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"includefilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"includedir",
",",
"includefilename",
")",
"try",
":",
"includefile",
"=",
"open",
"(",
"includefilename",
",",
"\"r\"",
")",
"except",
"IOError",
":",
"raise",
"ConfigParseError",
"(",
"\"Could not open include file %r\"",
"%",
"(",
"includefilename",
",",
")",
")",
"with",
"includefile",
":",
"includestream",
"=",
"cls",
".",
"from_file",
"(",
"includefile",
",",
"filename",
"=",
"includefilename",
",",
"includedir",
"=",
"includedir",
",",
"seenfiles",
"=",
"seenfiles",
")",
"tokens",
".",
"extend",
"(",
"includestream",
".",
"tokens",
")",
"else",
":",
"lines",
".",
"append",
"(",
"line",
")",
"tokens",
".",
"extend",
"(",
"tokenizer",
".",
"tokenize",
"(",
"''",
".",
"join",
"(",
"lines",
")",
")",
")",
"return",
"cls",
"(",
"tokens",
")"
] | Create a token stream by reading an input file
Read tokens from `f`. If an include directive ('@include "file.cfg"')
is found, read its contents as well.
The `filename` argument is used for error messages and to detect
circular imports. ``includedir`` sets the lookup directory for included
files. ``seenfiles`` is used internally to detect circular includes,
and should normally not be supplied by users of this function. | [
"Create",
"a",
"token",
"stream",
"by",
"reading",
"an",
"input",
"file"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L224-L273 | train |
Grk0/python-libconf | libconf.py | TokenStream.error | def error(self, msg):
'''Raise a ConfigParseError at the current input position'''
if self.finished():
raise ConfigParseError("Unexpected end of input; %s" % (msg,))
else:
t = self.peek()
raise ConfigParseError("Unexpected token %s; %s" % (t, msg)) | python | def error(self, msg):
'''Raise a ConfigParseError at the current input position'''
if self.finished():
raise ConfigParseError("Unexpected end of input; %s" % (msg,))
else:
t = self.peek()
raise ConfigParseError("Unexpected token %s; %s" % (t, msg)) | [
"def",
"error",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"finished",
"(",
")",
":",
"raise",
"ConfigParseError",
"(",
"\"Unexpected end of input; %s\"",
"%",
"(",
"msg",
",",
")",
")",
"else",
":",
"t",
"=",
"self",
".",
"peek",
"(",
")",
"raise",
"ConfigParseError",
"(",
"\"Unexpected token %s; %s\"",
"%",
"(",
"t",
",",
"msg",
")",
")"
] | Raise a ConfigParseError at the current input position | [
"Raise",
"a",
"ConfigParseError",
"at",
"the",
"current",
"input",
"position"
] | 9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4 | https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L321-L327 | train |
FulcrumTechnologies/pyconfluence | pyconfluence/api.py | load_variables | def load_variables():
"""Load variables from environment variables."""
if (not os.environ.get("PYCONFLUENCE_TOKEN") or
not os.environ.get("PYCONFLUENCE_USER") or
not os.environ.get("PYCONFLUENCE_ORG")):
print ("One or more pyconfluence environment variables are not set. "
"See README for directions on how to resolve this.")
sys.exit("Error")
global token
global user
global base_url
token = os.environ["PYCONFLUENCE_TOKEN"]
user = os.environ["PYCONFLUENCE_USER"]
base_url = ("https://" + os.environ["PYCONFLUENCE_ORG"] + ".atlassian"
".net/wiki/rest/api/content") | python | def load_variables():
"""Load variables from environment variables."""
if (not os.environ.get("PYCONFLUENCE_TOKEN") or
not os.environ.get("PYCONFLUENCE_USER") or
not os.environ.get("PYCONFLUENCE_ORG")):
print ("One or more pyconfluence environment variables are not set. "
"See README for directions on how to resolve this.")
sys.exit("Error")
global token
global user
global base_url
token = os.environ["PYCONFLUENCE_TOKEN"]
user = os.environ["PYCONFLUENCE_USER"]
base_url = ("https://" + os.environ["PYCONFLUENCE_ORG"] + ".atlassian"
".net/wiki/rest/api/content") | [
"def",
"load_variables",
"(",
")",
":",
"if",
"(",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYCONFLUENCE_TOKEN\"",
")",
"or",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYCONFLUENCE_USER\"",
")",
"or",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYCONFLUENCE_ORG\"",
")",
")",
":",
"print",
"(",
"\"One or more pyconfluence environment variables are not set. \"",
"\"See README for directions on how to resolve this.\"",
")",
"sys",
".",
"exit",
"(",
"\"Error\"",
")",
"global",
"token",
"global",
"user",
"global",
"base_url",
"token",
"=",
"os",
".",
"environ",
"[",
"\"PYCONFLUENCE_TOKEN\"",
"]",
"user",
"=",
"os",
".",
"environ",
"[",
"\"PYCONFLUENCE_USER\"",
"]",
"base_url",
"=",
"(",
"\"https://\"",
"+",
"os",
".",
"environ",
"[",
"\"PYCONFLUENCE_ORG\"",
"]",
"+",
"\".atlassian\"",
"\".net/wiki/rest/api/content\"",
")"
] | Load variables from environment variables. | [
"Load",
"variables",
"from",
"environment",
"variables",
"."
] | a999726dbc1cbdd3d9062234698eeae799ce84ce | https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/api.py#L21-L36 | train |
FulcrumTechnologies/pyconfluence | pyconfluence/api.py | rest | def rest(url, req="GET", data=None):
"""Main function to be called from this module.
Send a request using method 'req' to the url. The _rest() function
will add the base_url to this, so 'url' should be something like '/ips'.
"""
load_variables()
return _rest(base_url + url, req, data) | python | def rest(url, req="GET", data=None):
"""Main function to be called from this module.
Send a request using method 'req' to the url. The _rest() function
will add the base_url to this, so 'url' should be something like '/ips'.
"""
load_variables()
return _rest(base_url + url, req, data) | [
"def",
"rest",
"(",
"url",
",",
"req",
"=",
"\"GET\"",
",",
"data",
"=",
"None",
")",
":",
"load_variables",
"(",
")",
"return",
"_rest",
"(",
"base_url",
"+",
"url",
",",
"req",
",",
"data",
")"
] | Main function to be called from this module.
Send a request using method 'req' to the url. The _rest() function
will add the base_url to this, so 'url' should be something like '/ips'. | [
"Main",
"function",
"to",
"be",
"called",
"from",
"this",
"module",
"."
] | a999726dbc1cbdd3d9062234698eeae799ce84ce | https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/api.py#L39-L47 | train |
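A usage sketch, assuming the three environment variables consumed by load_variables() are set; the content id is a placeholder:

import os
from pyconfluence.api import rest

os.environ['PYCONFLUENCE_USER'] = 'me@example.com'   # placeholder
os.environ['PYCONFLUENCE_TOKEN'] = 'api-token'       # placeholder
os.environ['PYCONFLUENCE_ORG'] = 'myorg'             # placeholder

# Issues GET https://myorg.atlassian.net/wiki/rest/api/content/12345
body = rest('/12345')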
FulcrumTechnologies/pyconfluence | pyconfluence/api.py | _rest | def _rest(url, req, data=None):
"""Send a rest rest request to the server."""
if url.upper().startswith("HTTPS"):
print("Secure connection required: Please use HTTPS or https")
return ""
req = req.upper()
if req != "GET" and req != "PUT" and req != "POST" and req != "DELETE":
return ""
status, body = _api_action(url, req, data)
if (int(status) >= 200 and int(status) <= 226):
return body
else:
return body | python | def _rest(url, req, data=None):
"""Send a rest rest request to the server."""
if url.upper().startswith("HTTPS"):
print("Secure connection required: Please use HTTPS or https")
return ""
req = req.upper()
if req != "GET" and req != "PUT" and req != "POST" and req != "DELETE":
return ""
status, body = _api_action(url, req, data)
if (int(status) >= 200 and int(status) <= 226):
return body
else:
return body | [
"def",
"_rest",
"(",
"url",
",",
"req",
",",
"data",
"=",
"None",
")",
":",
"if",
"url",
".",
"upper",
"(",
")",
".",
"startswith",
"(",
"\"HTTPS\"",
")",
":",
"print",
"(",
"\"Secure connection required: Please use HTTPS or https\"",
")",
"return",
"\"\"",
"req",
"=",
"req",
".",
"upper",
"(",
")",
"if",
"req",
"!=",
"\"GET\"",
"and",
"req",
"!=",
"\"PUT\"",
"and",
"req",
"!=",
"\"POST\"",
"and",
"req",
"!=",
"\"DELETE\"",
":",
"return",
"\"\"",
"status",
",",
"body",
"=",
"_api_action",
"(",
"url",
",",
"req",
",",
"data",
")",
"if",
"(",
"int",
"(",
"status",
")",
">=",
"200",
"and",
"int",
"(",
"status",
")",
"<=",
"226",
")",
":",
"return",
"body",
"else",
":",
"return",
"body"
] | Send a REST request to the server. | [
"Send",
"a",
"rest",
"rest",
"request",
"to",
"the",
"server",
"."
] | a999726dbc1cbdd3d9062234698eeae799ce84ce | https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/api.py#L50-L64 | train |
FulcrumTechnologies/pyconfluence | pyconfluence/api.py | _api_action | def _api_action(url, req, data=None):
"""Take action based on what kind of request is needed."""
requisite_headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
auth = (user, token)
if req == "GET":
response = requests.get(url, headers=requisite_headers, auth=auth)
elif req == "PUT":
response = requests.put(url, headers=requisite_headers, auth=auth,
data=data)
elif req == "POST":
response = requests.post(url, headers=requisite_headers, auth=auth,
data=data)
elif req == "DELETE":
response = requests.delete(url, headers=requisite_headers, auth=auth)
return response.status_code, response.text | python | def _api_action(url, req, data=None):
"""Take action based on what kind of request is needed."""
requisite_headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
auth = (user, token)
if req == "GET":
response = requests.get(url, headers=requisite_headers, auth=auth)
elif req == "PUT":
response = requests.put(url, headers=requisite_headers, auth=auth,
data=data)
elif req == "POST":
response = requests.post(url, headers=requisite_headers, auth=auth,
data=data)
elif req == "DELETE":
response = requests.delete(url, headers=requisite_headers, auth=auth)
return response.status_code, response.text | [
"def",
"_api_action",
"(",
"url",
",",
"req",
",",
"data",
"=",
"None",
")",
":",
"requisite_headers",
"=",
"{",
"'Accept'",
":",
"'application/json'",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"auth",
"=",
"(",
"user",
",",
"token",
")",
"if",
"req",
"==",
"\"GET\"",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"requisite_headers",
",",
"auth",
"=",
"auth",
")",
"elif",
"req",
"==",
"\"PUT\"",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"headers",
"=",
"requisite_headers",
",",
"auth",
"=",
"auth",
",",
"data",
"=",
"data",
")",
"elif",
"req",
"==",
"\"POST\"",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"requisite_headers",
",",
"auth",
"=",
"auth",
",",
"data",
"=",
"data",
")",
"elif",
"req",
"==",
"\"DELETE\"",
":",
"response",
"=",
"requests",
".",
"delete",
"(",
"url",
",",
"headers",
"=",
"requisite_headers",
",",
"auth",
"=",
"auth",
")",
"return",
"response",
".",
"status_code",
",",
"response",
".",
"text"
] | Take action based on what kind of request is needed. | [
"Take",
"action",
"based",
"on",
"what",
"kind",
"of",
"request",
"is",
"needed",
"."
] | a999726dbc1cbdd3d9062234698eeae799ce84ce | https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/api.py#L67-L84 | train |
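A minimal, self-contained sketch of the same request dispatch that _api_action performs, restated as a method table; the endpoint, credentials and payload below are invented placeholders, and requests is the only dependency.

import requests

def api_action(url, req, auth, data=None):
    # Table-driven equivalent of the if/elif dispatch in _api_action above.
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}
    methods = {"GET": requests.get, "PUT": requests.put,
               "POST": requests.post, "DELETE": requests.delete}
    kwargs = {"headers": headers, "auth": auth}
    if req in ("PUT", "POST"):
        kwargs["data"] = data  # only the write verbs carry a body
    response = methods[req](url, **kwargs)
    return response.status_code, response.text

# Hypothetical call; the host and token are not real:
# status, body = api_action("https://wiki.example.com/rest/api/content",
#                           "GET", ("user", "token"))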
kstaniek/condoor | condoor/patterns.py | PatternManager._platform_patterns | def _platform_patterns(self, platform='generic', compiled=False):
"""Return all the patterns for specific platform."""
patterns = self._dict_compiled.get(platform, None) if compiled else self._dict_text.get(platform, None)
if patterns is None:
raise KeyError("Unknown platform: {}".format(platform))
return patterns | python | def _platform_patterns(self, platform='generic', compiled=False):
"""Return all the patterns for specific platform."""
patterns = self._dict_compiled.get(platform, None) if compiled else self._dict_text.get(platform, None)
if patterns is None:
raise KeyError("Unknown platform: {}".format(platform))
return patterns | [
"def",
"_platform_patterns",
"(",
"self",
",",
"platform",
"=",
"'generic'",
",",
"compiled",
"=",
"False",
")",
":",
"patterns",
"=",
"self",
".",
"_dict_compiled",
".",
"get",
"(",
"platform",
",",
"None",
")",
"if",
"compiled",
"else",
"self",
".",
"_dict_text",
".",
"get",
"(",
"platform",
",",
"None",
")",
"if",
"patterns",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Unknown platform: {}\"",
".",
"format",
"(",
"platform",
")",
")",
"return",
"patterns"
] | Return all the patterns for specific platform. | [
"Return",
"all",
"the",
"patterns",
"for",
"specific",
"platform",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/patterns.py#L56-L61 | train |
kstaniek/condoor | condoor/patterns.py | PatternManager.pattern | def pattern(self, platform, key, compiled=True):
"""Return the pattern defined by the key string specific to the platform.
:param platform:
:param key:
:param compiled:
:return: Pattern string or RE object.
"""
patterns = self._platform_patterns(platform, compiled=compiled)
pattern = patterns.get(key, self._platform_patterns(compiled=compiled).get(key, None))
if pattern is None:
raise KeyError("Patterns database corrupted. Platform: {}, Key: {}".format(platform, key))
return pattern | python | def pattern(self, platform, key, compiled=True):
"""Return the pattern defined by the key string specific to the platform.
:param platform:
:param key:
:param compiled:
:return: Pattern string or RE object.
"""
patterns = self._platform_patterns(platform, compiled=compiled)
pattern = patterns.get(key, self._platform_patterns(compiled=compiled).get(key, None))
if pattern is None:
raise KeyError("Patterns database corrupted. Platform: {}, Key: {}".format(platform, key))
return pattern | [
"def",
"pattern",
"(",
"self",
",",
"platform",
",",
"key",
",",
"compiled",
"=",
"True",
")",
":",
"patterns",
"=",
"self",
".",
"_platform_patterns",
"(",
"platform",
",",
"compiled",
"=",
"compiled",
")",
"pattern",
"=",
"patterns",
".",
"get",
"(",
"key",
",",
"self",
".",
"_platform_patterns",
"(",
"compiled",
"=",
"compiled",
")",
".",
"get",
"(",
"key",
",",
"None",
")",
")",
"if",
"pattern",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Patterns database corrupted. Platform: {}, Key: {}\"",
".",
"format",
"(",
"platform",
",",
"key",
")",
")",
"return",
"pattern"
] | Return the pattern defined by the key string specific to the platform.
:param platform:
:param key:
:param compiled:
:return: Pattern string or RE object. | [
"Return",
"the",
"pattern",
"defined",
"by",
"the",
"key",
"string",
"specific",
"to",
"the",
"platform",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/patterns.py#L76-L90 | train |
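A toy illustration of the two-level lookup that pattern() implements: try the platform's own table first, then fall back to the generic one. The pattern data below is invented and much simpler than condoor's real database.

import re

PATTERNS = {
    'generic': {'prompt': re.compile(r'[\w.-]+[#>] ?$')},
    'XR': {'prompt': re.compile(r'RP/\d+/\w+/CPU\d+:[\w.-]+# ?$')},
}

def lookup(platform, key):
    # Platform-specific entry wins; otherwise fall back to 'generic'.
    pattern = PATTERNS.get(platform, {}).get(key, PATTERNS['generic'].get(key))
    if pattern is None:
        raise KeyError("no pattern for key {!r}".format(key))
    return pattern

print(lookup('XR', 'prompt').pattern)   # platform-specific hit
print(lookup('IOS', 'prompt').pattern)  # falls back to the generic entry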
kstaniek/condoor | condoor/patterns.py | PatternManager.description | def description(self, platform, key):
"""Return the patter description."""
patterns = self._dict_dscr.get(platform, None)
description = patterns.get(key, None)
return description | python | def description(self, platform, key):
"""Return the patter description."""
patterns = self._dict_dscr.get(platform, None)
description = patterns.get(key, None)
return description | [
"def",
"description",
"(",
"self",
",",
"platform",
",",
"key",
")",
":",
"patterns",
"=",
"self",
".",
"_dict_dscr",
".",
"get",
"(",
"platform",
",",
"None",
")",
"description",
"=",
"patterns",
".",
"get",
"(",
"key",
",",
"None",
")",
"return",
"description"
] | Return the pattern description. | [
"Return",
"the",
"patter",
"description",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/patterns.py#L92-L96 | train |
kstaniek/condoor | condoor/patterns.py | PatternManager.platform | def platform(self, with_prompt, platforms=None):
"""Return the platform name based on the prompt matching."""
if platforms is None:
platforms = self._dict['generic']['prompt_detection']
for platform in platforms:
pattern = self.pattern(platform, 'prompt')
result = re.search(pattern, with_prompt)
if result:
return platform
return None | python | def platform(self, with_prompt, platforms=None):
"""Return the platform name based on the prompt matching."""
if platforms is None:
platforms = self._dict['generic']['prompt_detection']
for platform in platforms:
pattern = self.pattern(platform, 'prompt')
result = re.search(pattern, with_prompt)
if result:
return platform
return None | [
"def",
"platform",
"(",
"self",
",",
"with_prompt",
",",
"platforms",
"=",
"None",
")",
":",
"if",
"platforms",
"is",
"None",
":",
"platforms",
"=",
"self",
".",
"_dict",
"[",
"'generic'",
"]",
"[",
"'prompt_detection'",
"]",
"for",
"platform",
"in",
"platforms",
":",
"pattern",
"=",
"self",
".",
"pattern",
"(",
"platform",
",",
"'prompt'",
")",
"result",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"with_prompt",
")",
"if",
"result",
":",
"return",
"platform",
"return",
"None"
] | Return the platform name based on the prompt matching. | [
"Return",
"the",
"platform",
"name",
"based",
"on",
"the",
"prompt",
"matching",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/patterns.py#L98-L108 | train |
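The prompt-probing idea behind platform() in miniature: run each candidate's prompt regex over captured output and return the first hit. The regexes and their ordering are invented; order matters, since a broad pattern such as the IOS one below would also match XR prompts if tried first.

import re

PROMPTS = [
    ('XR', re.compile(r'RP/\d+/\w+/CPU\d+:[\w.-]+#\s*$')),
    ('IOS', re.compile(r'[\w.-]+[#>]\s*$')),
]

def detect_platform(output):
    for name, regex in PROMPTS:
        if regex.search(output):
            return name
    return None

print(detect_platform('RP/0/RP0/CPU0:core1#'))  # -> XR
print(detect_platform('edge-sw>'))              # -> IOS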
kstaniek/condoor | condoor/drivers/Calvados.py | Driver.after_connect | def after_connect(self):
"""Execute after connect."""
# TODO: check if this works.
show_users = self.device.send("show users", timeout=120)
result = re.search(pattern_manager.pattern(self.platform, 'connected_locally'), show_users)
if result:
self.log('Locally connected to Calvados. Exiting.')
self.device.send('exit')
return True
return False | python | def after_connect(self):
"""Execute after connect."""
# TODO: check if this works.
show_users = self.device.send("show users", timeout=120)
result = re.search(pattern_manager.pattern(self.platform, 'connected_locally'), show_users)
if result:
self.log('Locally connected to Calvados. Exiting.')
self.device.send('exit')
return True
return False | [
"def",
"after_connect",
"(",
"self",
")",
":",
"show_users",
"=",
"self",
".",
"device",
".",
"send",
"(",
"\"show users\"",
",",
"timeout",
"=",
"120",
")",
"result",
"=",
"re",
".",
"search",
"(",
"pattern_manager",
".",
"pattern",
"(",
"self",
".",
"platform",
",",
"'connected_locally'",
")",
",",
"show_users",
")",
"if",
"result",
":",
"self",
".",
"log",
"(",
"'Locally connected to Calvados. Exiting.'",
")",
"self",
".",
"device",
".",
"send",
"(",
"'exit'",
")",
"return",
"True",
"return",
"False"
] | Execute after connect. | [
"Execute",
"after",
"connect",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/drivers/Calvados.py#L47-L56 | train |
kstaniek/condoor | condoor/drivers/jumphost.py | Driver.get_hostname_text | def get_hostname_text(self):
"""Return hostname information from the Unix host."""
# FIXME: fix it, too complex logic
try:
hostname_text = self.device.send('hostname', timeout=10)
if hostname_text:
self.device.hostname = hostname_text.splitlines()[0]
return hostname_text
except CommandError:
self.log("Non Unix jumphost type detected")
return None | python | def get_hostname_text(self):
"""Return hostname information from the Unix host."""
# FIXME: fix it, too complex logic
try:
hostname_text = self.device.send('hostname', timeout=10)
if hostname_text:
self.device.hostname = hostname_text.splitlines()[0]
return hostname_text
except CommandError:
self.log("Non Unix jumphost type detected")
return None | [
"def",
"get_hostname_text",
"(",
"self",
")",
":",
"try",
":",
"hostname_text",
"=",
"self",
".",
"device",
".",
"send",
"(",
"'hostname'",
",",
"timeout",
"=",
"10",
")",
"if",
"hostname_text",
":",
"self",
".",
"device",
".",
"hostname",
"=",
"hostname_text",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"return",
"hostname_text",
"except",
"CommandError",
":",
"self",
".",
"log",
"(",
"\"Non Unix jumphost type detected\"",
")",
"return",
"None"
] | Return hostname information from the Unix host. | [
"Return",
"hostname",
"information",
"from",
"the",
"Unix",
"host",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/drivers/jumphost.py#L35-L45 | train |
theherk/figgypy | figgypy/config.py | Config._find_file | def _find_file(f):
"""Find a config file if possible."""
if os.path.isabs(f):
return f
else:
for d in Config._dirs:
_f = os.path.join(d, f)
if os.path.isfile(_f):
return _f
raise FiggypyError(
"could not find configuration file {} in dirs {}"
.format(f, Config._dirs)
) | python | def _find_file(f):
"""Find a config file if possible."""
if os.path.isabs(f):
return f
else:
for d in Config._dirs:
_f = os.path.join(d, f)
if os.path.isfile(_f):
return _f
raise FiggypyError(
"could not find configuration file {} in dirs {}"
.format(f, Config._dirs)
) | [
"def",
"_find_file",
"(",
"f",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"f",
")",
":",
"return",
"f",
"else",
":",
"for",
"d",
"in",
"Config",
".",
"_dirs",
":",
"_f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"f",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"_f",
")",
":",
"return",
"_f",
"raise",
"FiggypyError",
"(",
"\"could not find configuration file {} in dirs {}\"",
".",
"format",
"(",
"f",
",",
"Config",
".",
"_dirs",
")",
")"
] | Find a config file if possible. | [
"Find",
"a",
"config",
"file",
"if",
"possible",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/config.py#L82-L94 | train |
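A standalone restatement of the search order _find_file() relies on; the directory tuple is an assumption taken from the setup() docstring further down (current directory, then ~/.config, then /etc), since Config._dirs itself is not shown here.

import os

SEARCH_DIRS = ('.', os.path.expanduser('~/.config'), '/etc')  # assumed

def find_file(name):
    if os.path.isabs(name):
        return name  # absolute paths are used as-is
    for d in SEARCH_DIRS:
        candidate = os.path.join(d, name)
        if os.path.isfile(candidate):
            return candidate
    raise IOError("could not find {} in {}".format(name, SEARCH_DIRS))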
theherk/figgypy | figgypy/config.py | Config._load_file | def _load_file(self, f):
"""Get values from config file"""
try:
with open(f, 'r') as _fo:
_seria_in = seria.load(_fo)
_y = _seria_in.dump('yaml')
except IOError:
raise FiggypyError("could not open configuration file")
self.values.update(yaml.load(_y)) | python | def _load_file(self, f):
"""Get values from config file"""
try:
with open(f, 'r') as _fo:
_seria_in = seria.load(_fo)
_y = _seria_in.dump('yaml')
except IOError:
raise FiggypyError("could not open configuration file")
self.values.update(yaml.load(_y)) | [
"def",
"_load_file",
"(",
"self",
",",
"f",
")",
":",
"try",
":",
"with",
"open",
"(",
"f",
",",
"'r'",
")",
"as",
"_fo",
":",
"_seria_in",
"=",
"seria",
".",
"load",
"(",
"_fo",
")",
"_y",
"=",
"_seria_in",
".",
"dump",
"(",
"'yaml'",
")",
"except",
"IOError",
":",
"raise",
"FiggypyError",
"(",
"\"could not open configuration file\"",
")",
"self",
".",
"values",
".",
"update",
"(",
"yaml",
".",
"load",
"(",
"_y",
")",
")"
] | Get values from config file | [
"Get",
"values",
"from",
"config",
"file"
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/config.py#L96-L104 | train |
theherk/figgypy | figgypy/config.py | Config.setup | def setup(self, config_file=None, aws_config=None, gpg_config=None,
decrypt_gpg=True, decrypt_kms=True):
"""Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
"""
if aws_config is not None:
self.aws_config = aws_config
if gpg_config is not None:
self.gpg_config = gpg_config
if decrypt_kms is not None:
self.decrypt_kms = decrypt_kms
if decrypt_gpg is not None:
self.decrypt_gpg = decrypt_gpg
# Again, load the file last so that it can rely on other properties.
if config_file is not None:
self.config_file = config_file
return self | python | def setup(self, config_file=None, aws_config=None, gpg_config=None,
decrypt_gpg=True, decrypt_kms=True):
"""Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
"""
if aws_config is not None:
self.aws_config = aws_config
if gpg_config is not None:
self.gpg_config = gpg_config
if decrypt_kms is not None:
self.decrypt_kms = decrypt_kms
if decrypt_gpg is not None:
self.decrypt_gpg = decrypt_gpg
# Again, load the file last so that it can rely on other properties.
if config_file is not None:
self.config_file = config_file
return self | [
"def",
"setup",
"(",
"self",
",",
"config_file",
"=",
"None",
",",
"aws_config",
"=",
"None",
",",
"gpg_config",
"=",
"None",
",",
"decrypt_gpg",
"=",
"True",
",",
"decrypt_kms",
"=",
"True",
")",
":",
"if",
"aws_config",
"is",
"not",
"None",
":",
"self",
".",
"aws_config",
"=",
"aws_config",
"if",
"gpg_config",
"is",
"not",
"None",
":",
"self",
".",
"gpg_config",
"=",
"gpg_config",
"if",
"decrypt_kms",
"is",
"not",
"None",
":",
"self",
".",
"decrypt_kms",
"=",
"decrypt_kms",
"if",
"decrypt_gpg",
"is",
"not",
"None",
":",
"self",
".",
"decrypt_gpg",
"=",
"decrypt_gpg",
"if",
"config_file",
"is",
"not",
"None",
":",
"self",
".",
"config_file",
"=",
"config_file",
"return",
"self"
] | Make setup easier by providing a constructor method.
Move to config_file
File can be located with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml. | [
"Make",
"setup",
"easier",
"by",
"providing",
"a",
"constructor",
"method",
"."
] | 324d1b281a8df20a26b92f42bf7fda0cca892116 | https://github.com/theherk/figgypy/blob/324d1b281a8df20a26b92f42bf7fda0cca892116/figgypy/config.py#L199-L227 | train |
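Hypothetical usage following the setup() docstring; the import path, package layout and file name are assumptions rather than verified figgypy API, and decryption is switched off so the sketch has no GPG/KMS dependencies.

import os
from figgypy.config import Config  # assumed import path

cfg = Config().setup(
    config_file=os.path.join('my_package', 'config.yaml'),
    decrypt_gpg=False,
    decrypt_kms=False,
)
# _load_file() populates cfg.values from the resolved file:
value = cfg.values.get('some_key')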
kstaniek/condoor | condoor/protocols/console.py | Console.authenticate | def authenticate(self, driver):
"""Authenticate using the Console Server protocol specific FSM."""
# 0 1 2 3
events = [driver.username_re, driver.password_re, self.device.prompt_re, driver.rommon_re,
# 4 5 6 7 8
driver.unable_to_connect_re, driver.authentication_error_re, pexpect.TIMEOUT, pexpect.EOF]
transitions = [
(driver.username_re, [0], 1, partial(a_send_username, self.username), 10),
(driver.username_re, [1], 1, None, 10),
(driver.password_re, [0, 1], 2, partial(a_send_password, self._acquire_password()),
_C['first_prompt_timeout']),
(driver.username_re, [2], -1, a_authentication_error, 0),
(driver.password_re, [2], -1, a_authentication_error, 0),
(driver.authentication_error_re, [1, 2], -1, a_authentication_error, 0),
(self.device.prompt_re, [0, 1, 2], -1, None, 0),
(driver.rommon_re, [0], -1, partial(a_send, "\r\n"), 0),
(pexpect.TIMEOUT, [0], 1, partial(a_send, "\r\n"), 10),
(pexpect.TIMEOUT, [2], -1, None, 0),
(pexpect.TIMEOUT, [3, 7], -1, ConnectionTimeoutError("Connection Timeout", self.hostname), 0),
(driver.unable_to_connect_re, [0, 1, 2], -1, a_unable_to_connect, 0),
]
self.log("EXPECTED_PROMPT={}".format(pattern_to_str(self.device.prompt_re)))
fsm = FSM("CONSOLE-SERVER-AUTH", self.device, events, transitions, timeout=_C['connect_timeout'],
init_pattern=self.last_pattern)
return fsm.run() | python | def authenticate(self, driver):
"""Authenticate using the Console Server protocol specific FSM."""
# 0 1 2 3
events = [driver.username_re, driver.password_re, self.device.prompt_re, driver.rommon_re,
# 4 5 6 7 8
driver.unable_to_connect_re, driver.authentication_error_re, pexpect.TIMEOUT, pexpect.EOF]
transitions = [
(driver.username_re, [0], 1, partial(a_send_username, self.username), 10),
(driver.username_re, [1], 1, None, 10),
(driver.password_re, [0, 1], 2, partial(a_send_password, self._acquire_password()),
_C['first_prompt_timeout']),
(driver.username_re, [2], -1, a_authentication_error, 0),
(driver.password_re, [2], -1, a_authentication_error, 0),
(driver.authentication_error_re, [1, 2], -1, a_authentication_error, 0),
(self.device.prompt_re, [0, 1, 2], -1, None, 0),
(driver.rommon_re, [0], -1, partial(a_send, "\r\n"), 0),
(pexpect.TIMEOUT, [0], 1, partial(a_send, "\r\n"), 10),
(pexpect.TIMEOUT, [2], -1, None, 0),
(pexpect.TIMEOUT, [3, 7], -1, ConnectionTimeoutError("Connection Timeout", self.hostname), 0),
(driver.unable_to_connect_re, [0, 1, 2], -1, a_unable_to_connect, 0),
]
self.log("EXPECTED_PROMPT={}".format(pattern_to_str(self.device.prompt_re)))
fsm = FSM("CONSOLE-SERVER-AUTH", self.device, events, transitions, timeout=_C['connect_timeout'],
init_pattern=self.last_pattern)
return fsm.run() | [
"def",
"authenticate",
"(",
"self",
",",
"driver",
")",
":",
"events",
"=",
"[",
"driver",
".",
"username_re",
",",
"driver",
".",
"password_re",
",",
"self",
".",
"device",
".",
"prompt_re",
",",
"driver",
".",
"rommon_re",
",",
"driver",
".",
"unable_to_connect_re",
",",
"driver",
".",
"authentication_error_re",
",",
"pexpect",
".",
"TIMEOUT",
",",
"pexpect",
".",
"EOF",
"]",
"transitions",
"=",
"[",
"(",
"driver",
".",
"username_re",
",",
"[",
"0",
"]",
",",
"1",
",",
"partial",
"(",
"a_send_username",
",",
"self",
".",
"username",
")",
",",
"10",
")",
",",
"(",
"driver",
".",
"username_re",
",",
"[",
"1",
"]",
",",
"1",
",",
"None",
",",
"10",
")",
",",
"(",
"driver",
".",
"password_re",
",",
"[",
"0",
",",
"1",
"]",
",",
"2",
",",
"partial",
"(",
"a_send_password",
",",
"self",
".",
"_acquire_password",
"(",
")",
")",
",",
"_C",
"[",
"'first_prompt_timeout'",
"]",
")",
",",
"(",
"driver",
".",
"username_re",
",",
"[",
"2",
"]",
",",
"-",
"1",
",",
"a_authentication_error",
",",
"0",
")",
",",
"(",
"driver",
".",
"password_re",
",",
"[",
"2",
"]",
",",
"-",
"1",
",",
"a_authentication_error",
",",
"0",
")",
",",
"(",
"driver",
".",
"authentication_error_re",
",",
"[",
"1",
",",
"2",
"]",
",",
"-",
"1",
",",
"a_authentication_error",
",",
"0",
")",
",",
"(",
"self",
".",
"device",
".",
"prompt_re",
",",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"-",
"1",
",",
"None",
",",
"0",
")",
",",
"(",
"driver",
".",
"rommon_re",
",",
"[",
"0",
"]",
",",
"-",
"1",
",",
"partial",
"(",
"a_send",
",",
"\"\\r\\n\"",
")",
",",
"0",
")",
",",
"(",
"pexpect",
".",
"TIMEOUT",
",",
"[",
"0",
"]",
",",
"1",
",",
"partial",
"(",
"a_send",
",",
"\"\\r\\n\"",
")",
",",
"10",
")",
",",
"(",
"pexpect",
".",
"TIMEOUT",
",",
"[",
"2",
"]",
",",
"-",
"1",
",",
"None",
",",
"0",
")",
",",
"(",
"pexpect",
".",
"TIMEOUT",
",",
"[",
"3",
",",
"7",
"]",
",",
"-",
"1",
",",
"ConnectionTimeoutError",
"(",
"\"Connection Timeout\"",
",",
"self",
".",
"hostname",
")",
",",
"0",
")",
",",
"(",
"driver",
".",
"unable_to_connect_re",
",",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"-",
"1",
",",
"a_unable_to_connect",
",",
"0",
")",
",",
"]",
"self",
".",
"log",
"(",
"\"EXPECTED_PROMPT={}\"",
".",
"format",
"(",
"pattern_to_str",
"(",
"self",
".",
"device",
".",
"prompt_re",
")",
")",
")",
"fsm",
"=",
"FSM",
"(",
"\"CONSOLE-SERVER-AUTH\"",
",",
"self",
".",
"device",
",",
"events",
",",
"transitions",
",",
"timeout",
"=",
"_C",
"[",
"'connect_timeout'",
"]",
",",
"init_pattern",
"=",
"self",
".",
"last_pattern",
")",
"return",
"fsm",
".",
"run",
"(",
")"
] | Authenticate using the Console Server protocol specific FSM. | [
"Authenticate",
"using",
"the",
"Console",
"Server",
"protocol",
"specific",
"FSM",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/protocols/console.py#L70-L95 | train |
kstaniek/condoor | condoor/utils.py | delegate | def delegate(attribute_name, method_names):
"""Pass the call to the attribute called attribute_name for every method listed in method_names."""
# hack for python 2.7 as nonlocal is not available
info = {
'attribute': attribute_name,
'methods': method_names
}
def decorator(cls):
"""Decorate class."""
attribute = info['attribute']
if attribute.startswith("__"):
attribute = "_" + cls.__name__ + attribute
for name in info['methods']:
setattr(cls, name, eval("lambda self, *a, **kw: "
"self.{0}.{1}(*a, **kw)".format(attribute, name)))
return cls
return decorator | python | def delegate(attribute_name, method_names):
"""Pass the call to the attribute called attribute_name for every method listed in method_names."""
# hack for python 2.7 as nonlocal is not available
info = {
'attribute': attribute_name,
'methods': method_names
}
def decorator(cls):
"""Decorate class."""
attribute = info['attribute']
if attribute.startswith("__"):
attribute = "_" + cls.__name__ + attribute
for name in info['methods']:
setattr(cls, name, eval("lambda self, *a, **kw: "
"self.{0}.{1}(*a, **kw)".format(attribute, name)))
return cls
return decorator | [
"def",
"delegate",
"(",
"attribute_name",
",",
"method_names",
")",
":",
"info",
"=",
"{",
"'attribute'",
":",
"attribute_name",
",",
"'methods'",
":",
"method_names",
"}",
"def",
"decorator",
"(",
"cls",
")",
":",
"attribute",
"=",
"info",
"[",
"'attribute'",
"]",
"if",
"attribute",
".",
"startswith",
"(",
"\"__\"",
")",
":",
"attribute",
"=",
"\"_\"",
"+",
"cls",
".",
"__name__",
"+",
"attribute",
"for",
"name",
"in",
"info",
"[",
"'methods'",
"]",
":",
"setattr",
"(",
"cls",
",",
"name",
",",
"eval",
"(",
"\"lambda self, *a, **kw: \"",
"\"self.{0}.{1}(*a, **kw)\"",
".",
"format",
"(",
"attribute",
",",
"name",
")",
")",
")",
"return",
"cls",
"return",
"decorator"
] | Pass the call to the attribute called attribute_name for every method listed in method_names. | [
"Pass",
"the",
"call",
"to",
"the",
"attribute",
"called",
"attribute_name",
"for",
"every",
"method",
"listed",
"in",
"method_names",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L12-L29 | train |
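A runnable demonstration, assuming the delegate() decorator defined above is in scope; Bag is an invented class whose list operations are forwarded to its _items attribute.

@delegate('_items', ['append', 'pop'])
class Bag(object):
    def __init__(self):
        self._items = []

bag = Bag()
bag.append('x')   # forwarded to bag._items.append('x')
print(bag.pop())  # -> 'x', forwarded to bag._items.pop()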
kstaniek/condoor | condoor/utils.py | pattern_to_str | def pattern_to_str(pattern):
"""Convert regex pattern to string.
If pattern is string it returns itself,
if pattern is SRE_Pattern then return pattern attribute
:param pattern: pattern object or string
:return: str: pattern string
"""
if isinstance(pattern, str):
return repr(pattern)
else:
return repr(pattern.pattern) if pattern else None | python | def pattern_to_str(pattern):
"""Convert regex pattern to string.
If pattern is string it returns itself,
if pattern is SRE_Pattern then return pattern attribute
:param pattern: pattern object or string
:return: str: pattern string
"""
if isinstance(pattern, str):
return repr(pattern)
else:
return repr(pattern.pattern) if pattern else None | [
"def",
"pattern_to_str",
"(",
"pattern",
")",
":",
"if",
"isinstance",
"(",
"pattern",
",",
"str",
")",
":",
"return",
"repr",
"(",
"pattern",
")",
"else",
":",
"return",
"repr",
"(",
"pattern",
".",
"pattern",
")",
"if",
"pattern",
"else",
"None"
] | Convert regex pattern to string.
If pattern is string it returns itself,
if pattern is SRE_Pattern then return pattern attribute
:param pattern: pattern object or string
:return: str: pattern string | [
"Convert",
"regex",
"pattern",
"to",
"string",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L85-L96 | train |
kstaniek/condoor | condoor/utils.py | levenshtein_distance | def levenshtein_distance(str_a, str_b):
"""Calculate the Levenshtein distance between string a and b.
:param str_a: String - input string a
:param str_b: String - input string b
:return: Number - Levenshtein Distance between string a and b
"""
len_a, len_b = len(str_a), len(str_b)
if len_a > len_b:
str_a, str_b = str_b, str_a
len_a, len_b = len_b, len_a
current = range(len_a + 1)
for i in range(1, len_b + 1):
previous, current = current, [i] + [0] * len_a
for j in range(1, len_a + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if str_a[j - 1] != str_b[i - 1]:
change += + 1
current[j] = min(add, delete, change)
return current[len_a] | python | def levenshtein_distance(str_a, str_b):
"""Calculate the Levenshtein distance between string a and b.
:param str_a: String - input string a
:param str_b: String - input string b
:return: Number - Levenshtein Distance between string a and b
"""
len_a, len_b = len(str_a), len(str_b)
if len_a > len_b:
str_a, str_b = str_b, str_a
len_a, len_b = len_b, len_a
current = range(len_a + 1)
for i in range(1, len_b + 1):
previous, current = current, [i] + [0] * len_a
for j in range(1, len_a + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if str_a[j - 1] != str_b[i - 1]:
change += + 1
current[j] = min(add, delete, change)
return current[len_a] | [
"def",
"levenshtein_distance",
"(",
"str_a",
",",
"str_b",
")",
":",
"len_a",
",",
"len_b",
"=",
"len",
"(",
"str_a",
")",
",",
"len",
"(",
"str_b",
")",
"if",
"len_a",
">",
"len_b",
":",
"str_a",
",",
"str_b",
"=",
"str_b",
",",
"str_a",
"len_a",
",",
"len_b",
"=",
"len_b",
",",
"len_a",
"current",
"=",
"range",
"(",
"len_a",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len_b",
"+",
"1",
")",
":",
"previous",
",",
"current",
"=",
"current",
",",
"[",
"i",
"]",
"+",
"[",
"0",
"]",
"*",
"len_a",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"len_a",
"+",
"1",
")",
":",
"add",
",",
"delete",
"=",
"previous",
"[",
"j",
"]",
"+",
"1",
",",
"current",
"[",
"j",
"-",
"1",
"]",
"+",
"1",
"change",
"=",
"previous",
"[",
"j",
"-",
"1",
"]",
"if",
"str_a",
"[",
"j",
"-",
"1",
"]",
"!=",
"str_b",
"[",
"i",
"-",
"1",
"]",
":",
"change",
"+=",
"+",
"1",
"current",
"[",
"j",
"]",
"=",
"min",
"(",
"add",
",",
"delete",
",",
"change",
")",
"return",
"current",
"[",
"len_a",
"]"
] | Calculate the Levenshtein distance between string a and b.
:param str_a: String - input string a
:param str_b: String - input string b
:return: Number - Levenshtein Distance between string a and b | [
"Calculate",
"the",
"Levenshtein",
"distance",
"between",
"string",
"a",
"and",
"b",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L99-L119 | train |
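A worked example, assuming levenshtein_distance() above is in scope: "kitten" becomes "sitting" in exactly three single-character edits, and the distance to or from an empty string is just the other string's length.

# kitten -> sitten (substitute s for k)
# sitten -> sittin (substitute i for e)
# sittin -> sitting (insert g)
assert levenshtein_distance("kitten", "sitting") == 3
assert levenshtein_distance("", "abc") == 3   # three insertions
assert levenshtein_distance("same", "same") == 0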
kstaniek/condoor | condoor/utils.py | parse_inventory | def parse_inventory(inventory_output=None):
"""Parse the inventory text and return udi dict."""
udi = {
"name": "",
"description": "",
"pid": "",
"vid": "",
"sn": ""
}
if inventory_output is None:
return udi
# find the record with chassis text in name or descr
capture_next = False
chassis_udi_text = None
for line in inventory_output.split('\n'):
lc_line = line.lower()
if ('chassis' in lc_line or 'switch system' in lc_line or 'rack' in lc_line) and 'name' in lc_line and 'descr' in lc_line:
capture_next = True
chassis_udi_text = line
continue
if capture_next:
inventory_output = chassis_udi_text + "\n" + line
break
match = re.search(r"(?i)NAME: (?P<name>.*?),? (?i)DESCR", inventory_output, re.MULTILINE)
if match:
udi['name'] = match.group('name').strip('" ,')
match = re.search(r"(?i)DESCR: (?P<description>.*)", inventory_output, re.MULTILINE)
if match:
udi['description'] = match.group('description').strip('" ')
match = re.search(r"(?i)PID: (?P<pid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['pid'] = match.group('pid')
match = re.search(r"(?i)VID: (?P<vid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['vid'] = match.group('vid')
match = re.search(r"(?i)SN: (?P<sn>.*)", inventory_output, re.MULTILINE)
if match:
udi['sn'] = match.group('sn').strip()
return udi | python | def parse_inventory(inventory_output=None):
"""Parse the inventory text and return udi dict."""
udi = {
"name": "",
"description": "",
"pid": "",
"vid": "",
"sn": ""
}
if inventory_output is None:
return udi
# find the record with chassis text in name or descr
capture_next = False
chassis_udi_text = None
for line in inventory_output.split('\n'):
lc_line = line.lower()
if ('chassis' in lc_line or 'switch system' in lc_line or 'rack' in lc_line) and 'name' in lc_line and 'descr' in lc_line:
capture_next = True
chassis_udi_text = line
continue
if capture_next:
inventory_output = chassis_udi_text + "\n" + line
break
match = re.search(r"(?i)NAME: (?P<name>.*?),? (?i)DESCR", inventory_output, re.MULTILINE)
if match:
udi['name'] = match.group('name').strip('" ,')
match = re.search(r"(?i)DESCR: (?P<description>.*)", inventory_output, re.MULTILINE)
if match:
udi['description'] = match.group('description').strip('" ')
match = re.search(r"(?i)PID: (?P<pid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['pid'] = match.group('pid')
match = re.search(r"(?i)VID: (?P<vid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['vid'] = match.group('vid')
match = re.search(r"(?i)SN: (?P<sn>.*)", inventory_output, re.MULTILINE)
if match:
udi['sn'] = match.group('sn').strip()
return udi | [
"def",
"parse_inventory",
"(",
"inventory_output",
"=",
"None",
")",
":",
"udi",
"=",
"{",
"\"name\"",
":",
"\"\"",
",",
"\"description\"",
":",
"\"\"",
",",
"\"pid\"",
":",
"\"\"",
",",
"\"vid\"",
":",
"\"\"",
",",
"\"sn\"",
":",
"\"\"",
"}",
"if",
"inventory_output",
"is",
"None",
":",
"return",
"udi",
"capture_next",
"=",
"False",
"chassis_udi_text",
"=",
"None",
"for",
"line",
"in",
"inventory_output",
".",
"split",
"(",
"'\\n'",
")",
":",
"lc_line",
"=",
"line",
".",
"lower",
"(",
")",
"if",
"(",
"'chassis'",
"in",
"lc_line",
"or",
"'switch system'",
"in",
"lc_line",
"or",
"'rack'",
"in",
"lc_line",
")",
"and",
"'name'",
"in",
"lc_line",
"and",
"'descr'",
":",
"capture_next",
"=",
"True",
"chassis_udi_text",
"=",
"line",
"continue",
"if",
"capture_next",
":",
"inventory_output",
"=",
"chassis_udi_text",
"+",
"\"\\n\"",
"+",
"line",
"break",
"match",
"=",
"re",
".",
"search",
"(",
"r\"(?i)NAME: (?P<name>.*?),? (?i)DESCR\"",
",",
"inventory_output",
",",
"re",
".",
"MULTILINE",
")",
"if",
"match",
":",
"udi",
"[",
"'name'",
"]",
"=",
"match",
".",
"group",
"(",
"'name'",
")",
".",
"strip",
"(",
"'\" ,'",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r\"(?i)DESCR: (?P<description>.*)\"",
",",
"inventory_output",
",",
"re",
".",
"MULTILINE",
")",
"if",
"match",
":",
"udi",
"[",
"'description'",
"]",
"=",
"match",
".",
"group",
"(",
"'description'",
")",
".",
"strip",
"(",
"'\" '",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r\"(?i)PID: (?P<pid>.*?),? \"",
",",
"inventory_output",
",",
"re",
".",
"MULTILINE",
")",
"if",
"match",
":",
"udi",
"[",
"'pid'",
"]",
"=",
"match",
".",
"group",
"(",
"'pid'",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r\"(?i)VID: (?P<vid>.*?),? \"",
",",
"inventory_output",
",",
"re",
".",
"MULTILINE",
")",
"if",
"match",
":",
"udi",
"[",
"'vid'",
"]",
"=",
"match",
".",
"group",
"(",
"'vid'",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r\"(?i)SN: (?P<sn>.*)\"",
",",
"inventory_output",
",",
"re",
".",
"MULTILINE",
")",
"if",
"match",
":",
"udi",
"[",
"'sn'",
"]",
"=",
"match",
".",
"group",
"(",
"'sn'",
")",
".",
"strip",
"(",
")",
"return",
"udi"
] | Parse the inventory text and return udi dict. | [
"Parse",
"the",
"inventory",
"text",
"and",
"return",
"udi",
"dict",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L122-L165 | train |
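A quick exercise of parse_inventory() (assumed in scope) against invented "show inventory" output; all hardware identifiers are placeholders.

sample = ('NAME: "Rack 0", DESCR: "Invented 8-slot chassis"\n'
          'PID: FAKE-CHASSIS, VID: V01, SN: ABC0000XYZ')

udi = parse_inventory(sample)
print(udi['name'])  # -> Rack 0
print(udi['pid'])   # -> FAKE-CHASSIS
print(udi['sn'])    # -> ABC0000XYZ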
kstaniek/condoor | condoor/utils.py | normalize_urls | def normalize_urls(urls):
"""Overload urls and make list of lists of urls."""
_urls = []
if isinstance(urls, list):
if urls:
if isinstance(urls[0], list):
# multiple connections (list of the lists)
_urls = urls
elif isinstance(urls[0], str):
# single connections (make it list of the lists)
_urls = [urls]
else:
raise RuntimeError("No target host url provided.")
elif isinstance(urls, str):
_urls = [[urls]]
return _urls | python | def normalize_urls(urls):
"""Overload urls and make list of lists of urls."""
_urls = []
if isinstance(urls, list):
if urls:
if isinstance(urls[0], list):
# multiple connections (list of the lists)
_urls = urls
elif isinstance(urls[0], str):
# single connections (make it list of the lists)
_urls = [urls]
else:
raise RuntimeError("No target host url provided.")
elif isinstance(urls, str):
_urls = [[urls]]
return _urls | [
"def",
"normalize_urls",
"(",
"urls",
")",
":",
"_urls",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"urls",
",",
"list",
")",
":",
"if",
"urls",
":",
"if",
"isinstance",
"(",
"urls",
"[",
"0",
"]",
",",
"list",
")",
":",
"_urls",
"=",
"urls",
"elif",
"isinstance",
"(",
"urls",
"[",
"0",
"]",
",",
"str",
")",
":",
"_urls",
"=",
"[",
"urls",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No target host url provided.\"",
")",
"elif",
"isinstance",
"(",
"urls",
",",
"str",
")",
":",
"_urls",
"=",
"[",
"[",
"urls",
"]",
"]",
"return",
"_urls"
] | Overload urls and make list of lists of urls. | [
"Overload",
"urls",
"and",
"make",
"list",
"of",
"lists",
"of",
"urls",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L233-L248 | train |
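Assuming normalize_urls() above is in scope, the three accepted input shapes all come out as a list of url lists:

print(normalize_urls('telnet://host1'))
# -> [['telnet://host1']]
print(normalize_urls(['ssh://jump', 'telnet://host1']))
# -> [['ssh://jump', 'telnet://host1']] (a single chain through a jumphost)
print(normalize_urls([['ssh://a'], ['ssh://b']]))
# -> [['ssh://a'], ['ssh://b']] (two alternative chains, unchanged)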
kstaniek/condoor | condoor/utils.py | yaml_file_to_dict | def yaml_file_to_dict(script_name, path=None):
"""Read yaml file and return the dict.
It assumes the module file exists with the defaults.
If the CONDOOR_{SCRIPT_NAME} env is set then the user file from the env is loaded and merged with the default
There can be a user file located in the ~/.condoor directory with the {script_name}.yaml filename. If it exists,
it is merged with the default config.
"""
def load_yaml(file_path):
"""Load YAML file from full file path and return dict."""
with open(file_path, 'r') as yamlfile:
try:
dictionary = yaml.load(yamlfile)
except yaml.YAMLError:
return {}
return dictionary
def merge(user, default):
"""Merge two dicts."""
if isinstance(user, dict) and isinstance(default, dict):
for k, v in default.iteritems():
if k not in user:
user[k] = v
else:
user[k] = merge(user[k], v)
return user
if path is None:
path = os.path.abspath('.')
config_file_path = os.path.join(path, script_name + '.yaml')
if not os.path.exists(config_file_path):
raise RuntimeError('Config file does not exist: {}'.format(config_file_path))
default_dict = load_yaml(config_file_path)
user_config_file_path = os.path.join(os.path.expanduser('~'), '.condoor', os.path.basename(script_name) + '.yaml')
user_config_file_path = os.getenv('CONDOOR_' + os.path.basename(script_name).upper(), user_config_file_path)
if os.path.exists(user_config_file_path):
user_dict = load_yaml(user_config_file_path)
if user_dict:
default_dict = merge(user_dict, default_dict)
return default_dict | python | def yaml_file_to_dict(script_name, path=None):
"""Read yaml file and return the dict.
It assumes the module file exists with the defaults.
If the CONDOOR_{SCRIPT_NAME} env is set then the user file from the env is loaded and merged with the default
There can be a user file located in the ~/.condoor directory with the {script_name}.yaml filename. If it exists,
it is merged with the default config.
"""
def load_yaml(file_path):
"""Load YAML file from full file path and return dict."""
with open(file_path, 'r') as yamlfile:
try:
dictionary = yaml.load(yamlfile)
except yaml.YAMLError:
return {}
return dictionary
def merge(user, default):
"""Merge two dicts."""
if isinstance(user, dict) and isinstance(default, dict):
for k, v in default.iteritems():
if k not in user:
user[k] = v
else:
user[k] = merge(user[k], v)
return user
if path is None:
path = os.path.abspath('.')
config_file_path = os.path.join(path, script_name + '.yaml')
if not os.path.exists(config_file_path):
raise RuntimeError('Config file does not exist: {}'.format(config_file_path))
default_dict = load_yaml(config_file_path)
user_config_file_path = os.path.join(os.path.expanduser('~'), '.condoor', os.path.basename(script_name) + '.yaml')
user_config_file_path = os.getenv('CONDOOR_' + os.path.basename(script_name).upper(), user_config_file_path)
if os.path.exists(user_config_file_path):
user_dict = load_yaml(user_config_file_path)
if user_dict:
default_dict = merge(user_dict, default_dict)
return default_dict | [
"def",
"yaml_file_to_dict",
"(",
"script_name",
",",
"path",
"=",
"None",
")",
":",
"def",
"load_yaml",
"(",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'r'",
")",
"as",
"yamlfile",
":",
"try",
":",
"dictionary",
"=",
"yaml",
".",
"load",
"(",
"yamlfile",
")",
"except",
"yaml",
".",
"YAMLError",
":",
"return",
"{",
"}",
"return",
"dictionary",
"def",
"merge",
"(",
"user",
",",
"default",
")",
":",
"if",
"isinstance",
"(",
"user",
",",
"dict",
")",
"and",
"isinstance",
"(",
"default",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"default",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"user",
":",
"user",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"user",
"[",
"k",
"]",
"=",
"merge",
"(",
"user",
"[",
"k",
"]",
",",
"v",
")",
"return",
"user",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"'.'",
")",
"config_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"script_name",
"+",
"'.yaml'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config_file_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'Config file does not exist: {}'",
".",
"format",
"(",
"config_file_path",
")",
")",
"default_dict",
"=",
"load_yaml",
"(",
"config_file_path",
")",
"user_config_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.condoor'",
",",
"os",
".",
"path",
".",
"basename",
"(",
"script_name",
")",
"+",
"'.yaml'",
")",
"user_config_file_path",
"=",
"os",
".",
"getenv",
"(",
"'CONDOOR_'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"script_name",
")",
".",
"upper",
"(",
")",
",",
"user_config_file_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"user_config_file_path",
")",
":",
"user_dict",
"=",
"load_yaml",
"(",
"user_config_file_path",
")",
"if",
"user_dict",
":",
"default_dict",
"=",
"merge",
"(",
"user_dict",
",",
"default_dict",
")",
"return",
"default_dict"
] | Read yaml file and return the dict.
It assumes the module file exists with the defaults.
If the CONDOOR_{SCRIPT_NAME} env is set then the user file from the env is loaded and merged with the default
There can be a user file located in the ~/.condoor directory with the {script_name}.yaml filename. If it exists,
it is merged with the default config. | [
"Read",
"yaml",
"file",
"and",
"return",
"the",
"dict",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L280-L323 | train |
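The inner merge() is the interesting part; here it is restated for Python 3 (items() instead of the original's iteritems()) with invented dicts, showing that user values win while missing defaults are filled in recursively.

def merge(user, default):
    # User values win; defaults fill the gaps, recursing into sub-dicts.
    if isinstance(user, dict) and isinstance(default, dict):
        for k, v in default.items():
            if k not in user:
                user[k] = v
            else:
                user[k] = merge(user[k], v)
    return user

defaults = {'timeout': 30, 'log': {'level': 'info', 'path': '/var/log'}}
user = {'log': {'level': 'debug'}}
print(merge(user, defaults))
# -> {'log': {'level': 'debug', 'path': '/var/log'}, 'timeout': 30}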
kstaniek/condoor | condoor/utils.py | FilteredFile.write | def write(self, text):
"""Override the standard write method to filter the content."""
index = text.find('\n')
if index == -1:
self._buffer = self._buffer + text
else:
self._buffer = self._buffer + text[:index + 1]
if self._pattern:
# pattern already compiled no need to check
result = re.search(self._pattern, self._buffer)
if result:
for group in result.groups():
if group:
self._buffer = self._buffer.replace(group, "***")
self._file.write(self._buffer)
self._file.flush()
self._buffer = text[index + 1:] | python | def write(self, text):
"""Override the standard write method to filter the content."""
index = text.find('\n')
if index == -1:
self._buffer = self._buffer + text
else:
self._buffer = self._buffer + text[:index + 1]
if self._pattern:
# pattern already compiled no need to check
result = re.search(self._pattern, self._buffer)
if result:
for group in result.groups():
if group:
self._buffer = self._buffer.replace(group, "***")
self._file.write(self._buffer)
self._file.flush()
self._buffer = text[index + 1:] | [
"def",
"write",
"(",
"self",
",",
"text",
")",
":",
"index",
"=",
"text",
".",
"find",
"(",
"'\\n'",
")",
"if",
"index",
"==",
"-",
"1",
":",
"self",
".",
"_buffer",
"=",
"self",
".",
"_buffer",
"+",
"text",
"else",
":",
"self",
".",
"_buffer",
"=",
"self",
".",
"_buffer",
"+",
"text",
"[",
":",
"index",
"+",
"1",
"]",
"if",
"self",
".",
"_pattern",
":",
"result",
"=",
"re",
".",
"search",
"(",
"self",
".",
"_pattern",
",",
"self",
".",
"_buffer",
")",
"if",
"result",
":",
"for",
"group",
"in",
"result",
".",
"groups",
"(",
")",
":",
"if",
"group",
":",
"self",
".",
"_buffer",
"=",
"self",
".",
"_buffer",
".",
"replace",
"(",
"group",
",",
"\"***\"",
")",
"self",
".",
"_file",
".",
"write",
"(",
"self",
".",
"_buffer",
")",
"self",
".",
"_file",
".",
"flush",
"(",
")",
"self",
".",
"_buffer",
"=",
"text",
"[",
"index",
"+",
"1",
":",
"]"
] | Override the standard write method to filter the content. | [
"Override",
"the",
"standard",
"write",
"method",
"to",
"filter",
"the",
"content",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/utils.py#L193-L209 | train |
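A self-contained rerun of the buffering logic in write() against an in-memory file: nothing is emitted until a full line is available, and any regex groups that matched are masked. Scrubber is a trimmed restatement for the demo, not condoor's actual class.

import io
import re

class Scrubber(object):
    def __init__(self, target, pattern):
        self._file, self._pattern, self._buffer = target, pattern, ""

    def write(self, text):
        index = text.find('\n')
        if index == -1:
            self._buffer += text  # no complete line yet, keep buffering
            return
        self._buffer += text[:index + 1]
        result = re.search(self._pattern, self._buffer)
        if result:
            for group in result.groups():
                if group:
                    self._buffer = self._buffer.replace(group, "***")
        self._file.write(self._buffer)
        self._buffer = text[index + 1:]

out = io.StringIO()
log = Scrubber(out, re.compile(r"password (\S+)"))
log.write("sending password hunter2\n")
print(out.getvalue())  # -> sending password ***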
ajdavis/GreenletProfiler | _vendorized_yappi/yappi.py | start | def start(builtins=False, profile_threads=True):
"""
Start profiler.
"""
if profile_threads:
threading.setprofile(_callback)
_yappi.start(builtins, profile_threads) | python | def start(builtins=False, profile_threads=True):
"""
Start profiler.
"""
if profile_threads:
threading.setprofile(_callback)
_yappi.start(builtins, profile_threads) | [
"def",
"start",
"(",
"builtins",
"=",
"False",
",",
"profile_threads",
"=",
"True",
")",
":",
"if",
"profile_threads",
":",
"threading",
".",
"setprofile",
"(",
"_callback",
")",
"_yappi",
".",
"start",
"(",
"builtins",
",",
"profile_threads",
")"
] | Start profiler. | [
"Start",
"profiler",
"."
] | 700349864a4f368a8a73a2a60f048c2e818d7cea | https://github.com/ajdavis/GreenletProfiler/blob/700349864a4f368a8a73a2a60f048c2e818d7cea/_vendorized_yappi/yappi.py#L700-L706 | train |
ajdavis/GreenletProfiler | _vendorized_yappi/yappi.py | set_clock_type | def set_clock_type(type):
"""
Sets the internal clock type for timing. Profiler shall not have any previous stats.
Otherwise an exception is thrown.
"""
type = type.upper()
if type not in CLOCK_TYPES:
raise YappiError("Invalid clock type:%s" % (type))
_yappi.set_clock_type(CLOCK_TYPES[type]) | python | def set_clock_type(type):
"""
Sets the internal clock type for timing. Profiler shall not have any previous stats.
Otherwise an exception is thrown.
"""
type = type.upper()
if type not in CLOCK_TYPES:
raise YappiError("Invalid clock type:%s" % (type))
_yappi.set_clock_type(CLOCK_TYPES[type]) | [
"def",
"set_clock_type",
"(",
"type",
")",
":",
"type",
"=",
"type",
".",
"upper",
"(",
")",
"if",
"type",
"not",
"in",
"CLOCK_TYPES",
":",
"raise",
"YappiError",
"(",
"\"Invalid clock type:%s\"",
"%",
"(",
"type",
")",
")",
"_yappi",
".",
"set_clock_type",
"(",
"CLOCK_TYPES",
"[",
"type",
"]",
")"
] | Sets the internal clock type for timing. Profiler shall not have any previous stats.
Otherwise an exception is thrown. | [
"Sets",
"the",
"internal",
"clock",
"type",
"for",
"timing",
".",
"Profiler",
"shall",
"not",
"have",
"any",
"previous",
"stats",
".",
"Otherwise",
"an",
"exception",
"is",
"thrown",
"."
] | 700349864a4f368a8a73a2a60f048c2e818d7cea | https://github.com/ajdavis/GreenletProfiler/blob/700349864a4f368a8a73a2a60f048c2e818d7cea/_vendorized_yappi/yappi.py#L755-L764 | train |
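A hypothetical profiling session with the module-level API above; set_clock_type() must run before any stats accumulate, per its docstring. stop() and get_func_stats() are part of the standard yappi API but are not shown in this file, so treat them as assumptions here.

import yappi

yappi.set_clock_type("cpu")  # "wall" is the other accepted clock type
yappi.start(builtins=False, profile_threads=True)
sum(i * i for i in range(100000))  # workload to measure
yappi.stop()
yappi.get_func_stats().print_all()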
hwmrocker/smtplibaio | smtplibaio/streams.py | SMTPStreamReader.read_reply | async def read_reply(self):
"""
Reads a reply from the server.
Raises:
ConnectionResetError: If the connection with the server is lost
(we can't read any response anymore). Or if the server
replies without a proper return code.
Returns:
(int, str): A (code, full_message) 2-tuple consisting of:
- server response code ;
- server response string corresponding to response code
(multiline responses are returned in a single string).
"""
code = 500
messages = []
go_on = True
while go_on:
try:
line = await self.readline()
except ValueError as e:
# ValueError is raised when limit is reached before we could
# get an entire line.
# We return what we got with a 500 code and we stop to read
# the reply to avoid being flooded.
code = 500
go_on = False
else:
try:
code = int(line[:3])
except ValueError as e:
# We either:
# - Got an empty line (connection is probably down),
# - Got a line without a valid return code.
# In both case, it shouldn't happen, hence:
raise ConnectionResetError("Connection lost.") from e
else:
# Check if we have a multiline response:
go_on = line[3:4] == b"-"
message = line[4:].strip(b" \t\r\n").decode("ascii")
messages.append(message)
full_message = "\n".join(messages)
return code, full_message | python | async def read_reply(self):
"""
Reads a reply from the server.
Raises:
ConnectionResetError: If the connection with the server is lost
(we can't read any response anymore). Or if the server
replies without a proper return code.
Returns:
(int, str): A (code, full_message) 2-tuple consisting of:
- server response code ;
- server response string corresponding to response code
(multiline responses are returned in a single string).
"""
code = 500
messages = []
go_on = True
while go_on:
try:
line = await self.readline()
except ValueError as e:
# ValueError is raised when limit is reached before we could
# get an entire line.
# We return what we got with a 500 code and we stop to read
# the reply to avoid being flooded.
code = 500
go_on = False
else:
try:
code = int(line[:3])
except ValueError as e:
# We either:
# - Got an empty line (connection is probably down),
# - Got a line without a valid return code.
# In both case, it shouldn't happen, hence:
raise ConnectionResetError("Connection lost.") from e
else:
# Check if we have a multiline response:
go_on = line[3:4] == b"-"
message = line[4:].strip(b" \t\r\n").decode("ascii")
messages.append(message)
full_message = "\n".join(messages)
return code, full_message | [
"async",
"def",
"read_reply",
"(",
"self",
")",
":",
"code",
"=",
"500",
"messages",
"=",
"[",
"]",
"go_on",
"=",
"True",
"while",
"go_on",
":",
"try",
":",
"line",
"=",
"await",
"self",
".",
"readline",
"(",
")",
"except",
"ValueError",
"as",
"e",
":",
"code",
"=",
"500",
"go_on",
"=",
"False",
"else",
":",
"try",
":",
"code",
"=",
"int",
"(",
"line",
"[",
":",
"3",
"]",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ConnectionResetError",
"(",
"\"Connection lost.\"",
")",
"from",
"e",
"else",
":",
"go_on",
"=",
"line",
"[",
"3",
":",
"4",
"]",
"==",
"b\"-\"",
"message",
"=",
"line",
"[",
"4",
":",
"]",
".",
"strip",
"(",
"b\" \\t\\r\\n\"",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"messages",
".",
"append",
"(",
"message",
")",
"full_message",
"=",
"\"\\n\"",
".",
"join",
"(",
"messages",
")",
"return",
"code",
",",
"full_message"
] | Reads a reply from the server.
Raises:
ConnectionResetError: If the connection with the server is lost
(we can't read any response anymore). Or if the server
replies without a proper return code.
Returns:
(int, str): A (code, full_message) 2-tuple consisting of:
- server response code ;
- server response string corresponding to response code
(multiline responses are returned in a single string). | [
"Reads",
"a",
"reply",
"from",
"the",
"server",
"."
] | 84ce8e45b7e706476739d0efcb416c18ecabbbb6 | https://github.com/hwmrocker/smtplibaio/blob/84ce8e45b7e706476739d0efcb416c18ecabbbb6/smtplibaio/streams.py#L31-L79 | train |
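The parsing rules are easy to check offline: the reply code is the first three bytes and a hyphen in byte four marks a continuation line. This loop re-applies them to a canned multi-line EHLO reply with no networking involved.

lines = [b"250-smtp.example.com Hello\r\n",
         b"250-SIZE 35882577\r\n",
         b"250 OK\r\n"]

messages = []
for line in lines:
    code = int(line[:3])         # three-digit reply code
    go_on = line[3:4] == b"-"    # b"-" means more lines follow
    messages.append(line[4:].strip(b" \t\r\n").decode("ascii"))
    if not go_on:
        break

print(code, "\n".join(messages))  # -> 250 followed by the three message parts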
kstaniek/condoor | condoor/hopinfo.py | make_hop_info_from_url | def make_hop_info_from_url(url, verify_reachability=None):
"""Build HopInfo object from url.
It allows only telnet and ssh as a valid protocols.
Args:
url (str): The url string describing the node. i.e.
telnet://[email protected]. The protocol, username and address
portion of url is mandatory. Port and password is optional.
If port is missing the standard protocol -> port mapping is done.
The password is optional i.e. for TS access directly to console
ports.
The path part is treated as additional password required for some
systems, i.e. enable password for IOS devices.:
telnet://<username>:<password>@<host>:<port>/<enable_password>
<enable_password> is optional
verify_reachability: This is optional callable returning boolean
if node is reachable. It can be used to verify reachability
of the node before making a connection. It can speed up the
connection process when node not reachable especially with
telnet having long timeout.
Returns:
HopInfo object or None if url is invalid or protocol not supported
"""
parsed = urlparse(url)
username = None if parsed.username is None else unquote(parsed.username) # It's None if not exists
password = None if parsed.password is None else unquote(parsed.password) # It's None if not exists
try:
enable_password = parse_qs(parsed.query)["enable_password"][0]
except KeyError:
enable_password = None
hop_info = HopInfo(
parsed.scheme,
parsed.hostname,
username,
password,
parsed.port,
enable_password,
verify_reachability=verify_reachability
)
if hop_info.is_valid():
return hop_info
raise InvalidHopInfoError | python | def make_hop_info_from_url(url, verify_reachability=None):
"""Build HopInfo object from url.
It allows only telnet and ssh as a valid protocols.
Args:
url (str): The url string describing the node. i.e.
telnet://[email protected]. The protocol, username and address
portion of url is mandatory. Port and password is optional.
If port is missing the standard protocol -> port mapping is done.
The password is optional i.e. for TS access directly to console
ports.
The path part is treated as additional password required for some
systems, i.e. enable password for IOS devices.:
telnet://<username>:<password>@<host>:<port>/<enable_password>
<enable_password> is optional
verify_reachability: This is optional callable returning boolean
if node is reachable. It can be used to verify reachability
of the node before making a connection. It can speed up the
connection process when node not reachable especially with
telnet having long timeout.
Returns:
HopInfo object or None if url is invalid or protocol not supported
"""
parsed = urlparse(url)
username = None if parsed.username is None else unquote(parsed.username) # It's None if not exists
password = None if parsed.password is None else unquote(parsed.password) # It's None if not exists
try:
enable_password = parse_qs(parsed.query)["enable_password"][0]
except KeyError:
enable_password = None
hop_info = HopInfo(
parsed.scheme,
parsed.hostname,
username,
password,
parsed.port,
enable_password,
verify_reachability=verify_reachability
)
if hop_info.is_valid():
return hop_info
raise InvalidHopInfoError | [
"def",
"make_hop_info_from_url",
"(",
"url",
",",
"verify_reachability",
"=",
"None",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"username",
"=",
"None",
"if",
"parsed",
".",
"username",
"is",
"None",
"else",
"unquote",
"(",
"parsed",
".",
"username",
")",
"password",
"=",
"None",
"if",
"parsed",
".",
"password",
"is",
"None",
"else",
"unquote",
"(",
"parsed",
".",
"password",
")",
"try",
":",
"enable_password",
"=",
"parse_qs",
"(",
"parsed",
".",
"query",
")",
"[",
"\"enable_password\"",
"]",
"[",
"0",
"]",
"except",
"KeyError",
":",
"enable_password",
"=",
"None",
"hop_info",
"=",
"HopInfo",
"(",
"parsed",
".",
"scheme",
",",
"parsed",
".",
"hostname",
",",
"username",
",",
"password",
",",
"parsed",
".",
"port",
",",
"enable_password",
",",
"verify_reachability",
"=",
"verify_reachability",
")",
"if",
"hop_info",
".",
"is_valid",
"(",
")",
":",
"return",
"hop_info",
"raise",
"InvalidHopInfoError"
] | Build HopInfo object from url.
It allows only telnet and ssh as a valid protocols.
Args:
url (str): The url string describing the node. i.e.
telnet://[email protected]. The protocol, username and address
portion of url is mandatory. Port and password is optional.
If port is missing the standard protocol -> port mapping is done.
The password is optional i.e. for TS access directly to console
ports.
The path part is treated as additional password required for some
systems, i.e. enable password for IOS devices.:
telnet://<username>:<password>@<host>:<port>/<enable_password>
<enable_password> is optional
verify_reachability: This is optional callable returning boolean
if node is reachable. It can be used to verify reachability
of the node before making a connection. It can speed up the
connection process when node not reachable especially with
telnet having long timeout.
Returns:
HopInfo object or None if url is invalid or protocol not supported | [
"Build",
"HopInfo",
"object",
"from",
"url",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/hopinfo.py#L16-L63 | train |
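Assuming make_hop_info_from_url() above is importable, a percent-encoded password round-trips through unquote(); the host and credentials are invented. Only hostname and port are attribute names confirmed by this file (is_reachable() below uses them), and whether is_valid() accepts this URL is also an assumption, since that method is not shown.

hop = make_hop_info_from_url(
    "telnet://admin:p%40ss@10.0.0.1:2048?enable_password=extra")
print(hop.hostname, hop.port)  # -> 10.0.0.1 2048
print(hop.is_reachable())      # -> True (no verify_reachability callable given)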
kstaniek/condoor | condoor/hopinfo.py | HopInfo.is_reachable | def is_reachable(self):
"""Return if host is reachable."""
if self.verify_reachability and \
hasattr(self.verify_reachability, '__call__'):
return self.verify_reachability(host=self.hostname, port=self.port)
# assume is reachable if can't verify
return True | python | def is_reachable(self):
"""Return if host is reachable."""
if self.verify_reachability and \
hasattr(self.verify_reachability, '__call__'):
return self.verify_reachability(host=self.hostname, port=self.port)
# assume is reachable if can't verify
return True | [
"def",
"is_reachable",
"(",
"self",
")",
":",
"if",
"self",
".",
"verify_reachability",
"and",
"hasattr",
"(",
"self",
".",
"verify_reachability",
",",
"'__call__'",
")",
":",
"return",
"self",
".",
"verify_reachability",
"(",
"host",
"=",
"self",
".",
"hostname",
",",
"port",
"=",
"self",
".",
"port",
")",
"return",
"True"
] | Return if host is reachable. | [
"Return",
"if",
"host",
"is",
"reachable",
"."
] | 77c054b29d4e286c1d7aca2c74dff86b805e1fae | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/hopinfo.py#L118-L124 | train |