Dataset schema (column name: type, observed range):
body_hash: stringlengths, 64 to 64
body: stringlengths, 23 to 109k
docstring: stringlengths, 1 to 57k
path: stringlengths, 4 to 198
name: stringlengths, 1 to 115
repository_name: stringlengths, 7 to 111
repository_stars: float64, 0 to 191k
lang: stringclasses, 1 value
body_without_docstring: stringlengths, 14 to 108k
unified: stringlengths, 45 to 133k
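Each row below pairs a Python function (body) with its extracted docstring; the unified column is simply body_without_docstring followed by the docstring between <|docstring|> and <|endoftext|> markers. A minimal sketch of how a row could be recombined, using values copied from one of the rows below (the helper name build_unified is illustrative, not part of any dataset tooling):

def build_unified(row: dict) -> str:
    # unified = code without docstring + docstring, joined by sentinel tokens,
    # matching the format of the `unified` fields in the records below.
    return f"{row['body_without_docstring']}<|docstring|>{row['docstring']}<|endoftext|>"

example_row = {
    "body_without_docstring": "def close(self): self.connection.close()",
    "docstring": "Close the connection to the database.",
}
print(build_unified(example_row))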
8e7b8edb646d4e8fadcdca86d701f8c615778097cb8ff4989f674fb4f03fd734
@blocker.command('toggle', aliases=['設定', 't']) @commands.has_guild_permissions(manage_messages=True) @commands.cooldown(1, 10, commands.BucketType.guild) @setting.Setting('guild', 'Emoji Blocker 0', HELP) async def _toggle(self, ctx: commands.Context, *, mode: DataManager.Mode): "!lang ja\n --------\n リアクションか絵文字またはスタンプのブロックの有効/無効の切り替えをします。 \n ですが、有効にしても対象のロールを追加しなければ意味がありませんので、削除対象のロールの設定を忘れずに。\n\n Parameters\n ----------\n mode : emoji または stamp または reaction\n 削除するものです。\n\n Aliases\n -------\n t, 設定\n\n !lang en\n --------\n Enable/disable the blocking of emoji or stamps or reaction. \n But even if you enable it, it will be useless if you don't add the target role, so don't forget to set the target role for deletion.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n It is what RT will delete.\n\n Aliases\n -------\n t" (await ctx.trigger_typing()) (await self.write(ctx.guild.id, mode)) (await ctx.reply('Ok'))
!lang ja -------- リアクションか絵文字またはスタンプのブロックの有効/無効の切り替えをします。 ですが、有効にしても対象のロールを追加しなければ意味がありませんので、削除対象のロールの設定を忘れずに。 Parameters ---------- mode : emoji または stamp または reaction 削除するものです。 Aliases ------- t, 設定 !lang en -------- Enable/disable the blocking of emoji or stamps or reaction. But even if you enable it, it will be useless if you don't add the target role, so don't forget to set the target role for deletion. Parameters ---------- mode : emoji / stamp / reaction It is what RT will delete. Aliases ------- t
cogs/blocker.py
_toggle
RT-Team/rt-bot
26
python
@blocker.command('toggle', aliases=['設定', 't']) @commands.has_guild_permissions(manage_messages=True) @commands.cooldown(1, 10, commands.BucketType.guild) @setting.Setting('guild', 'Emoji Blocker 0', HELP) async def _toggle(self, ctx: commands.Context, *, mode: DataManager.Mode): "!lang ja\n --------\n リアクションか絵文字またはスタンプのブロックの有効/無効の切り替えをします。 \n ですが、有効にしても対象のロールを追加しなければ意味がありませんので、削除対象のロールの設定を忘れずに。\n\n Parameters\n ----------\n mode : emoji または stamp または reaction\n 削除するものです。\n\n Aliases\n -------\n t, 設定\n\n !lang en\n --------\n Enable/disable the blocking of emoji or stamps or reaction. \n But even if you enable it, it will be useless if you don't add the target role, so don't forget to set the target role for deletion.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n It is what RT will delete.\n\n Aliases\n -------\n t" (await ctx.trigger_typing()) (await self.write(ctx.guild.id, mode)) (await ctx.reply('Ok'))
@blocker.command('toggle', aliases=['設定', 't']) @commands.has_guild_permissions(manage_messages=True) @commands.cooldown(1, 10, commands.BucketType.guild) @setting.Setting('guild', 'Emoji Blocker 0', HELP) async def _toggle(self, ctx: commands.Context, *, mode: DataManager.Mode): "!lang ja\n --------\n リアクションか絵文字またはスタンプのブロックの有効/無効の切り替えをします。 \n ですが、有効にしても対象のロールを追加しなければ意味がありませんので、削除対象のロールの設定を忘れずに。\n\n Parameters\n ----------\n mode : emoji または stamp または reaction\n 削除するものです。\n\n Aliases\n -------\n t, 設定\n\n !lang en\n --------\n Enable/disable the blocking of emoji or stamps or reaction. \n But even if you enable it, it will be useless if you don't add the target role, so don't forget to set the target role for deletion.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n It is what RT will delete.\n\n Aliases\n -------\n t" (await ctx.trigger_typing()) (await self.write(ctx.guild.id, mode)) (await ctx.reply('Ok'))<|docstring|>!lang ja -------- リアクションか絵文字またはスタンプのブロックの有効/無効の切り替えをします。 ですが、有効にしても対象のロールを追加しなければ意味がありませんので、削除対象のロールの設定を忘れずに。 Parameters ---------- mode : emoji または stamp または reaction 削除するものです。 Aliases ------- t, 設定 !lang en -------- Enable/disable the blocking of emoji or stamps or reaction. But even if you enable it, it will be useless if you don't add the target role, so don't forget to set the target role for deletion. Parameters ---------- mode : emoji / stamp / reaction It is what RT will delete. Aliases ------- t<|endoftext|>
d88745ef7c7a3afc62241eabd99a27299fd40311c36f46cd7e6fde02834b2967
@blocker.group(aliases=['ロール', '役職', 'r'], headding={'ja': '文字ブロックで削除対象とするロールの設定リストを表示します。', 'en': 'Displays the configuration list of roles to be deleted in the character block.'}) @setting.Setting('guild', 'Emoji Blocker 1', HELP) async def role(self, ctx: commands.Context): '!lang ja\n --------\n 削除対象とするロールを管理するコマンドです。 \n `rt!blocker role`と実行すると設定されているものの一覧が表示されます。 \n これで設定しても`rt!blocker toggle`を実行するまでは何も起きません。\n\n Aliases\n -------\n r, ロール, 役職\n\n !lang en\n --------\n This command is used to manage the roles to be deleted. \n If you run `rt!blocker role`, a list of the configured roles will be displayed. \n If you set it up this way, nothing will happen until you run `rt!blocker toggle`.\n\n Aliases\n -------\n r' if (not ctx.invoked_subcommand): if (ctx.guild.id in self.cache): embed = discord.Embed(title=self.__cog_name__, color=self.bot.Colors.normal) for (mode, roles) in list(self.cache[ctx.guild.id].items()): if roles: embed.add_field(name=mode, value='\n'.join((f'・<@&{role_id}>' for role_id in roles))) (await ctx.reply(embed=embed))
!lang ja -------- 削除対象とするロールを管理するコマンドです。 `rt!blocker role`と実行すると設定されているものの一覧が表示されます。 これで設定しても`rt!blocker toggle`を実行するまでは何も起きません。 Aliases ------- r, ロール, 役職 !lang en -------- This command is used to manage the roles to be deleted. If you run `rt!blocker role`, a list of the configured roles will be displayed. If you set it up this way, nothing will happen until you run `rt!blocker toggle`. Aliases ------- r
cogs/blocker.py
role
RT-Team/rt-bot
26
python
@blocker.group(aliases=['ロール', '役職', 'r'], headding={'ja': '文字ブロックで削除対象とするロールの設定リストを表示します。', 'en': 'Displays the configuration list of roles to be deleted in the character block.'}) @setting.Setting('guild', 'Emoji Blocker 1', HELP) async def role(self, ctx: commands.Context): '!lang ja\n --------\n 削除対象とするロールを管理するコマンドです。 \n `rt!blocker role`と実行すると設定されているものの一覧が表示されます。 \n これで設定しても`rt!blocker toggle`を実行するまでは何も起きません。\n\n Aliases\n -------\n r, ロール, 役職\n\n !lang en\n --------\n This command is used to manage the roles to be deleted. \n If you run `rt!blocker role`, a list of the configured roles will be displayed. \n If you set it up this way, nothing will happen until you run `rt!blocker toggle`.\n\n Aliases\n -------\n r' if (not ctx.invoked_subcommand): if (ctx.guild.id in self.cache): embed = discord.Embed(title=self.__cog_name__, color=self.bot.Colors.normal) for (mode, roles) in list(self.cache[ctx.guild.id].items()): if roles: embed.add_field(name=mode, value='\n'.join((f'・<@&{role_id}>' for role_id in roles))) (await ctx.reply(embed=embed))
@blocker.group(aliases=['ロール', '役職', 'r'], headding={'ja': '文字ブロックで削除対象とするロールの設定リストを表示します。', 'en': 'Displays the configuration list of roles to be deleted in the character block.'}) @setting.Setting('guild', 'Emoji Blocker 1', HELP) async def role(self, ctx: commands.Context): '!lang ja\n --------\n 削除対象とするロールを管理するコマンドです。 \n `rt!blocker role`と実行すると設定されているものの一覧が表示されます。 \n これで設定しても`rt!blocker toggle`を実行するまでは何も起きません。\n\n Aliases\n -------\n r, ロール, 役職\n\n !lang en\n --------\n This command is used to manage the roles to be deleted. \n If you run `rt!blocker role`, a list of the configured roles will be displayed. \n If you set it up this way, nothing will happen until you run `rt!blocker toggle`.\n\n Aliases\n -------\n r' if (not ctx.invoked_subcommand): if (ctx.guild.id in self.cache): embed = discord.Embed(title=self.__cog_name__, color=self.bot.Colors.normal) for (mode, roles) in list(self.cache[ctx.guild.id].items()): if roles: embed.add_field(name=mode, value='\n'.join((f'・<@&{role_id}>' for role_id in roles))) (await ctx.reply(embed=embed))<|docstring|>!lang ja -------- 削除対象とするロールを管理するコマンドです。 `rt!blocker role`と実行すると設定されているものの一覧が表示されます。 これで設定しても`rt!blocker toggle`を実行するまでは何も起きません。 Aliases ------- r, ロール, 役職 !lang en -------- This command is used to manage the roles to be deleted. If you run `rt!blocker role`, a list of the configured roles will be displayed. If you set it up this way, nothing will happen until you run `rt!blocker toggle`. Aliases ------- r<|endoftext|>
1c3d85fc3d0275292430393c7321ea3b1ef06e2f4abfed81b7d47eaa4a7ae87c
@role.command(aliases=['追加', 'a']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 2', HELP) async def add(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): "!lang ja\n --------\n 所有していると絵文字等を送信することができなくなるロールを設定します。\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n 絵文字かスタンプかリアクションどっちの時での設定かです。\n role : 役職名,IDまたはメンション\n 設定する役職です。\n\n Aliases\n -------\n a, ついか\n\n !lang en\n --------\n Set a role that, when owned, will not allow you to send emoji and other text.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n Emoji or Stamp or reaction\n role : role's name, role's ID or role mention\n Target role\n\n Aliases\n -------\n a" try: (await self.add_role(ctx.guild.id, mode, role.id)) except AssertionError: (await ctx.reply('これ以上設定できないまたはまだ設定が有効になっていません。')) else: (await ctx.reply('Ok'))
!lang ja -------- 所有していると絵文字等を送信することができなくなるロールを設定します。 Parameters ---------- mode : emoji / stamp / reaction 絵文字かスタンプかリアクションどっちの時での設定かです。 role : 役職名,IDまたはメンション 設定する役職です。 Aliases ------- a, ついか !lang en -------- Set a role that, when owned, will not allow you to send emoji and other text. Parameters ---------- mode : emoji / stamp / reaction Emoji or Stamp or reaction role : role's name, role's ID or role mention Target role Aliases ------- a
cogs/blocker.py
add
RT-Team/rt-bot
26
python
@role.command(aliases=['追加', 'a']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 2', HELP) async def add(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): "!lang ja\n --------\n 所有していると絵文字等を送信することができなくなるロールを設定します。\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n 絵文字かスタンプかリアクションどっちの時での設定かです。\n role : 役職名,IDまたはメンション\n 設定する役職です。\n\n Aliases\n -------\n a, ついか\n\n !lang en\n --------\n Set a role that, when owned, will not allow you to send emoji and other text.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n Emoji or Stamp or reaction\n role : role's name, role's ID or role mention\n Target role\n\n Aliases\n -------\n a" try: (await self.add_role(ctx.guild.id, mode, role.id)) except AssertionError: (await ctx.reply('これ以上設定できないまたはまだ設定が有効になっていません。')) else: (await ctx.reply('Ok'))
@role.command(aliases=['追加', 'a']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 2', HELP) async def add(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): "!lang ja\n --------\n 所有していると絵文字等を送信することができなくなるロールを設定します。\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n 絵文字かスタンプかリアクションどっちの時での設定かです。\n role : 役職名,IDまたはメンション\n 設定する役職です。\n\n Aliases\n -------\n a, ついか\n\n !lang en\n --------\n Set a role that, when owned, will not allow you to send emoji and other text.\n\n Parameters\n ----------\n mode : emoji / stamp / reaction\n Emoji or Stamp or reaction\n role : role's name, role's ID or role mention\n Target role\n\n Aliases\n -------\n a" try: (await self.add_role(ctx.guild.id, mode, role.id)) except AssertionError: (await ctx.reply('これ以上設定できないまたはまだ設定が有効になっていません。')) else: (await ctx.reply('Ok'))<|docstring|>!lang ja -------- 所有していると絵文字等を送信することができなくなるロールを設定します。 Parameters ---------- mode : emoji / stamp / reaction 絵文字かスタンプかリアクションどっちの時での設定かです。 role : 役職名,IDまたはメンション 設定する役職です。 Aliases ------- a, ついか !lang en -------- Set a role that, when owned, will not allow you to send emoji and other text. Parameters ---------- mode : emoji / stamp / reaction Emoji or Stamp or reaction role : role's name, role's ID or role mention Target role Aliases ------- a<|endoftext|>
c861e89efb10e5a90babc261d927e42b0942d6ff89a4b3339a97902bc2c25070
@role.command(aliases=['削除', 'rm']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 3', HELP) async def remove(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): '!lang ja\n --------\n `add`の逆です。\n\n Aliases\n -------\n rm, 削除\n\n !lang en\n --------\n The opposite of `add`.\n\n Aliases\n -------\n rm' (await self.remove_role(ctx.guild.id, mode, role.id)) (await ctx.reply('Ok'))
!lang ja -------- `add`の逆です。 Aliases ------- rm, 削除 !lang en -------- The opposite of `add`. Aliases ------- rm
cogs/blocker.py
remove
RT-Team/rt-bot
26
python
@role.command(aliases=['削除', 'rm']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 3', HELP) async def remove(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): '!lang ja\n --------\n `add`の逆です。\n\n Aliases\n -------\n rm, 削除\n\n !lang en\n --------\n The opposite of `add`.\n\n Aliases\n -------\n rm' (await self.remove_role(ctx.guild.id, mode, role.id)) (await ctx.reply('Ok'))
@role.command(aliases=['削除', 'rm']) @commands.cooldown(1, 8, commands.BucketType.guild) @commands.has_guild_permissions(manage_messages=True) @setting.Setting('guild', 'Emoji Blocker 3', HELP) async def remove(self, ctx: commands.Context, mode: DataManager.Mode, *, role: 'Role'): '!lang ja\n --------\n `add`の逆です。\n\n Aliases\n -------\n rm, 削除\n\n !lang en\n --------\n The opposite of `add`.\n\n Aliases\n -------\n rm' (await self.remove_role(ctx.guild.id, mode, role.id)) (await ctx.reply('Ok'))<|docstring|>!lang ja -------- `add`の逆です。 Aliases ------- rm, 削除 !lang en -------- The opposite of `add`. Aliases ------- rm<|endoftext|>
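Read together, the toggle, role, add and remove rows above document one command group. The invocations below are illustrative only, pieced together from the documented rt!blocker prefix, the mode values (emoji / stamp / reaction) and the role argument forms; the role name @Muted is a placeholder:

rt!blocker toggle emoji
rt!blocker role
rt!blocker role add emoji @Muted
rt!blocker role remove emoji @Muted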
9a703cc6afe2bd5c6b20e110612a4c58c70b638da3e71dcff6b0e5702dc4a8b9
def is_should_check(self, author: discord.Member) -> Iterator[str]: 'ブロックをすべきかを確かめます。' for (mode, roles) in list(self.cache[author.guild.id].items()): if any(((author.get_role(role_id) or (role_id == 0)) for role_id in roles)): (yield mode)
ブロックをすべきかを確かめます。
cogs/blocker.py
is_should_check
RT-Team/rt-bot
26
python
def is_should_check(self, author: discord.Member) -> Iterator[str]: for (mode, roles) in list(self.cache[author.guild.id].items()): if any(((author.get_role(role_id) or (role_id == 0)) for role_id in roles)): (yield mode)
def is_should_check(self, author: discord.Member) -> Iterator[str]: for (mode, roles) in list(self.cache[author.guild.id].items()): if any(((author.get_role(role_id) or (role_id == 0)) for role_id in roles)): (yield mode)<|docstring|>ブロックをすべきかを確かめます。<|endoftext|>
4ff36342dd545775ff45d5c6643715fa9d218c71969e40f08cb3868b26909594
@staticmethod def precision_recall_f1(true: np.ndarray, pred: np.ndarray): 'Compute precision, recall and f1_score.' num_predicted = np.unique(pred).size num_intersect = np.intersect1d(pred, true).size num_observed = np.unique(true).size p = (num_intersect / num_predicted) r = (num_intersect / num_observed) f1 = ((((2 * p) * r) / (p + r)) if ((p != 0) or (r != 0)) else 0) return (p, r, f1)
Compute precision, recall and f1_score.
deepr/examples/movielens/jobs/evaluate.py
precision_recall_f1
drohde/deepr
0
python
@staticmethod def precision_recall_f1(true: np.ndarray, pred: np.ndarray): num_predicted = np.unique(pred).size num_intersect = np.intersect1d(pred, true).size num_observed = np.unique(true).size p = (num_intersect / num_predicted) r = (num_intersect / num_observed) f1 = ((((2 * p) * r) / (p + r)) if ((p != 0) or (r != 0)) else 0) return (p, r, f1)
@staticmethod def precision_recall_f1(true: np.ndarray, pred: np.ndarray): num_predicted = np.unique(pred).size num_intersect = np.intersect1d(pred, true).size num_observed = np.unique(true).size p = (num_intersect / num_predicted) r = (num_intersect / num_observed) f1 = ((((2 * p) * r) / (p + r)) if ((p != 0) or (r != 0)) else 0) return (p, r, f1)<|docstring|>Compute precision, recall and f1_score.<|endoftext|>
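A short usage sketch of the precision_recall_f1 helper above; the function is restated outside its class so the snippet runs on its own, and the sample arrays are invented for illustration:

import numpy as np

def precision_recall_f1(true: np.ndarray, pred: np.ndarray):
    # Same logic as the staticmethod above: set-style precision/recall over ids.
    num_predicted = np.unique(pred).size
    num_intersect = np.intersect1d(pred, true).size
    num_observed = np.unique(true).size
    p = num_intersect / num_predicted
    r = num_intersect / num_observed
    f1 = (2 * p * r) / (p + r) if (p != 0 or r != 0) else 0
    return p, r, f1

# 2 of the 3 predicted ids are relevant and 2 of the 4 relevant ids are found:
# precision = 2/3, recall = 1/2, f1 = 2*p*r/(p+r) = 4/7, about 0.571
print(precision_recall_f1(np.array([1, 2, 3, 4]), np.array([2, 3, 5])))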
4314a5d38535714b8dc5fe9b0e278e82946d579dc4e9b18f5e7983ecd5573bc6
@abc.abstractmethod def is_success(self): 'Predicate that returns True if the solve was successful.' pass
Predicate that returns True if the solve was successful.
galini/solvers/solution.py
is_success
cog-imperial/galini
14
python
@abc.abstractmethod def is_success(self): pass
@abc.abstractmethod def is_success(self): pass<|docstring|>Predicate that returns True if the solve was successful.<|endoftext|>
0f4248aa1650b33c0ff6b8d0cb766af416f2ac2d5400f7b0bb27534a7a2ae70f
@abc.abstractmethod def is_infeasible(self): 'Predicate that returns True if the problem is infeasible.' pass
Predicate that returns True if the problem is infeasible.
galini/solvers/solution.py
is_infeasible
cog-imperial/galini
14
python
@abc.abstractmethod def is_infeasible(self): pass
@abc.abstractmethod def is_infeasible(self): pass<|docstring|>Predicate that returns True if the problem is infeasible.<|endoftext|>
159ae7eedffb7e8189c9f92d24044ee98e3b205441cf939fac9d7f2b586cc002
@abc.abstractmethod def is_unbounded(self): 'Predicate that returns True if the problem is unbounded.' pass
Predicate that returns True if the problem is unbounded.
galini/solvers/solution.py
is_unbounded
cog-imperial/galini
14
python
@abc.abstractmethod def is_unbounded(self): pass
@abc.abstractmethod def is_unbounded(self): pass<|docstring|>Predicate that returns True if the problem is unbounded.<|endoftext|>
dde2454cfc853910af9a1713b902e77a409e3a6e11ac485339a6517943259a4d
@abc.abstractmethod def description(self): 'Return status description.' pass
Return status description.
galini/solvers/solution.py
description
cog-imperial/galini
14
python
@abc.abstractmethod def description(self): pass
@abc.abstractmethod def description(self): pass<|docstring|>Return status description.<|endoftext|>
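The four abstract methods above (is_success, is_infeasible, is_unbounded, description) sketch a solver-status interface. A hypothetical concrete implementation, not taken from galini, might look like:

import abc

class SolutionStatus(abc.ABC):
    # Abstract interface matching the methods documented in the rows above.
    @abc.abstractmethod
    def is_success(self): ...
    @abc.abstractmethod
    def is_infeasible(self): ...
    @abc.abstractmethod
    def is_unbounded(self): ...
    @abc.abstractmethod
    def description(self): ...

class SimpleStatus(SolutionStatus):
    # Hypothetical concrete status carrying a single condition string.
    def __init__(self, condition: str):
        self._condition = condition

    def is_success(self):
        return self._condition == 'optimal'

    def is_infeasible(self):
        return self._condition == 'infeasible'

    def is_unbounded(self):
        return self._condition == 'unbounded'

    def description(self):
        return self._condition

print(SimpleStatus('optimal').is_success())  # True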
c73e5fccb35a1c0b4977dc47d391550f4a5d675006925302d2091f3117c75852
def setUp(self): 'Create filespace ' self.gpfile.create_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).setUp()
Create filespace
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/switch_checkpoint.py
setUp
lintzc/GPDB
1
python
def setUp(self): ' ' self.gpfile.create_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).setUp()
def setUp(self): ' ' self.gpfile.create_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).setUp()<|docstring|>Create filespace<|endoftext|>
f76df1cc31464cdab6f6132abcc3fcfe015d5781728fbc6445e0792e6286b6b0
def tearDown(self): ' Cleanup up the filespace created ' self.gpfile.drop_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).tearDown()
Cleanup up the filespace created
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/switch_checkpoint.py
tearDown
lintzc/GPDB
1
python
def tearDown(self): ' ' self.gpfile.drop_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).tearDown()
def tearDown(self): ' ' self.gpfile.drop_filespace('filespace_test_a') super(SwitchCheckpointTestCase, self).tearDown()<|docstring|>Cleanup up the filespace created<|endoftext|>
6480b5c9cccedc8b582bb1f961e03898cc7b9354a551d40015ea244e3a0c448f
def switch_checkpoint(self, cluster_state, fault_type, crash_type): " \n @param skip_state : skip/noskip checkpoint\n @param cluster_state : sync/change_tracking/resync\n @param ddl_type : create/drop\n @fault_type : commit/abort . Uses the same parameter to pass in 'end_prepare_two_phase_sleep'\n @crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror\n @description: Test the state of transactions after fault on master at diff stages followed by crash-recovery. \n Faults are used to suspend the transactions at three stages\n 1. After prepare has been sent by master and acknowledge by all workers\n 2. After distributed commit has been flushed to xlog on master\n 3. After commit prepared has been sent out acknowledged before distributed forget\n Steps:\n 0. Check the state of the cluster before proceeding the test execution\n 1. Run pre_sqls\n 2. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state\n 3. Run switch_checkpoint loop. switch_xlog, checkpoint in case of 'dtm_broadcast_prepare' fault \n 4. Crash and recover. Resume suspended faults if needed\n 5. Run validate_loop\n 7. Recover the cluster in case if needed\n 8. Validate using gpcheckcat and gpcheckmirrorseg\n\n " test_dir = {'dtm_broadcast_prepare': 'switch_ckpt_a,switch_ckpt_b', 'dtm_broadcast_commit_prepared': 'switch_ckpt_a,switch_ckpt_b', 'dtm_xlog_distributed_commit': 'switch_ckpt_c'} test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_case_list4 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') 
test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir[fault_type], cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if ((fault_type == 'dtm_broadcast_prepare') and (crash_type not in 'gpstop_i')): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list6) test_case_list7 = [] if (fault_type in ('dtm_broadcast_commit_prepared', 'dtm_broadcast_prepare')): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.cleanup_sql.test_cleanup.TestCleanupClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list9)
@param skip_state : skip/noskip checkpoint @param cluster_state : sync/change_tracking/resync @param ddl_type : create/drop @fault_type : commit/abort . Uses the same parameter to pass in 'end_prepare_two_phase_sleep' @crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror @description: Test the state of transactions after fault on master at diff stages followed by crash-recovery. Faults are used to suspend the transactions at three stages 1. After prepare has been sent by master and acknowledge by all workers 2. After distributed commit has been flushed to xlog on master 3. After commit prepared has been sent out acknowledged before distributed forget Steps: 0. Check the state of the cluster before proceeding the test execution 1. Run pre_sqls 2. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state 3. Run switch_checkpoint loop. switch_xlog, checkpoint in case of 'dtm_broadcast_prepare' fault 4. Crash and recover. Resume suspended faults if needed 5. Run validate_loop 7. Recover the cluster in case if needed 8. Validate using gpcheckcat and gpcheckmirrorseg
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/switch_checkpoint.py
switch_checkpoint
lintzc/GPDB
1
python
def switch_checkpoint(self, cluster_state, fault_type, crash_type): " \n @param skip_state : skip/noskip checkpoint\n @param cluster_state : sync/change_tracking/resync\n @param ddl_type : create/drop\n @fault_type : commit/abort . Uses the same parameter to pass in 'end_prepare_two_phase_sleep'\n @crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror\n @description: Test the state of transactions after fault on master at diff stages followed by crash-recovery. \n Faults are used to suspend the transactions at three stages\n 1. After prepare has been sent by master and acknowledge by all workers\n 2. After distributed commit has been flushed to xlog on master\n 3. After commit prepared has been sent out acknowledged before distributed forget\n Steps:\n 0. Check the state of the cluster before proceeding the test execution\n 1. Run pre_sqls\n 2. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state\n 3. Run switch_checkpoint loop. switch_xlog, checkpoint in case of 'dtm_broadcast_prepare' fault \n 4. Crash and recover. Resume suspended faults if needed\n 5. Run validate_loop\n 7. Recover the cluster in case if needed\n 8. Validate using gpcheckcat and gpcheckmirrorseg\n\n " test_dir = {'dtm_broadcast_prepare': 'switch_ckpt_a,switch_ckpt_b', 'dtm_broadcast_commit_prepared': 'switch_ckpt_a,switch_ckpt_b', 'dtm_xlog_distributed_commit': 'switch_ckpt_c'} test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_case_list4 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') 
test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir[fault_type], cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if ((fault_type == 'dtm_broadcast_prepare') and (crash_type not in 'gpstop_i')): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list6) test_case_list7 = [] if (fault_type in ('dtm_broadcast_commit_prepared', 'dtm_broadcast_prepare')): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.cleanup_sql.test_cleanup.TestCleanupClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list9)
def switch_checkpoint(self, cluster_state, fault_type, crash_type): " \n @param skip_state : skip/noskip checkpoint\n @param cluster_state : sync/change_tracking/resync\n @param ddl_type : create/drop\n @fault_type : commit/abort . Uses the same parameter to pass in 'end_prepare_two_phase_sleep'\n @crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror\n @description: Test the state of transactions after fault on master at diff stages followed by crash-recovery. \n Faults are used to suspend the transactions at three stages\n 1. After prepare has been sent by master and acknowledge by all workers\n 2. After distributed commit has been flushed to xlog on master\n 3. After commit prepared has been sent out acknowledged before distributed forget\n Steps:\n 0. Check the state of the cluster before proceeding the test execution\n 1. Run pre_sqls\n 2. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state\n 3. Run switch_checkpoint loop. switch_xlog, checkpoint in case of 'dtm_broadcast_prepare' fault \n 4. Crash and recover. Resume suspended faults if needed\n 5. Run validate_loop\n 7. Recover the cluster in case if needed\n 8. Validate using gpcheckcat and gpcheckmirrorseg\n\n " test_dir = {'dtm_broadcast_prepare': 'switch_ckpt_a,switch_ckpt_b', 'dtm_broadcast_commit_prepared': 'switch_ckpt_a,switch_ckpt_b', 'dtm_xlog_distributed_commit': 'switch_ckpt_c'} test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.pre_sql.test_presqls.TestPreSQLClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.pre_sql.test_presqls.TestPreSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_case_list4 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_broadcast_prepare'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') 
test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.trigger_sql.test_triggersqls.TestTriggerSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir[fault_type], cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] if (fault_type == 'dtm_broadcast_commit_prepared'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if ((fault_type == 'dtm_broadcast_prepare') and (crash_type not in 'gpstop_i')): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_a.post_sql.test_postsqls.TestPostSQLClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.checkpoint.test_checkpoint.TestCheckpointClass') test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.post_sql.test_postsqls.TestPostSQLClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list6.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list6) test_case_list7 = [] if (fault_type in ('dtm_broadcast_commit_prepared', 'dtm_broadcast_prepare')): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_b.cleanup_sql.test_cleanup.TestCleanupClass') if (fault_type == 'dtm_xlog_distributed_commit'): test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_c.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list9)<|docstring|>@param skip_state : skip/noskip checkpoint @param cluster_state : sync/change_tracking/resync @param ddl_type : create/drop @fault_type : commit/abort . Uses the same parameter to pass in 'end_prepare_two_phase_sleep' @crash_type : gpstop_i/gpstop_a/failover_to_primary/failover_to_mirror @description: Test the state of transactions after fault on master at diff stages followed by crash-recovery. Faults are used to suspend the transactions at three stages 1. After prepare has been sent by master and acknowledge by all workers 2. After distributed commit has been flushed to xlog on master 3. After commit prepared has been sent out acknowledged before distributed forget Steps: 0. Check the state of the cluster before proceeding the test execution 1. Run pre_sqls 2. Run any faults required before the trigger_sqls based on the fault_type as well as cluster_state 3. Run switch_checkpoint loop. switch_xlog, checkpoint in case of 'dtm_broadcast_prepare' fault 4. Crash and recover. Resume suspended faults if needed 5. Run validate_loop 7. Recover the cluster in case if needed 8. 
Validate using gpcheckcat and gpcheckmirrorseg<|endoftext|>
e82215c222b253e794fe66701d9d18ebfc30d092c38152c9317e49594b8b7775
def switch_checkpoint_serial(self, cluster_state, fault_type, crash_type='gpstop_i'): '\n @description: Tests with more switch_xlogs before trigger sqls\n ' test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_dir = 'switch_ckpt_serial' test_case_list4 = [] test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir, cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_checkpoint_loop', [fault_type])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] test_case_list6.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list6) if (fault_type not in 'dtm_broadcast_prepare'): test_case_list7 = [] test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list9) test_case_list10 = [] test_case_list10.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list10)
@description: Tests with more switch_xlogs before trigger sqls
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/switch_checkpoint.py
switch_checkpoint_serial
lintzc/GPDB
1
python
def switch_checkpoint_serial(self, cluster_state, fault_type, crash_type='gpstop_i'): '\n \n ' test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_dir = 'switch_ckpt_serial' test_case_list4 = [] test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir, cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_checkpoint_loop', [fault_type])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] test_case_list6.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list6) if (fault_type not in 'dtm_broadcast_prepare'): test_case_list7 = [] test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list9) test_case_list10 = [] test_case_list10.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list10)
def switch_checkpoint_serial(self, cluster_state, fault_type, crash_type='gpstop_i'): '\n \n ' test_case_list0 = [] test_case_list0.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_system') self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.pre_sql.test_presqls.TestPreSQLClass') self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_faults_before_trigger', [cluster_state, fault_type])) self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_switch_xlog') self.test_case_scenario.append(test_case_list3) test_dir = 'switch_ckpt_serial' test_case_list4 = [] test_case_list4.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.trigger_sql.test_triggersqls.TestTriggerSQLClass') test_case_list4.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_ckpt_crash_and_recover', [crash_type, fault_type, test_dir, cluster_state])) self.test_case_scenario.append(test_case_list4) test_case_list5 = [] test_case_list5.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.switch_checkpoint_loop', [fault_type])) self.test_case_scenario.append(test_case_list5) test_case_list6 = [] test_case_list6.append(('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.run_gprecover', [crash_type, cluster_state])) self.test_case_scenario.append(test_case_list6) if (fault_type not in 'dtm_broadcast_prepare'): test_case_list7 = [] test_case_list7.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.post_sql.test_postsqls.TestPostSQLClass') self.test_case_scenario.append(test_case_list7) test_case_list8 = [] test_case_list8.append('mpp.gpdb.tests.storage.pg_twophase.switch_ckpt_serial.cleanup_sql.test_cleanup.TestCleanupClass') self.test_case_scenario.append(test_case_list8) test_case_list9 = [] test_case_list9.append('mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.run_validation') self.test_case_scenario.append(test_case_list9) test_case_list10 = [] test_case_list10.append('mpp.gpdb.tests.storage.pg_twophase.PgtwoPhaseClass.cleanup_dangling_processes') self.test_case_scenario.append(test_case_list10)<|docstring|>@description: Tests with more switch_xlogs before trigger sqls<|endoftext|>
75c0b3643a99936901d57fcab626c44ea420db2fe25438880048cec245f93dc0
def path(self): 'Return the path to the file that backs this database.' return self.path
Return the path to the file that backs this database.
parlai/agents/tfidf_retriever/doc_db.py
path
ShaojieJiang/FACE_orig
1
python
def path(self): return self.path
def path(self): return self.path<|docstring|>Return the path to the file that backs this database.<|endoftext|>
9bb1b2a9436426ad9cd5fbfb93617806c0da10b343f8b8192425d649d0a9f597
def close(self): 'Close the connection to the database.' self.connection.close()
Close the connection to the database.
parlai/agents/tfidf_retriever/doc_db.py
close
ShaojieJiang/FACE_orig
1
python
def close(self): self.connection.close()
def close(self): self.connection.close()<|docstring|>Close the connection to the database.<|endoftext|>
96aa5b4c40ba3ccfaca6e3d7ee9ff304e79610dac86e880e1543265d3e898922
def get_doc_ids(self): 'Fetch all ids of docs stored in the db.' cursor = self.connection.cursor() cursor.execute('SELECT id FROM documents') results = [r[0] for r in cursor.fetchall()] cursor.close() return results
Fetch all ids of docs stored in the db.
parlai/agents/tfidf_retriever/doc_db.py
get_doc_ids
ShaojieJiang/FACE_orig
1
python
def get_doc_ids(self): cursor = self.connection.cursor() cursor.execute('SELECT id FROM documents') results = [r[0] for r in cursor.fetchall()] cursor.close() return results
def get_doc_ids(self): cursor = self.connection.cursor() cursor.execute('SELECT id FROM documents') results = [r[0] for r in cursor.fetchall()] cursor.close() return results<|docstring|>Fetch all ids of docs stored in the db.<|endoftext|>
0ff74c0d51b1960aac61de131289b11c31625c13f7aabf625f3c318ab0318502
def get_doc_text(self, doc_id): "Fetch the raw text of the doc for 'doc_id'." cursor = self.connection.cursor() cursor.execute('SELECT text FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])
Fetch the raw text of the doc for 'doc_id'.
parlai/agents/tfidf_retriever/doc_db.py
get_doc_text
ShaojieJiang/FACE_orig
1
python
def get_doc_text(self, doc_id): cursor = self.connection.cursor() cursor.execute('SELECT text FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])
def get_doc_text(self, doc_id): cursor = self.connection.cursor() cursor.execute('SELECT text FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])<|docstring|>Fetch the raw text of the doc for 'doc_id'.<|endoftext|>
4fbb7d937a2f5b290a7e68e36fcc86dd4b973e4aa2aefac948c9865927f3bbc0
def get_doc_value(self, doc_id): "Fetch the raw value of the doc for 'doc_id'." cursor = self.connection.cursor() cursor.execute('SELECT value FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])
Fetch the raw value of the doc for 'doc_id'.
parlai/agents/tfidf_retriever/doc_db.py
get_doc_value
ShaojieJiang/FACE_orig
1
python
def get_doc_value(self, doc_id): cursor = self.connection.cursor() cursor.execute('SELECT value FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])
def get_doc_value(self, doc_id): cursor = self.connection.cursor() cursor.execute('SELECT value FROM documents WHERE id = ?', (utils.normalize(doc_id),)) result = cursor.fetchone() cursor.close() return (result if (result is None) else result[0])<|docstring|>Fetch the raw value of the doc for 'doc_id'.<|endoftext|>
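The doc_db accessors above (path, close, get_doc_ids, get_doc_text, get_doc_value) share one pattern: open a cursor on a sqlite3 connection, query the documents table by normalized id, and close the cursor. A self-contained sketch of that pattern; the in-memory table and the identity normalize stand-in are assumptions for illustration, the real class delegates to utils.normalize:

import sqlite3

def normalize(doc_id):
    # Stand-in for utils.normalize used by the real class.
    return doc_id

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE documents (id TEXT PRIMARY KEY, text TEXT, value TEXT)')
conn.execute("INSERT INTO documents VALUES ('doc1', 'raw text', 'raw value')")

def get_doc_text(connection, doc_id):
    cursor = connection.cursor()
    cursor.execute('SELECT text FROM documents WHERE id = ?', (normalize(doc_id),))
    result = cursor.fetchone()
    cursor.close()
    return result if result is None else result[0]

print(get_doc_text(conn, 'doc1'))   # 'raw text'
print(get_doc_text(conn, 'doc2'))   # None (no such row)
conn.close()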
57ac1a9a9e219b594c502d781a6bdd0579ce5943cf07d2bbe53bac57bcf6b7de
def task_id_arg(f: Optional[Callable]=None, *, required=True): '\n By default, the task ID is made required; pass `required=False` to the\n decorator arguments to make it optional.\n ' if (f is None): return functools.partial(task_id_arg, required=required) return click.argument('TASK_ID', required=required)(f)
By default, the task ID is made required; pass `required=False` to the decorator arguments to make it optional.
src/globus_cli/commands/task/_common.py
task_id_arg
sirosen/temp-cli-test
47
python
def task_id_arg(f: Optional[Callable]=None, *, required=True): '\n By default, the task ID is made required; pass `required=False` to the\n decorator arguments to make it optional.\n ' if (f is None): return functools.partial(task_id_arg, required=required) return click.argument('TASK_ID', required=required)(f)
def task_id_arg(f: Optional[Callable]=None, *, required=True): '\n By default, the task ID is made required; pass `required=False` to the\n decorator arguments to make it optional.\n ' if (f is None): return functools.partial(task_id_arg, required=required) return click.argument('TASK_ID', required=required)(f)<|docstring|>By default, the task ID is made required; pass `required=False` to the decorator arguments to make it optional.<|endoftext|>
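task_id_arg supports both the bare @task_id_arg form and the argument form @task_id_arg(required=False); the functools.partial branch handles the latter. A sketch of both usages on hypothetical click commands (the command names show and cancel are not from globus-cli, and the decorator is restated so the snippet stands alone):

import functools
from typing import Callable, Optional

import click

def task_id_arg(f: Optional[Callable] = None, *, required=True):
    # Restated from the row above: works with or without decorator arguments.
    if f is None:
        return functools.partial(task_id_arg, required=required)
    return click.argument('TASK_ID', required=required)(f)

@click.command()
@task_id_arg                      # bare form: TASK_ID is required
def show(task_id):
    click.echo(f'showing {task_id}')

@click.command()
@task_id_arg(required=False)      # argument form: TASK_ID may be omitted
def cancel(task_id):
    click.echo(f'cancelling {task_id or "nothing"}')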
f001155a1f1255ed25bfc4cd031bc4c666dfcf223347cf20f1af84ec9221c165
def hook_makeOutline(VO, blines): 'Return (tlines, bnodes, levels) for Body lines blines.\n blines is either Vim buffer object (Body) or list of buffer lines.\n ' Z = len(blines) (tlines, bnodes, levels) = ([], [], []) (tlines_add, bnodes_add, levels_add) = (tlines.append, bnodes.append, levels.append) isFenced = False for i in xrange(Z): bline = blines[i] if isFenced: if re.match(isFenced, bline): isFenced = False continue if (bline.lstrip().startswith('#') and ('<<' in bline)): r_m = region_match(bline) if (r_m and (r_m.group(1) != 'Region')): isFenced = ('^\\s*%s\\s*$' % re.escape((r_m.group(3) or ''))) continue elif (not bline.startswith('*')): continue m = headline_match(bline) if (not m): continue lev = len(m.group(1)) head = bline[lev:].strip() tline = (' %s|%s' % (('. ' * (lev - 1)), head)) tlines_add(tline) bnodes_add((i + 1)) levels_add(lev) return (tlines, bnodes, levels)
Return (tlines, bnodes, levels) for Body lines blines. blines is either Vim buffer object (Body) or list of buffer lines.
vimsetting/bundle/VOom/autoload/voom/voom_mode_viki.py
hook_makeOutline
thuleqaid/boost_study
1
python
def hook_makeOutline(VO, blines): 'Return (tlines, bnodes, levels) for Body lines blines.\n blines is either Vim buffer object (Body) or list of buffer lines.\n ' Z = len(blines) (tlines, bnodes, levels) = ([], [], []) (tlines_add, bnodes_add, levels_add) = (tlines.append, bnodes.append, levels.append) isFenced = False for i in xrange(Z): bline = blines[i] if isFenced: if re.match(isFenced, bline): isFenced = False continue if (bline.lstrip().startswith('#') and ('<<' in bline)): r_m = region_match(bline) if (r_m and (r_m.group(1) != 'Region')): isFenced = ('^\\s*%s\\s*$' % re.escape((r_m.group(3) or ''))) continue elif (not bline.startswith('*')): continue m = headline_match(bline) if (not m): continue lev = len(m.group(1)) head = bline[lev:].strip() tline = (' %s|%s' % (('. ' * (lev - 1)), head)) tlines_add(tline) bnodes_add((i + 1)) levels_add(lev) return (tlines, bnodes, levels)
def hook_makeOutline(VO, blines): 'Return (tlines, bnodes, levels) for Body lines blines.\n blines is either Vim buffer object (Body) or list of buffer lines.\n ' Z = len(blines) (tlines, bnodes, levels) = ([], [], []) (tlines_add, bnodes_add, levels_add) = (tlines.append, bnodes.append, levels.append) isFenced = False for i in xrange(Z): bline = blines[i] if isFenced: if re.match(isFenced, bline): isFenced = False continue if (bline.lstrip().startswith('#') and ('<<' in bline)): r_m = region_match(bline) if (r_m and (r_m.group(1) != 'Region')): isFenced = ('^\\s*%s\\s*$' % re.escape((r_m.group(3) or ''))) continue elif (not bline.startswith('*')): continue m = headline_match(bline) if (not m): continue lev = len(m.group(1)) head = bline[lev:].strip() tline = (' %s|%s' % (('. ' * (lev - 1)), head)) tlines_add(tline) bnodes_add((i + 1)) levels_add(lev) return (tlines, bnodes, levels)<|docstring|>Return (tlines, bnodes, levels) for Body lines blines. blines is either Vim buffer object (Body) or list of buffer lines.<|endoftext|>
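hook_makeOutline scans Body lines, skips fenced regions, and turns every line whose leading run of asterisks matches headline_match into a Tree headline whose level is the length of that run. headline_match itself is not included in this row, so the regex below is an assumed stand-in that only mimics that behaviour on a toy input:

import re

# Assumed stand-in for the module's headline_match: a leading run of '*', then a space.
headline_match = re.compile(r'^(\*+)\s').match

blines = ['* Intro', 'some text', '** Details', '* Summary']
tlines, bnodes, levels = [], [], []
for i, bline in enumerate(blines):
    if not bline.startswith('*'):
        continue
    m = headline_match(bline)
    if not m:
        continue
    lev = len(m.group(1))
    head = bline[lev:].strip()
    tlines.append('  %s|%s' % ('. ' * (lev - 1), head))
    bnodes.append(i + 1)
    levels.append(lev)

print(tlines)   # ['  |Intro', '  . |Details', '  |Summary']
print(bnodes)   # [1, 3, 4]
print(levels)   # [1, 2, 1]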
63759670897a08280b39539f2b49bc34ba3708257b22a1fc5ba2de8a08404d71
def hook_newHeadline(VO, level, blnum, tlnum): 'Return (tree_head, bodyLines).\n tree_head is new headline string in Tree buffer (text after |).\n bodyLines is list of lines to insert in Body buffer.\n ' tree_head = 'NewHeadline' bodyLines = [('%s %s' % (('*' * level), tree_head)), ''] return (tree_head, bodyLines)
Return (tree_head, bodyLines). tree_head is new headline string in Tree buffer (text after |). bodyLines is list of lines to insert in Body buffer.
vimsetting/bundle/VOom/autoload/voom/voom_mode_viki.py
hook_newHeadline
thuleqaid/boost_study
1
python
def hook_newHeadline(VO, level, blnum, tlnum): 'Return (tree_head, bodyLines).\n tree_head is new headline string in Tree buffer (text after |).\n bodyLines is list of lines to insert in Body buffer.\n ' tree_head = 'NewHeadline' bodyLines = [('%s %s' % (('*' * level), tree_head)), ''] return (tree_head, bodyLines)
def hook_newHeadline(VO, level, blnum, tlnum): 'Return (tree_head, bodyLines).\n tree_head is new headline string in Tree buffer (text after |).\n bodyLines is list of lines to insert in Body buffer.\n ' tree_head = 'NewHeadline' bodyLines = [('%s %s' % (('*' * level), tree_head)), ''] return (tree_head, bodyLines)<|docstring|>Return (tree_head, bodyLines). tree_head is new headline string in Tree buffer (text after |). bodyLines is list of lines to insert in Body buffer.<|endoftext|>
89dd06d603c8fff41a3730459a31646085cf7cac0e411e98f53afe348baae657
def hook_changeLevBodyHead(VO, h, levDelta): 'Increase or decrease the level number of a Body headline by levDelta.' if (levDelta == 0): return h m = headline_match(h) level = len(m.group(1)) return ('%s%s' % (('*' * (level + levDelta)), h[m.end(1):]))
Increase or decrease the level number of a Body headline by levDelta.
vimsetting/bundle/VOom/autoload/voom/voom_mode_viki.py
hook_changeLevBodyHead
thuleqaid/boost_study
1
python
def hook_changeLevBodyHead(VO, h, levDelta): if (levDelta == 0): return h m = headline_match(h) level = len(m.group(1)) return ('%s%s' % (('*' * (level + levDelta)), h[m.end(1):]))
def hook_changeLevBodyHead(VO, h, levDelta): if (levDelta == 0): return h m = headline_match(h) level = len(m.group(1)) return ('%s%s' % (('*' * (level + levDelta)), h[m.end(1):]))<|docstring|>Increase or decrease level number of Body headline by levDelta.<|endoftext|>
9c83d704153d25f57bc74f7e46873ee0fa50c36ba5ef6df1396515ea9b3707ce
def setup_states(self, state_dict, start_state): '\n Given a dictionary of states and a state to start in,\n creates the self.state_dict.\n ' self.state_dict = state_dict self.state_name = start_state self.state = self.state_dict[self.state_name]
Given a dictionary of states and a state to start in, creates the self.state_dict.
state_machine.py
setup_states
maxerbox/Lunar-Lander-with-pygame
2
python
def setup_states(self, state_dict, start_state): '\n Given a dictionary of states and a state to start in,\n creates the self.state_dict.\n ' self.state_dict = state_dict self.state_name = start_state self.state = self.state_dict[self.state_name]
def setup_states(self, state_dict, start_state): '\n Given a dictionary of states and a state to start in,\n creates the self.state_dict.\n ' self.state_dict = state_dict self.state_name = start_state self.state = self.state_dict[self.state_name]<|docstring|>Given a dictionary of states and a state to start in, creates the self.state_dict.<|endoftext|>
a4669dc6c8e39a49162bffcc1eea42f43299b4667db22e019b715cdc56d936d4
def update(self, keys, now): '\n Checks if a state is done or has called for a game quit.\n State is flipped if necessary and State.update is called.\n ' self.now = now if self.state.quit: self.done = True elif self.state.done: self.flip_state() self.state.update(keys, now)
Checks if a state is done or has called for a game quit. State is flipped if necessary and State.update is called.
state_machine.py
update
maxerbox/Lunar-Lander-with-pygame
2
python
def update(self, keys, now): '\n Checks if a state is done or has called for a game quit.\n State is flipped if necessary and State.update is called.\n ' self.now = now if self.state.quit: self.done = True elif self.state.done: self.flip_state() self.state.update(keys, now)
def update(self, keys, now): '\n Checks if a state is done or has called for a game quit.\n State is flipped if necessary and State.update is called.\n ' self.now = now if self.state.quit: self.done = True elif self.state.done: self.flip_state() self.state.update(keys, now)<|docstring|>Checks if a state is done or has called for a game quit. State is flipped if necessary and State.update is called.<|endoftext|>
7890b25cdc5f4decbb3a3dae027a6632c19c5a09d8ef803b291330daf447fb0d
def flip_state(self): '\n When a State changes to done necessary startup and cleanup functions\n are called and the current State is changed.\n ' (previous, self.state_name) = (self.state_name, self.state.next) persist = self.state.cleanup() self.state = self.state_dict[self.state_name] self.state.startup(self.now, persist) self.state.previous = previous
When a State changes to done necessary startup and cleanup functions are called and the current State is changed.
state_machine.py
flip_state
maxerbox/Lunar-Lander-with-pygame
2
python
def flip_state(self): '\n When a State changes to done necessary startup and cleanup functions\n are called and the current State is changed.\n ' (previous, self.state_name) = (self.state_name, self.state.next) persist = self.state.cleanup() self.state = self.state_dict[self.state_name] self.state.startup(self.now, persist) self.state.previous = previous
def flip_state(self): '\n When a State changes to done necessary startup and cleanup functions\n are called and the current State is changed.\n ' (previous, self.state_name) = (self.state_name, self.state.next) persist = self.state.cleanup() self.state = self.state_dict[self.state_name] self.state.startup(self.now, persist) self.state.previous = previous<|docstring|>When a State changes to done necessary startup and cleanup functions are called and the current State is changed.<|endoftext|>
ac11a630835095a8d3d1c5fd23528a275f658aa34db784ed736cdfbe694bd7dc
def get_event(self, event): '\n Pass events down to current State.\n ' self.state.get_event(event)
Pass events down to current State.
state_machine.py
get_event
maxerbox/Lunar-Lander-with-pygame
2
python
def get_event(self, event): '\n \n ' self.state.get_event(event)
def get_event(self, event): '\n \n ' self.state.get_event(event)<|docstring|>Pass events down to current State.<|endoftext|>
2e7d888822bfc91457c9b06dd5010cffc366d0e7c9a08ae9e92af31410f71bf4
def get_event(self, event): '\n Processes events that were passed from the main event loop.\n Must be overloaded in children.\n ' pass
Processes events that were passed from the main event loop. Must be overloaded in children.
state_machine.py
get_event
maxerbox/Lunar-Lander-with-pygame
2
python
def get_event(self, event): '\n Processes events that were passed from the main event loop.\n Must be overloaded in children.\n ' pass
def get_event(self, event): '\n Processes events that were passed from the main event loop.\n Must be overloaded in children.\n ' pass<|docstring|>Processes events that were passed from the main event loop. Must be overloaded in children.<|endoftext|>
be5d3c5fff0b017c2cecdce451d4b5d91939936fb70053f098d50d7d7095107d
def startup(self, now, persistant): '\n Add variables passed in persistant to the proper attributes and\n set the start time of the State to the current time.\n ' self.persist = persistant self.start_time = now
Add variables passed in persistant to the proper attributes and set the start time of the State to the current time.
state_machine.py
startup
maxerbox/Lunar-Lander-with-pygame
2
python
def startup(self, now, persistant): '\n Add variables passed in persistant to the proper attributes and\n set the start time of the State to the current time.\n ' self.persist = persistant self.start_time = now
def startup(self, now, persistant): '\n Add variables passed in persistant to the proper attributes and\n set the start time of the State to the current time.\n ' self.persist = persistant self.start_time = now<|docstring|>Add variables passed in persistant to the proper attributes and set the start time of the State to the current time.<|endoftext|>
0a408402c272943379f0dabe874c7a5b5e095f09f6d0df76eb1510063662c643
def cleanup(self): '\n Add variables that should persist to the self.persist dictionary.\n Then reset State.done to False.\n ' self.done = False return self.persist
Add variables that should persist to the self.persist dictionary. Then reset State.done to False.
state_machine.py
cleanup
maxerbox/Lunar-Lander-with-pygame
2
python
def cleanup(self): '\n Add variables that should persist to the self.persist dictionary.\n Then reset State.done to False.\n ' self.done = False return self.persist
def cleanup(self): '\n Add variables that should persist to the self.persist dictionary.\n Then reset State.done to False.\n ' self.done = False return self.persist<|docstring|>Add variables that should persist to the self.persist dictionary. Then reset State.done to False.<|endoftext|>
96c50c7485d9eec179dc680b2436ad6ffba63f63c89013c43ee0b0eb3f8dc6a8
def update(self, keys, now): 'Update function for state. Must be overloaded in children.' pass
Update function for state. Must be overloaded in children.
state_machine.py
update
maxerbox/Lunar-Lander-with-pygame
2
python
def update(self, keys, now): pass
def update(self, keys, now): pass<|docstring|>Update function for state. Must be overloaded in children.<|endoftext|>
2b34d8a31ec55d158a64d98f0ab90eaa8a862efd3e281dc973de1ead128858aa
@pytest.mark.sanity def test_kill_essential(): 'kill the essential task, verify that both tasks are relaunched against a matching executor' verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) sdk_cmd.kill_task_with_pattern('shared-volume/essential', 'nobody', agent_host=old_tasks[0].host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', [t.id for t in old_tasks]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) verify_shared_executor('hello-0', delete_files=False)
kill the essential task, verify that both tasks are relaunched against a matching executor
frameworks/helloworld/tests/test_nonessential_tasks.py
test_kill_essential
kaiwalyajoshi/dcos-commons
201
python
@pytest.mark.sanity def test_kill_essential(): verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) sdk_cmd.kill_task_with_pattern('shared-volume/essential', 'nobody', agent_host=old_tasks[0].host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', [t.id for t in old_tasks]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) verify_shared_executor('hello-0', delete_files=False)
@pytest.mark.sanity def test_kill_essential(): verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) sdk_cmd.kill_task_with_pattern('shared-volume/essential', 'nobody', agent_host=old_tasks[0].host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', [t.id for t in old_tasks]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) verify_shared_executor('hello-0', delete_files=False)<|docstring|>kill the essential task, verify that both tasks are relaunched against a matching executor<|endoftext|>
f5d8f57b1e4bf42c5b472f4c23900952c1017fb6e22d9e13d33ccfc420151525
@pytest.mark.sanity def test_kill_nonessential(): 'kill the nonessential task, verify that the nonessential task is relaunched against the same executor as before' verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) old_essential_task = [t for t in old_tasks if (t.name == 'hello-0-essential')][0] old_nonessential_task = [t for t in old_tasks if (t.name == 'hello-0-nonessential')][0] sdk_cmd.kill_task_with_pattern('shared-volume/nonessential', 'nobody', agent_host=old_nonessential_task.host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0-nonessential', [old_nonessential_task.id]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) sdk_tasks.check_tasks_not_updated(config.SERVICE_NAME, 'hello-0-essential', [old_essential_task.id]) verify_shared_executor('hello-0', expected_files=['nonessential'])
kill the nonessential task, verify that the nonessential task is relaunched against the same executor as before
frameworks/helloworld/tests/test_nonessential_tasks.py
test_kill_nonessential
kaiwalyajoshi/dcos-commons
201
python
@pytest.mark.sanity def test_kill_nonessential(): verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) old_essential_task = [t for t in old_tasks if (t.name == 'hello-0-essential')][0] old_nonessential_task = [t for t in old_tasks if (t.name == 'hello-0-nonessential')][0] sdk_cmd.kill_task_with_pattern('shared-volume/nonessential', 'nobody', agent_host=old_nonessential_task.host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0-nonessential', [old_nonessential_task.id]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) sdk_tasks.check_tasks_not_updated(config.SERVICE_NAME, 'hello-0-essential', [old_essential_task.id]) verify_shared_executor('hello-0', expected_files=['nonessential'])
@pytest.mark.sanity def test_kill_nonessential(): verify_shared_executor('hello-0') old_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME, 'hello-0') assert (len(old_tasks) == 2) old_essential_task = [t for t in old_tasks if (t.name == 'hello-0-essential')][0] old_nonessential_task = [t for t in old_tasks if (t.name == 'hello-0-nonessential')][0] sdk_cmd.kill_task_with_pattern('shared-volume/nonessential', 'nobody', agent_host=old_nonessential_task.host) sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0-nonessential', [old_nonessential_task.id]) sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME) sdk_tasks.check_tasks_not_updated(config.SERVICE_NAME, 'hello-0-essential', [old_essential_task.id]) verify_shared_executor('hello-0', expected_files=['nonessential'])<|docstring|>kill the nonessential task, verify that the nonessential task is relaunched against the same executor as before<|endoftext|>
282ad76be48bdc8e25f58be77862db710e7e064298bfd2aeab0f7864614d4e6f
def verify_shared_executor(pod_name, expected_files=['essential', 'nonessential'], delete_files=True): "verify that both tasks share the same executor:\n - matching ExecutorInfo\n - both 'essential' and 'nonessential' present in shared-volume/ across both tasks\n " (rc, stdout, _) = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info {}'.format(pod_name), print_output=False) assert (rc == 0), 'Pod info failed' try: tasks = json.loads(stdout) except Exception: log.exception('Failed to parse pod info: {}'.format(stdout)) assert False, 'Failed to parse pod info, see above' assert (len(tasks) == 2), 'Expected 2 tasks: {}'.format(stdout) executor = tasks[0]['info']['executor'] for task in tasks[1:]: assert (executor == task['info']['executor']) task_names = [task['info']['name'] for task in tasks] for task_name in task_names: filenames = sdk_cmd.run_cli('task ls {} shared-volume/'.format(task_name))[1].strip().split() assert (set(expected_files) == set(filenames)) if delete_files: if sdk_utils.dcos_version_less_than('1.10'): expected_file_path = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('find /var/lib/mesos/slave/volumes -iname ' + filenames[0]))[1].strip() volume_dir = os.path.dirname(expected_file_path) else: volume_dir = 'shared-volume/' sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('rm ' + ' '.join([os.path.join(volume_dir, name) for name in filenames])))
verify that both tasks share the same executor: - matching ExecutorInfo - both 'essential' and 'nonessential' present in shared-volume/ across both tasks
frameworks/helloworld/tests/test_nonessential_tasks.py
verify_shared_executor
kaiwalyajoshi/dcos-commons
201
python
def verify_shared_executor(pod_name, expected_files=['essential', 'nonessential'], delete_files=True): "verify that both tasks share the same executor:\n - matching ExecutorInfo\n - both 'essential' and 'nonessential' present in shared-volume/ across both tasks\n " (rc, stdout, _) = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info {}'.format(pod_name), print_output=False) assert (rc == 0), 'Pod info failed' try: tasks = json.loads(stdout) except Exception: log.exception('Failed to parse pod info: {}'.format(stdout)) assert False, 'Failed to parse pod info, see above' assert (len(tasks) == 2), 'Expected 2 tasks: {}'.format(stdout) executor = tasks[0]['info']['executor'] for task in tasks[1:]: assert (executor == task['info']['executor']) task_names = [task['info']['name'] for task in tasks] for task_name in task_names: filenames = sdk_cmd.run_cli('task ls {} shared-volume/'.format(task_name))[1].strip().split() assert (set(expected_files) == set(filenames)) if delete_files: if sdk_utils.dcos_version_less_than('1.10'): expected_file_path = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('find /var/lib/mesos/slave/volumes -iname ' + filenames[0]))[1].strip() volume_dir = os.path.dirname(expected_file_path) else: volume_dir = 'shared-volume/' sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('rm ' + ' '.join([os.path.join(volume_dir, name) for name in filenames])))
def verify_shared_executor(pod_name, expected_files=['essential', 'nonessential'], delete_files=True): "verify that both tasks share the same executor:\n - matching ExecutorInfo\n - both 'essential' and 'nonessential' present in shared-volume/ across both tasks\n " (rc, stdout, _) = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info {}'.format(pod_name), print_output=False) assert (rc == 0), 'Pod info failed' try: tasks = json.loads(stdout) except Exception: log.exception('Failed to parse pod info: {}'.format(stdout)) assert False, 'Failed to parse pod info, see above' assert (len(tasks) == 2), 'Expected 2 tasks: {}'.format(stdout) executor = tasks[0]['info']['executor'] for task in tasks[1:]: assert (executor == task['info']['executor']) task_names = [task['info']['name'] for task in tasks] for task_name in task_names: filenames = sdk_cmd.run_cli('task ls {} shared-volume/'.format(task_name))[1].strip().split() assert (set(expected_files) == set(filenames)) if delete_files: if sdk_utils.dcos_version_less_than('1.10'): expected_file_path = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('find /var/lib/mesos/slave/volumes -iname ' + filenames[0]))[1].strip() volume_dir = os.path.dirname(expected_file_path) else: volume_dir = 'shared-volume/' sdk_cmd.service_task_exec(config.SERVICE_NAME, task_names[0], ('rm ' + ' '.join([os.path.join(volume_dir, name) for name in filenames])))<|docstring|>verify that both tasks share the same executor: - matching ExecutorInfo - both 'essential' and 'nonessential' present in shared-volume/ across both tasks<|endoftext|>
9e2fc6b4ae5657a6c412cc5e5c331b5051ad9b2f0fec7a5f709d430c5c36cb03
def log(message, prefix_newline=False): 'Logging function, provides a hook to suppress or redirect log messages.' print((((('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time())) + ': ') + str(message)))
Logging function, provides a hook to suppress or redirect log messages.
lib/kb_stringtie/Utils/file_utils.py
log
Tianhao-Gu/kb_stringtie
1
python
def log(message, prefix_newline=False): print((((('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time())) + ': ') + str(message)))
def log(message, prefix_newline=False): print((((('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time())) + ': ') + str(message)))<|docstring|>Logging function, provides a hook to suppress or redirect log messages.<|endoftext|>
3ec2341300c534fdf19a040e9d01468f15563129645e208a367d27bbd1180a32
def _make_gff(file_path, append_file, novel_prefix='MSTRG.'): 'Works on the very narrow case of gtf files from stringtie.' type_idx = 2 written_genes = set() timestamp = str(datetime.datetime.now()).split('.')[0] if (not os.path.isfile(file_path)): raise ValueError('{} is not a file'.format(file_path)) with open(append_file, 'a') as output_file: with open(file_path, 'r') as input_file: for line in input_file: if (line[0] == '#'): continue sl = line.split('\t') gene_id = sl[(- 1)].split('gene_id "')[1].split('"')[0] transcript_id = line.split('transcript_id "')[1].split('"')[0] if ('ref_gene_id "' in sl[(- 1)]): gene_id = sl[(- 1)].split('ref_gene_id "')[1].split('"')[0] elif ((sl[type_idx] == 'transcript') and (gene_id not in written_genes)): sl[type_idx] = 'gene' sl[(- 1)] = 'ID={}; note="Spoofed gene for a RNASeq transcript\n'.format(gene_id) output_file.write('\t'.join(sl)) written_genes.add(gene_id) if (novel_prefix in transcript_id): if (sl[type_idx] == 'exon'): sl[(- 1)] = 'Parent={}'.format(transcript_id) elif gene_id: sl[type_idx] = 'transcript' sl[(- 1)] = 'ID={}; Parent={}'.format(transcript_id, gene_id) sl[(- 1)] += '; note=Predicted transcript from RNASeq run on {}\n'.format(timestamp) output_file.write('\t'.join(sl)) return append_file
Works on the very narrow case of gtf files from stringtie.
lib/kb_stringtie/Utils/file_utils.py
_make_gff
Tianhao-Gu/kb_stringtie
1
python
def _make_gff(file_path, append_file, novel_prefix='MSTRG.'): type_idx = 2 written_genes = set() timestamp = str(datetime.datetime.now()).split('.')[0] if (not os.path.isfile(file_path)): raise ValueError('{} is not a file'.format(file_path)) with open(append_file, 'a') as output_file: with open(file_path, 'r') as input_file: for line in input_file: if (line[0] == '#'): continue sl = line.split('\t') gene_id = sl[(- 1)].split('gene_id "')[1].split('"')[0] transcript_id = line.split('transcript_id "')[1].split('"')[0] if ('ref_gene_id "' in sl[(- 1)]): gene_id = sl[(- 1)].split('ref_gene_id "')[1].split('"')[0] elif ((sl[type_idx] == 'transcript') and (gene_id not in written_genes)): sl[type_idx] = 'gene' sl[(- 1)] = 'ID={}; note="Spoofed gene for a RNASeq transcript\n'.format(gene_id) output_file.write('\t'.join(sl)) written_genes.add(gene_id) if (novel_prefix in transcript_id): if (sl[type_idx] == 'exon'): sl[(- 1)] = 'Parent={}'.format(transcript_id) elif gene_id: sl[type_idx] = 'transcript' sl[(- 1)] = 'ID={}; Parent={}'.format(transcript_id, gene_id) sl[(- 1)] += '; note=Predicted transcript from RNASeq run on {}\n'.format(timestamp) output_file.write('\t'.join(sl)) return append_file
def _make_gff(file_path, append_file, novel_prefix='MSTRG.'): type_idx = 2 written_genes = set() timestamp = str(datetime.datetime.now()).split('.')[0] if (not os.path.isfile(file_path)): raise ValueError('{} is not a file'.format(file_path)) with open(append_file, 'a') as output_file: with open(file_path, 'r') as input_file: for line in input_file: if (line[0] == '#'): continue sl = line.split('\t') gene_id = sl[(- 1)].split('gene_id "')[1].split('"')[0] transcript_id = line.split('transcript_id "')[1].split('"')[0] if ('ref_gene_id "' in sl[(- 1)]): gene_id = sl[(- 1)].split('ref_gene_id "')[1].split('"')[0] elif ((sl[type_idx] == 'transcript') and (gene_id not in written_genes)): sl[type_idx] = 'gene' sl[(- 1)] = 'ID={}; note="Spoofed gene for a RNASeq transcript\n'.format(gene_id) output_file.write('\t'.join(sl)) written_genes.add(gene_id) if (novel_prefix in transcript_id): if (sl[type_idx] == 'exon'): sl[(- 1)] = 'Parent={}'.format(transcript_id) elif gene_id: sl[type_idx] = 'transcript' sl[(- 1)] = 'ID={}; Parent={}'.format(transcript_id, gene_id) sl[(- 1)] += '; note=Predicted transcript from RNASeq run on {}\n'.format(timestamp) output_file.write('\t'.join(sl)) return append_file<|docstring|>Works on the very narrow case of gtf files from stringtie.<|endoftext|>
c20a49ff2d9924b7c5658f0799130e6dfa89832e5ea10f289127377ba8bb19b0
def exchange_gene_ids(result_directory): '\n exchange_gene_ids: exchange gene_ids with gene_name\n ' log('starting exchanging gene_ids with gene_name') result_files = os.listdir(result_directory) if ('transcripts.gtf' in result_files): pass _update_transcripts(result_directory) if ('t_data.ctab' in result_files): _update_t_data(result_directory)
exchange_gene_ids: exchange gene_ids with gene_name
lib/kb_stringtie/Utils/file_utils.py
exchange_gene_ids
Tianhao-Gu/kb_stringtie
1
python
def exchange_gene_ids(result_directory): '\n \n ' log('starting exchanging gene_ids with gene_name') result_files = os.listdir(result_directory) if ('transcripts.gtf' in result_files): pass _update_transcripts(result_directory) if ('t_data.ctab' in result_files): _update_t_data(result_directory)
def exchange_gene_ids(result_directory): '\n \n ' log('starting exchanging gene_ids with gene_name') result_files = os.listdir(result_directory) if ('transcripts.gtf' in result_files): pass _update_transcripts(result_directory) if ('t_data.ctab' in result_files): _update_t_data(result_directory)<|docstring|>exchange_gene_ids: exchange gene_ids with gene_name<|endoftext|>
c7e24cb101d2ecf1d1fee272000176f2523b886e03c60dd7a9d7f112373a2df8
def external_superset_url(): 'The URL under which the Superset instance can be reached by users e.g. https://superset.bi.example.com' return 'http://localhost:8088'
The URL under which the Superset instance can be reached by users e.g. https://superset.bi.example.com
mara_superset/config.py
external_superset_url
leo-schick/mara-superset
3
python
def external_superset_url(): return 'http://localhost:8088'
def external_superset_url(): return 'http://localhost:8088'<|docstring|>The URL under which the Superset instance can be reached by users e.g. https://superset.bi.example.com<|endoftext|>
80b2e38b47e94dc5df702720aea9e0e2a4b9f92e491038dd4442070180d4da75
def internal_superset_url(): 'The URL under which the Superset instance can be reached from mara (usually circumventing SSOs etc.)' return 'http://localhost:8088'
The URL under which the Superset instance can be reached from mara (usually circumventing SSOs etc.)
mara_superset/config.py
internal_superset_url
leo-schick/mara-superset
3
python
def internal_superset_url(): return 'http://localhost:8088'
def internal_superset_url(): return 'http://localhost:8088'<|docstring|>The URL under which the Superset instance can be reached from mara (usually circumventing SSOs etc.)<|endoftext|>
3a0b2f42f2607ac9228f33c87ca4d8ec4acb02046b83a88d0a426b5092b8c2c1
def superset_api_username() -> str: 'The email of the user for accessing the superset api' return 'admin'
The email of the user for accessing the superset api
mara_superset/config.py
superset_api_username
leo-schick/mara-superset
3
python
def superset_api_username() -> str: return 'admin'
def superset_api_username() -> str: return 'admin'<|docstring|>The email of the user for accessing the superset api<|endoftext|>
559d1fb88e7de7ba74e3746ac7cfdad0115816e130bc1b17350d69a7996ee3b7
def superset_api_password(): 'The password of the user for accessing the superset api' return 'admin'
The password of the user for accessing the superset api
mara_superset/config.py
superset_api_password
leo-schick/mara-superset
3
python
def superset_api_password(): return 'admin'
def superset_api_password(): return 'admin'<|docstring|>The password of the user for accessing the superset api<|endoftext|>
d2897227210ce903f2a65fc59833162ea10463fecad483ac1a70766f249dce03
def superset_data_db_alias() -> str: 'The alias of the database that Superset reads data from' return 'superset-data-read'
The alias of the database that Superset reads data from
mara_superset/config.py
superset_data_db_alias
leo-schick/mara-superset
3
python
def superset_data_db_alias() -> str: return 'superset-data-read'
def superset_data_db_alias() -> str: return 'superset-data-read'<|docstring|>The alias of the database that Superset reads data from<|endoftext|>
cf90c0ff64f15003ee795ce4ef527e2089083958b540c122a3b2feb526ed614b
def superset_data_db_name() -> str: 'The name (in Superset) of the database that Superset reads from' return 'MyCompany DWH'
The name (in Superset) of the database that Superset reads from
mara_superset/config.py
superset_data_db_name
leo-schick/mara-superset
3
python
def superset_data_db_name() -> str: return 'MyCompany DWH'
def superset_data_db_name() -> str: return 'MyCompany DWH'<|docstring|>The name (in Superset) of the database that Superset reads from<|endoftext|>
6fdef680fe9eee13a73c5f2b2c6f7013b875a990048bdeba5f875da545df0c89
def superset_data_db_schema() -> str: 'The name of the schema where the flattened data sets for Superset are stored' return 'superset'
The name of the schema where the flattened data sets for Superset are stored
mara_superset/config.py
superset_data_db_schema
leo-schick/mara-superset
3
python
def superset_data_db_schema() -> str: return 'superset'
def superset_data_db_schema() -> str: return 'superset'<|docstring|>The name of the schema where the flattened data sets for Superset are stored<|endoftext|>
0aa56bb435c7a94c4a775eb27aa12b737c84fde634b1d53131401c7e2b901908
def metadata_update_strategy(): 'The default update strategy to be used when synchronizing metadata' from .metadata import UpdateStrategy return (UpdateStrategy.CREATE | UpdateStrategy.UPDATE)
The default update strategy to be used when synchronizing metadata
mara_superset/config.py
metadata_update_strategy
leo-schick/mara-superset
3
python
def metadata_update_strategy(): from .metadata import UpdateStrategy return (UpdateStrategy.CREATE | UpdateStrategy.UPDATE)
def metadata_update_strategy(): from .metadata import UpdateStrategy return (UpdateStrategy.CREATE | UpdateStrategy.UPDATE)<|docstring|>The default update strategy to be used when synchronizing metadata<|endoftext|>
0099486041b246607d0ce8a93acdfb35dd0a80f27831a908276b15dbfc4200c1
def __init__(self, configuration): '\n Init OpsGenie client to interact with OpsGenie Api\n Parameters\n ----------\n configuration : Configuration\n ' if (not isinstance(configuration, Configuration)): InvalidConfigurationError() configuration.validate() self.configuration = configuration self._alertService = AlertService(configuration) self._userService = UserService(configuration) self._teamService = TeamService(configuration) self._accountService = AccountService(configuration)
Init OpsGenie client to interact with OpsGenie Api Parameters ---------- configuration : Configuration
opsgenie/client.py
__init__
kumarappan-arumugam/opsgenie-py
0
python
def __init__(self, configuration): '\n Init OpsGenie client to interact with OpsGenie Api\n Parameters\n ----------\n configuration : Configuration\n ' if (not isinstance(configuration, Configuration)): InvalidConfigurationError() configuration.validate() self.configuration = configuration self._alertService = AlertService(configuration) self._userService = UserService(configuration) self._teamService = TeamService(configuration) self._accountService = AccountService(configuration)
def __init__(self, configuration): '\n Init OpsGenie client to interact with OpsGenie Api\n Parameters\n ----------\n configuration : Configuration\n ' if (not isinstance(configuration, Configuration)): InvalidConfigurationError() configuration.validate() self.configuration = configuration self._alertService = AlertService(configuration) self._userService = UserService(configuration) self._teamService = TeamService(configuration) self._accountService = AccountService(configuration)<|docstring|>Init OpsGenie client to interact with OpsGenie Api Parameters ---------- configuration : Configuration<|endoftext|>
6040ca45eb4d276391afd24ca9e248cb031690b33dd7674485264ba3f7891cda
@property def accounts(self): '\n Returns\n -------\n UserService\n ' return self._accountService
Returns ------- UserService
opsgenie/client.py
accounts
kumarappan-arumugam/opsgenie-py
0
python
@property def accounts(self): '\n Returns\n -------\n UserService\n ' return self._accountService
@property def accounts(self): '\n Returns\n -------\n UserService\n ' return self._accountService<|docstring|>Returns ------- UserService<|endoftext|>
1734ff8eccff6317d9be1fdd4ebf8f2f9ebc8967e5553523c7426b63abaa29df
@property def alerts(self): '\n Returns\n -------\n AlertService\n ' return self._alertService
Returns ------- AlertService
opsgenie/client.py
alerts
kumarappan-arumugam/opsgenie-py
0
python
@property def alerts(self): '\n Returns\n -------\n AlertService\n ' return self._alertService
@property def alerts(self): '\n Returns\n -------\n AlertService\n ' return self._alertService<|docstring|>Returns ------- AlertService<|endoftext|>
18b374b707e53a110050a6db3fe695e079920a931059ab83417bf0461fefccb6
@property def users(self): '\n Returns\n -------\n UserService\n ' return self._userService
Returns ------- UserService
opsgenie/client.py
users
kumarappan-arumugam/opsgenie-py
0
python
@property def users(self): '\n Returns\n -------\n UserService\n ' return self._userService
@property def users(self): '\n Returns\n -------\n UserService\n ' return self._userService<|docstring|>Returns ------- UserService<|endoftext|>
cc45179d9789f6b5d14cba831b48331cde94ae10ca237944e7ceff201f2a0c23
@property def teams(self): '\n Returns\n -------\n UserService\n ' return self._teamService
Returns ------- UserService
opsgenie/client.py
teams
kumarappan-arumugam/opsgenie-py
0
python
@property def teams(self): '\n Returns\n -------\n UserService\n ' return self._teamService
@property def teams(self): '\n Returns\n -------\n UserService\n ' return self._teamService<|docstring|>Returns ------- UserService<|endoftext|>
fc92f17d51175a2d6c0ba9c47bf9e8d2267eccb05ccc26d5901e99f70a3eed62
def __init__(self, dataarray): 'Initialize the base accessor.' self._dataarray = dataarray
Initialize the base accessor.
decode/core/__init__.py
__init__
deshima-dev/decode
6
python
def __init__(self, dataarray): self._dataarray = dataarray
def __init__(self, dataarray): self._dataarray = dataarray<|docstring|>Initialize the base accessor.<|endoftext|>
fe122eadd55052785a7f725c4507c59c529b94a6511b02441e417a7a4ec673a1
def __getattr__(self, name): 'self._dataarray.name <=> self.name.' return getattr(self._dataarray, name)
self._dataarray.name <=> self.name.
decode/core/__init__.py
__getattr__
deshima-dev/decode
6
python
def __getattr__(self, name): return getattr(self._dataarray, name)
def __getattr__(self, name): return getattr(self._dataarray, name)<|docstring|>self._dataarray.name <=> self.name.<|endoftext|>
f8cc134b17817f49487f46aecacb5d906de75f9ad55fa2f754f5ecaf7c10aff8
def __setstate__(self, state): 'A method used for pickling.' self.__dict__ = state
A method used for pickling.
decode/core/__init__.py
__setstate__
deshima-dev/decode
6
python
def __setstate__(self, state): self.__dict__ = state
def __setstate__(self, state): self.__dict__ = state<|docstring|>A method used for pickling.<|endoftext|>
27528e88eb42d705c9e8de3cfb61a2ca54a96c37ef3d56b48be412037acdbe1a
def __getstate__(self): 'A method used for unpickling.' return self.__dict__
A method used for unpickling.
decode/core/__init__.py
__getstate__
deshima-dev/decode
6
python
def __getstate__(self): return self.__dict__
def __getstate__(self): return self.__dict__<|docstring|>A method used for unpickling.<|endoftext|>
794cbc5caa7db7dbc5c6068a1876292e09f2ff6574ff0c2ce5b4590493e005e8
@property def scalarcoords(self): "A dictionary of values that don't label any axes (point-like)." return {k: v.values for (k, v) in self.coords.items() if (v.dims == ())}
A dictionary of values that don't label any axes (point-like).
decode/core/__init__.py
scalarcoords
deshima-dev/decode
6
python
@property def scalarcoords(self): return {k: v.values for (k, v) in self.coords.items() if (v.dims == ())}
@property def scalarcoords(self): return {k: v.values for (k, v) in self.coords.items() if (v.dims == ())}<|docstring|>A dictionary of values that don't label any axes (point-like).<|endoftext|>
186adefbfa75329c10effcae6691fbe5a0ebf258be0a5c978c60069a0b56fc97
def runTest(self): 'This function will add sequence(s) under schema node.' db_name = parent_node_dict['database'][(- 1)]['db_name'] schema_info = parent_node_dict['schema'][(- 1)] self.server_id = schema_info['server_id'] self.db_id = schema_info['db_id'] db_con = database_utils.connect_database(self, utils.SERVER_GROUP, self.server_id, self.db_id) if (not db_con['data']['connected']): raise Exception('Could not connect to database to add sequence.') schema_id = schema_info['schema_id'] schema_name = schema_info['schema_name'] schema_response = schema_utils.verify_schemas(self.server, db_name, schema_name) if (not schema_response): raise Exception('Could not find the schema to add sequence.') db_user = self.server['username'] common_data = {'relacl': [{'grantee': db_user, 'grantor': db_user, 'privileges': [{'privilege_type': 'r', 'privilege': True, 'with_grant': True}, {'privilege_type': 'w', 'privilege': True, 'with_grant': False}, {'privilege_type': 'U', 'privilege': True, 'with_grant': False}]}], 'schema': schema_name, 'seqowner': db_user} self.data.update(common_data) response = self.tester.post(((((((((self.url + str(utils.SERVER_GROUP)) + '/') + str(self.server_id)) + '/') + str(self.db_id)) + '/') + str(schema_id)) + '/'), data=json.dumps(self.data), content_type='html/json') self.assertEquals(response.status_code, 200)
This function will add sequence(s) under schema node.
local/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/sequences/tests/test_sequence_add.py
runTest
sahilsdei/django_ecommerce
0
python
def runTest(self): db_name = parent_node_dict['database'][(- 1)]['db_name'] schema_info = parent_node_dict['schema'][(- 1)] self.server_id = schema_info['server_id'] self.db_id = schema_info['db_id'] db_con = database_utils.connect_database(self, utils.SERVER_GROUP, self.server_id, self.db_id) if (not db_con['data']['connected']): raise Exception('Could not connect to database to add sequence.') schema_id = schema_info['schema_id'] schema_name = schema_info['schema_name'] schema_response = schema_utils.verify_schemas(self.server, db_name, schema_name) if (not schema_response): raise Exception('Could not find the schema to add sequence.') db_user = self.server['username'] common_data = {'relacl': [{'grantee': db_user, 'grantor': db_user, 'privileges': [{'privilege_type': 'r', 'privilege': True, 'with_grant': True}, {'privilege_type': 'w', 'privilege': True, 'with_grant': False}, {'privilege_type': 'U', 'privilege': True, 'with_grant': False}]}], 'schema': schema_name, 'seqowner': db_user} self.data.update(common_data) response = self.tester.post(((((((((self.url + str(utils.SERVER_GROUP)) + '/') + str(self.server_id)) + '/') + str(self.db_id)) + '/') + str(schema_id)) + '/'), data=json.dumps(self.data), content_type='html/json') self.assertEquals(response.status_code, 200)
def runTest(self): db_name = parent_node_dict['database'][(- 1)]['db_name'] schema_info = parent_node_dict['schema'][(- 1)] self.server_id = schema_info['server_id'] self.db_id = schema_info['db_id'] db_con = database_utils.connect_database(self, utils.SERVER_GROUP, self.server_id, self.db_id) if (not db_con['data']['connected']): raise Exception('Could not connect to database to add sequence.') schema_id = schema_info['schema_id'] schema_name = schema_info['schema_name'] schema_response = schema_utils.verify_schemas(self.server, db_name, schema_name) if (not schema_response): raise Exception('Could not find the schema to add sequence.') db_user = self.server['username'] common_data = {'relacl': [{'grantee': db_user, 'grantor': db_user, 'privileges': [{'privilege_type': 'r', 'privilege': True, 'with_grant': True}, {'privilege_type': 'w', 'privilege': True, 'with_grant': False}, {'privilege_type': 'U', 'privilege': True, 'with_grant': False}]}], 'schema': schema_name, 'seqowner': db_user} self.data.update(common_data) response = self.tester.post(((((((((self.url + str(utils.SERVER_GROUP)) + '/') + str(self.server_id)) + '/') + str(self.db_id)) + '/') + str(schema_id)) + '/'), data=json.dumps(self.data), content_type='html/json') self.assertEquals(response.status_code, 200)<|docstring|>This function will add sequence(s) under schema node.<|endoftext|>
7fbd3935b9bcc6191bebe36e9144aad18fa255a8b258975a894438d67d383f73
def __init__(self, drop_num=500): 'SRS defense method.\n\n Args:\n drop_num (int, optional): number of points to drop.\n Defaults to 500.\n ' super(SRSDefense, self).__init__() self.drop_num = drop_num
SRS defense method. Args: drop_num (int, optional): number of points to drop. Defaults to 500.
baselines/defense/drop_points/SRS.py
__init__
code-roamer/IF-Defense
36
python
def __init__(self, drop_num=500): 'SRS defense method.\n\n Args:\n drop_num (int, optional): number of points to drop.\n Defaults to 500.\n ' super(SRSDefense, self).__init__() self.drop_num = drop_num
def __init__(self, drop_num=500): 'SRS defense method.\n\n Args:\n drop_num (int, optional): number of points to drop.\n Defaults to 500.\n ' super(SRSDefense, self).__init__() self.drop_num = drop_num<|docstring|>SRS defense method. Args: drop_num (int, optional): number of points to drop. Defaults to 500.<|endoftext|>
4838462ce0b5a6a8075a33e8209ca092192d94471d32c7d0478a203faf994088
def random_drop(self, pc): 'Random drop self.drop_num points in each pc.\n\n Args:\n pc (torch.FloatTensor): batch input pc, [B, K, 3]\n ' (B, K) = pc.shape[:2] idx = [np.random.choice(K, (K - self.drop_num), replace=False) for _ in range(B)] pc = torch.stack([pc[i][torch.from_numpy(idx[i]).long().to(pc.device)] for i in range(B)]) return pc
Random drop self.drop_num points in each pc. Args: pc (torch.FloatTensor): batch input pc, [B, K, 3]
baselines/defense/drop_points/SRS.py
random_drop
code-roamer/IF-Defense
36
python
def random_drop(self, pc): 'Random drop self.drop_num points in each pc.\n\n Args:\n pc (torch.FloatTensor): batch input pc, [B, K, 3]\n ' (B, K) = pc.shape[:2] idx = [np.random.choice(K, (K - self.drop_num), replace=False) for _ in range(B)] pc = torch.stack([pc[i][torch.from_numpy(idx[i]).long().to(pc.device)] for i in range(B)]) return pc
def random_drop(self, pc): 'Random drop self.drop_num points in each pc.\n\n Args:\n pc (torch.FloatTensor): batch input pc, [B, K, 3]\n ' (B, K) = pc.shape[:2] idx = [np.random.choice(K, (K - self.drop_num), replace=False) for _ in range(B)] pc = torch.stack([pc[i][torch.from_numpy(idx[i]).long().to(pc.device)] for i in range(B)]) return pc<|docstring|>Random drop self.drop_num points in each pc. Args: pc (torch.FloatTensor): batch input pc, [B, K, 3]<|endoftext|>
ce8676ab32c141f44ec837f8d657b73c67698504852ed18abb35cd1e2e569b77
def FoodsView(request): '\n API endpoint that returns filtered foods\n ' from urllib.parse import urlparse, parse_qs with open(os.path.join(os.path.dirname(__file__), 'food_data.json'), 'r') as json_file: data = json.load(json_file) foods = data['report']['foods'] params = parse_qs(request.META['QUERY_STRING']) print('params: {}'.format(params)) def filter_foods(nutrients, ranges): return (is_in_range(nutrients[1], ranges['protein[0]'], ranges['protein[1]']) and is_in_range(nutrients[2], ranges['fat[0]'], ranges['fat[1]']) and is_in_range(nutrients[3], ranges['carb[0]'], ranges['carb[1]']) and is_in_range(nutrients[4], ranges['sugar[0]'], ranges['sugar[1]'])) def is_in_range(nutrient, min, max): gm = (nutrient['gm'] if (nutrient['gm'] != '--') else 0) return (int(min[0]) <= gm <= int(max[0])) response_data = ([food for food in foods if filter_foods(food['nutrients'], params)] if (params != None) else foods) return JsonResponse(response_data, safe=False)
API endpoint that returns filtered foods
project/api/views.py
FoodsView
daryllcheng/python_rest
0
python
def FoodsView(request): '\n \n ' from urllib.parse import urlparse, parse_qs with open(os.path.join(os.path.dirname(__file__), 'food_data.json'), 'r') as json_file: data = json.load(json_file) foods = data['report']['foods'] params = parse_qs(request.META['QUERY_STRING']) print('params: {}'.format(params)) def filter_foods(nutrients, ranges): return (is_in_range(nutrients[1], ranges['protein[0]'], ranges['protein[1]']) and is_in_range(nutrients[2], ranges['fat[0]'], ranges['fat[1]']) and is_in_range(nutrients[3], ranges['carb[0]'], ranges['carb[1]']) and is_in_range(nutrients[4], ranges['sugar[0]'], ranges['sugar[1]'])) def is_in_range(nutrient, min, max): gm = (nutrient['gm'] if (nutrient['gm'] != '--') else 0) return (int(min[0]) <= gm <= int(max[0])) response_data = ([food for food in foods if filter_foods(food['nutrients'], params)] if (params != None) else foods) return JsonResponse(response_data, safe=False)
def FoodsView(request): '\n \n ' from urllib.parse import urlparse, parse_qs with open(os.path.join(os.path.dirname(__file__), 'food_data.json'), 'r') as json_file: data = json.load(json_file) foods = data['report']['foods'] params = parse_qs(request.META['QUERY_STRING']) print('params: {}'.format(params)) def filter_foods(nutrients, ranges): return (is_in_range(nutrients[1], ranges['protein[0]'], ranges['protein[1]']) and is_in_range(nutrients[2], ranges['fat[0]'], ranges['fat[1]']) and is_in_range(nutrients[3], ranges['carb[0]'], ranges['carb[1]']) and is_in_range(nutrients[4], ranges['sugar[0]'], ranges['sugar[1]'])) def is_in_range(nutrient, min, max): gm = (nutrient['gm'] if (nutrient['gm'] != '--') else 0) return (int(min[0]) <= gm <= int(max[0])) response_data = ([food for food in foods if filter_foods(food['nutrients'], params)] if (params != None) else foods) return JsonResponse(response_data, safe=False)<|docstring|>API endpoint that returns filtered foods<|endoftext|>
9d1710c36e8f2ec29be0d0e97da1e4a4dc19d95b85bc34c151d00eabab4300ce
def test_zerocross(): 'Test zerocross with legendre polynomials.' def _bonnet(d, x): if (d == 0): return np.ones_like(x) elif (d == 1): return x else: return ((((((2 * d) - 1) * x) * _bonnet((d - 1), x)) - ((d - 1) * _bonnet((d - 2), x))) / d) x = np.linspace((- 1), 1, 100) legendre = np.empty([100, 5], dtype='float32') for n in range(5): legendre[:, n] = _bonnet(n, x) zx = np.linspace(0, 4, 5) assert all((zx == graph.zerocross(legendre)))
Test zerocross with legendre polynomials.
nigsp/tests/test_graph.py
test_zerocross
smoia/nigsp
2
python
def test_zerocross(): def _bonnet(d, x): if (d == 0): return np.ones_like(x) elif (d == 1): return x else: return ((((((2 * d) - 1) * x) * _bonnet((d - 1), x)) - ((d - 1) * _bonnet((d - 2), x))) / d) x = np.linspace((- 1), 1, 100) legendre = np.empty([100, 5], dtype='float32') for n in range(5): legendre[:, n] = _bonnet(n, x) zx = np.linspace(0, 4, 5) assert all((zx == graph.zerocross(legendre)))
def test_zerocross(): def _bonnet(d, x): if (d == 0): return np.ones_like(x) elif (d == 1): return x else: return ((((((2 * d) - 1) * x) * _bonnet((d - 1), x)) - ((d - 1) * _bonnet((d - 2), x))) / d) x = np.linspace((- 1), 1, 100) legendre = np.empty([100, 5], dtype='float32') for n in range(5): legendre[:, n] = _bonnet(n, x) zx = np.linspace(0, 4, 5) assert all((zx == graph.zerocross(legendre)))<|docstring|>Test zerocross with legendre polynomials.<|endoftext|>
0a753d112550deb189c5383bcdee14ffbf73518118621c0a2c9fe691cb11da17
def test_nodestrength(): 'Test nodestrength with random matrix.' a = np.random.rand(3, 3) a = (a - a.mean()) b = np.abs(a) s = b.sum(axis=0) f = s.mean(axis=(- 1)) n = graph.nodestrength(a) m = graph.nodestrength(b) o = graph.nodestrength(a, mean=True) assert all((n == m)) assert all((n == s)) assert (f == o)
Test nodestrength with random matrix.
nigsp/tests/test_graph.py
test_nodestrength
smoia/nigsp
2
python
def test_nodestrength(): a = np.random.rand(3, 3) a = (a - a.mean()) b = np.abs(a) s = b.sum(axis=0) f = s.mean(axis=(- 1)) n = graph.nodestrength(a) m = graph.nodestrength(b) o = graph.nodestrength(a, mean=True) assert all((n == m)) assert all((n == s)) assert (f == o)
def test_nodestrength(): a = np.random.rand(3, 3) a = (a - a.mean()) b = np.abs(a) s = b.sum(axis=0) f = s.mean(axis=(- 1)) n = graph.nodestrength(a) m = graph.nodestrength(b) o = graph.nodestrength(a, mean=True) assert all((n == m)) assert all((n == s)) assert (f == o)<|docstring|>Test nodestrength with random matrix.<|endoftext|>
fa751f4dbe93747e29610591676530e123a80ef3b07d090fa7fe93c23fa5379d
def find_dataset_lib(dataset_name): '\n Give the option --dataset [datasetname], import "data/datasetname_dataset.py"\n :param dataset_name: --dataset\n :return: "data/datasetname_dataset.py"\n ' dataset_filename = (('data.' + dataset_name) + '_dataset') datasetlib = importlib.import_module(dataset_filename) dataset = None target_dataset_name = (dataset_name.replace('_', '') + 'dataset') for (name, cls) in datasetlib.__dict__.items(): if (name.lower() == target_dataset_name.lower()): dataset = cls if (dataset is None): logger.info(('In %s.py, there should be a class name that matches %s in lowercase.' % (dataset_filename, target_dataset_name))) exit(0) return dataset
Give the option --dataset [datasetname], import "data/datasetname_dataset.py" :param dataset_name: --dataset :return: "data/datasetname_dataset.py"
merged_depth/nets/DiverseDepth/data/load_dataset.py
find_dataset_lib
mfkiwl/merged_depth
33
python
def find_dataset_lib(dataset_name): dataset_filename = (('data.' + dataset_name) + '_dataset') datasetlib = importlib.import_module(dataset_filename) dataset = None target_dataset_name = (dataset_name.replace('_', '') + 'dataset') for (name, cls) in datasetlib.__dict__.items(): if (name.lower() == target_dataset_name.lower()): dataset = cls if (dataset is None): logger.info(('In %s.py, there should be a class name that matches %s in lowercase.' % (dataset_filename, target_dataset_name))) exit(0) return dataset
def find_dataset_lib(dataset_name): dataset_filename = (('data.' + dataset_name) + '_dataset') datasetlib = importlib.import_module(dataset_filename) dataset = None target_dataset_name = (dataset_name.replace('_', '') + 'dataset') for (name, cls) in datasetlib.__dict__.items(): if (name.lower() == target_dataset_name.lower()): dataset = cls if (dataset is None): logger.info(('In %s.py, there should be a class name that matches %s in lowercase.' % (dataset_filename, target_dataset_name))) exit(0) return dataset<|docstring|>Give the option --dataset [datasetname], import "data/datasetname_dataset.py" :param dataset_name: --dataset :return: "data/datasetname_dataset.py"<|endoftext|>
f5c172668cb740cda66181d80dece0c8ff7317b5634fe7bbc103d66ea8f1d841
def draw_subspectrogram(spectrogram, duration_s, fft_rate, random_pick=False): '\n Draw a random subspectrogram of given time length from the given spectrogram\n ' if (not random_pick): np.random.seed(42) offset = int((np.random.random() * (spectrogram.shape[1] - (duration_s * fft_rate)))) return spectrogram[:, offset:(offset + int((duration_s * fft_rate)))]
Draw a random subspectrogram of given time length from the given spectrogram
utils.py
draw_subspectrogram
quentinverlhac/music-emotion-recognition
0
python
def draw_subspectrogram(spectrogram, duration_s, fft_rate, random_pick=False): '\n \n ' if (not random_pick): np.random.seed(42) offset = int((np.random.random() * (spectrogram.shape[1] - (duration_s * fft_rate)))) return spectrogram[:, offset:(offset + int((duration_s * fft_rate)))]
def draw_subspectrogram(spectrogram, duration_s, fft_rate, random_pick=False): '\n \n ' if (not random_pick): np.random.seed(42) offset = int((np.random.random() * (spectrogram.shape[1] - (duration_s * fft_rate)))) return spectrogram[:, offset:(offset + int((duration_s * fft_rate)))]<|docstring|>Draw a random subspectrogram of given time length from the given spectrogram<|endoftext|>
e77225295a2c3762fac224cb10eb1eab96b3f0ae9208c05f93daf8b11a1248b8
def segment_spectrogram(spectrogram, duration_s, fft_rate): '\n Segment the spectrogram into successive subspectrograms of given length and returns\n the list of all subspectrograms\n ' spectrograms = [] sub_len = int((duration_s * fft_rate)) n_subspectros = int((spectrogram.shape[1] / sub_len)) for i in range(n_subspectros): spectrograms.append(spectrogram[:, (sub_len * i):(sub_len * (i + 1))]) return spectrograms
Segment the spectrogram into successive subspectrograms of given length and returns the list of all subspectrograms
utils.py
segment_spectrogram
quentinverlhac/music-emotion-recognition
0
python
def segment_spectrogram(spectrogram, duration_s, fft_rate): '\n Segment the spectrogram into successive subspectrograms of given length and returns\n the list of all subspectrograms\n ' spectrograms = [] sub_len = int((duration_s * fft_rate)) n_subspectros = int((spectrogram.shape[1] / sub_len)) for i in range(n_subspectros): spectrograms.append(spectrogram[:, (sub_len * i):(sub_len * (i + 1))]) return spectrograms
def segment_spectrogram(spectrogram, duration_s, fft_rate): '\n Segment the spectrogram into successive subspectrograms of given length and returns\n the list of all subspectrograms\n ' spectrograms = [] sub_len = int((duration_s * fft_rate)) n_subspectros = int((spectrogram.shape[1] / sub_len)) for i in range(n_subspectros): spectrograms.append(spectrogram[:, (sub_len * i):(sub_len * (i + 1))]) return spectrograms<|docstring|>Segment the spectrogram into successive subspectrograms of given length and returns the list of all subspectrograms<|endoftext|>
0a52d6bc6b85fc0e1ec52084da218961d7343b9f12b80e7d457fee86e4e4b0f4
def segment_dataset(all_spectrograms, labels, duration_s, fft_rate): '\n Segment all spectrograms in the dataset in snippets of the given duration, and update\n the labels accordingly\n ' new_spectrograms = [] new_labels = [] for i in range(len(all_spectrograms)): segments = segment_spectrogram(all_spectrograms[i], duration_s, fft_rate) new_spectrograms += segments new_labels += [labels[i] for spectro in segments] return (new_spectrograms, new_labels)
Segment all spectrograms in the dataset in snippets of the given duration, and update the labels accordingly
utils.py
segment_dataset
quentinverlhac/music-emotion-recognition
0
python
def segment_dataset(all_spectrograms, labels, duration_s, fft_rate): '\n Segment all spectrograms in the dataset in snippets of the given duration, and update\n the labels accordingly\n ' new_spectrograms = [] new_labels = [] for i in range(len(all_spectrograms)): segments = segment_spectrogram(all_spectrograms[i], duration_s, fft_rate) new_spectrograms += segments new_labels += [labels[i] for spectro in segments] return (new_spectrograms, new_labels)
def segment_dataset(all_spectrograms, labels, duration_s, fft_rate): '\n Segment all spectrograms in the dataset in snippets of the given duration, and update\n the labels accordingly\n ' new_spectrograms = [] new_labels = [] for i in range(len(all_spectrograms)): segments = segment_spectrogram(all_spectrograms[i], duration_s, fft_rate) new_spectrograms += segments new_labels += [labels[i] for spectro in segments] return (new_spectrograms, new_labels)<|docstring|>Segment all spectrograms in the dataset in snippets of the given duration, and update the labels accordingly<|endoftext|>
84598ec15ad4b6438b2f15fb8e85e0643255ffa4db7506f9146154ce44078e09
def dump_elements(elements, dump_path): '\n Dumps all elements in the list to a binary file on the dump path\n ' with open(dump_path, 'wb') as f: pkl.dump(elements, f)
Dumps all elements in the list to a binary file on the dump path
utils.py
dump_elements
quentinverlhac/music-emotion-recognition
0
python
def dump_elements(elements, dump_path): '\n \n ' with open(dump_path, 'wb') as f: pkl.dump(elements, f)
def dump_elements(elements, dump_path): '\n \n ' with open(dump_path, 'wb') as f: pkl.dump(elements, f)<|docstring|>Dumps all elements in the list to a binary file on the dump path<|endoftext|>
97eae0c95c4d9be85e7099595a63ae8a1f26a00ca28d65b8acb124867b3c9ac7
def load_dump(dump_path): '\n Load the content of the file at dump path\n ' with open(dump_path, 'rb') as file: return pkl.load(file)
Load the content of the file at dump path
utils.py
load_dump
quentinverlhac/music-emotion-recognition
0
python
def load_dump(dump_path): '\n \n ' with open(dump_path, 'rb') as file: return pkl.load(file)
def load_dump(dump_path): '\n \n ' with open(dump_path, 'rb') as file: return pkl.load(file)<|docstring|>Load the content of the file at dump path<|endoftext|>
b2da0c01610cf98ece6d195dd71564df98c4d89d8dd783e7beb0aba2fbbe7503
def load_labels(path): '\n Load a pickle file containing list of labels and return as pandas DataFrame\n ' with open(path, 'rb') as f: labels = pkl.load(f) labels = pd.DataFrame(labels) labels.columns = config.EMOTIFY_EMOTIONS_ORDERED_LIST return labels
Load a pickle file containing list of labels and return as pandas DataFrame
utils.py
load_labels
quentinverlhac/music-emotion-recognition
0
python
def load_labels(path): '\n \n ' with open(path, 'rb') as f: labels = pkl.load(f) labels = pd.DataFrame(labels) labels.columns = config.EMOTIFY_EMOTIONS_ORDERED_LIST return labels
def load_labels(path): '\n \n ' with open(path, 'rb') as f: labels = pkl.load(f) labels = pd.DataFrame(labels) labels.columns = config.EMOTIFY_EMOTIONS_ORDERED_LIST return labels<|docstring|>Load a pickle file containing list of labels and return as pandas DataFrame<|endoftext|>
341065812402418d2e20d3b86cea63852f4cbabf73b2d0cee90b6968b849f78a
def average_predictions(full_spectrogram, model): '\n Average predictions of the model over all segments in the spectrogram\n ' segmented_spectro = segment_spectrogram(full_spectrogram, config.SUBSPECTROGRAM_DURATION_S, config.FFT_RATE) all_predictions = [] for spectro in segmented_spectro: tensor_spectro = tf.convert_to_tensor(spectro) tensor_spectro = tf.transpose(tensor_spectro) tensor_spectro = tf.reshape(tensor_spectro, [1, tensor_spectro.shape[0], tensor_spectro.shape[1]]) all_predictions.append(model.call(tensor_spectro)) return (tf.add_n(all_predictions) / len(segmented_spectro))
Average predictions of the model over all segments in the spectrogram
utils.py
average_predictions
quentinverlhac/music-emotion-recognition
0
python
def average_predictions(full_spectrogram, model): '\n \n ' segmented_spectro = segment_spectrogram(full_spectrogram, config.SUBSPECTROGRAM_DURATION_S, config.FFT_RATE) all_predictions = [] for spectro in segmented_spectro: tensor_spectro = tf.convert_to_tensor(spectro) tensor_spectro = tf.transpose(tensor_spectro) tensor_spectro = tf.reshape(tensor_spectro, [1, tensor_spectro.shape[0], tensor_spectro.shape[1]]) all_predictions.append(model.call(tensor_spectro)) return (tf.add_n(all_predictions) / len(segmented_spectro))
def average_predictions(full_spectrogram, model): '\n \n ' segmented_spectro = segment_spectrogram(full_spectrogram, config.SUBSPECTROGRAM_DURATION_S, config.FFT_RATE) all_predictions = [] for spectro in segmented_spectro: tensor_spectro = tf.convert_to_tensor(spectro) tensor_spectro = tf.transpose(tensor_spectro) tensor_spectro = tf.reshape(tensor_spectro, [1, tensor_spectro.shape[0], tensor_spectro.shape[1]]) all_predictions.append(model.call(tensor_spectro)) return (tf.add_n(all_predictions) / len(segmented_spectro))<|docstring|>Average predictions of the model over all segments in the spectrogram<|endoftext|>
e2cfc2f79d65cf203a4ab1f72be1798e3f82f107532fd100556697da21b1c125
def shuffle_data_and_labels(data, labels): '\n Shuffles data and labels the same way\n :param data: list: input data\n :param labels: list: corresponding labels\n :return: tuple, shuffled data and labels\n ' temp = list(zip(data, labels)) random.shuffle(temp) return zip(*temp)
Shuffles data and labels the same way :param data: list: input data :param labels: list: corresponding labels :return: tuple, shuffled data and labels
utils.py
shuffle_data_and_labels
quentinverlhac/music-emotion-recognition
0
python
def shuffle_data_and_labels(data, labels): '\n Shuffles data and labels the same way\n :param data: list: input data\n :param labels: list: corresponding labels\n :return: tuple, shuffled data and labels\n ' temp = list(zip(data, labels)) random.shuffle(temp) return zip(*temp)
def shuffle_data_and_labels(data, labels): '\n Shuffles data and labels the same way\n :param data: list: input data\n :param labels: list: corresponding labels\n :return: tuple, shuffled data and labels\n ' temp = list(zip(data, labels)) random.shuffle(temp) return zip(*temp)<|docstring|>Shuffles data and labels the same way :param data: list: input data :param labels: list: corresponding labels :return: tuple, shuffled data and labels<|endoftext|>
2fb30905c793ec4f3fe7f396ba3fbf6019670a5aaa961b833196e92572800faa
def restart(self): '\n Restart ibash process.\n ' self.child.kill() self.start() root.status.set_msg('Process killed and started !')
Restart ibash process.
vyapp/plugins/ibash.py
restart
AndreasDavour/vy
0
python
def restart(self): '\n \n ' self.child.kill() self.start() root.status.set_msg('Process killed and started !')
def restart(self): '\n \n ' self.child.kill() self.start() root.status.set_msg('Process killed and started !')<|docstring|>Restart ibash process.<|endoftext|>
296ea716addbcc7e6450ee7520715598eef36276c5890e102dbc102142bdc4ce
@abstractmethod def create(self, query: SearchParams) -> Tuple[(str, Dict)]: 'abstract'
abstract
src/moz_books/interface/i_request_params_factory.py
create
yukkun007/mmbooks
0
python
@abstractmethod def create(self, query: SearchParams) -> Tuple[(str, Dict)]:
@abstractmethod def create(self, query: SearchParams) -> Tuple[(str, Dict)]: <|docstring|>abstract<|endoftext|>
9a5ce9e6e9d81fb39781558bf47e1c5a91d1735fada1a169960cd3b00885a618
def parse_timestamp(timestamp): 'Convert a timestamp string to a datetime object.' (sec, ms) = timestamp.split(',') microseconds = (1000 * int(ms)) fields = (time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)) return datetime.datetime(*fields)
Convert a timestamp string to a datetime object.
util/task_times.py
parse_timestamp
WillChilds-Klein/mistress-mapreduce
2
python
def parse_timestamp(timestamp): (sec, ms) = timestamp.split(',') microseconds = (1000 * int(ms)) fields = (time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)) return datetime.datetime(*fields)
def parse_timestamp(timestamp): (sec, ms) = timestamp.split(',') microseconds = (1000 * int(ms)) fields = (time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)) return datetime.datetime(*fields)<|docstring|>Convert a timestamp string to a datetime object.<|endoftext|>
2f940caab0020a16f72ba350915cd2c52445ee2a8260bf00708ebe27f8ab9620
def _make_env(scenario_name, benchmark=False): '\n Creates a MultiAgentEnv object as env. This can be used similar to a gym\n environment by calling env.reset() and env.step().\n Use env.render() to view the environment on the screen.\n Input:\n scenario_name : name of the scenario from ./scenarios/ to be Returns\n (without the .py extension)\n benchmark : whether you want to produce benchmarking data\n (usually only done during evaluation)\n Some useful env properties (see environment.py):\n .observation_space : Returns the observation space for each agent\n .action_space : Returns the action space for each agent\n .n : Returns the number of Agents\n ' from multiagent.environment import MultiAgentEnv from multiagent import scenarios scenario = scenarios.load((scenario_name + '.py')).Scenario() world = scenario.make_world() if benchmark: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data) else: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation) return env
Creates a MultiAgentEnv object as env. This can be used similar to a gym environment by calling env.reset() and env.step(). Use env.render() to view the environment on the screen. Input: scenario_name : name of the scenario from ./scenarios/ to be Returns (without the .py extension) benchmark : whether you want to produce benchmarking data (usually only done during evaluation) Some useful env properties (see environment.py): .observation_space : Returns the observation space for each agent .action_space : Returns the action space for each agent .n : Returns the number of Agents
ma-poca/mapoca/mapoca/particles_env.py
_make_env
Unity-Technologies/paper-ml-agents
1
python
def _make_env(scenario_name, benchmark=False): '\n Creates a MultiAgentEnv object as env. This can be used similar to a gym\n environment by calling env.reset() and env.step().\n Use env.render() to view the environment on the screen.\n Input:\n scenario_name : name of the scenario from ./scenarios/ to be Returns\n (without the .py extension)\n benchmark : whether you want to produce benchmarking data\n (usually only done during evaluation)\n Some useful env properties (see environment.py):\n .observation_space : Returns the observation space for each agent\n .action_space : Returns the action space for each agent\n .n : Returns the number of Agents\n ' from multiagent.environment import MultiAgentEnv from multiagent import scenarios scenario = scenarios.load((scenario_name + '.py')).Scenario() world = scenario.make_world() if benchmark: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data) else: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation) return env
def _make_env(scenario_name, benchmark=False): '\n Creates a MultiAgentEnv object as env. This can be used similar to a gym\n environment by calling env.reset() and env.step().\n Use env.render() to view the environment on the screen.\n Input:\n scenario_name : name of the scenario from ./scenarios/ to be Returns\n (without the .py extension)\n benchmark : whether you want to produce benchmarking data\n (usually only done during evaluation)\n Some useful env properties (see environment.py):\n .observation_space : Returns the observation space for each agent\n .action_space : Returns the action space for each agent\n .n : Returns the number of Agents\n ' from multiagent.environment import MultiAgentEnv from multiagent import scenarios scenario = scenarios.load((scenario_name + '.py')).Scenario() world = scenario.make_world() if benchmark: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data) else: env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation) return env<|docstring|>Creates a MultiAgentEnv object as env. This can be used similar to a gym environment by calling env.reset() and env.step(). Use env.render() to view the environment on the screen. Input: scenario_name : name of the scenario from ./scenarios/ to be Returns (without the .py extension) benchmark : whether you want to produce benchmarking data (usually only done during evaluation) Some useful env properties (see environment.py): .observation_space : Returns the observation space for each agent .action_space : Returns the action space for each agent .n : Returns the number of Agents<|endoftext|>
48612fc1628369dea79e802bedfbaeef79c6eaec76443a0236b600e3843a48d6
def _check_array_dimensions(self, array=None): "\n Check that a grid's array dimensions agree with this grid's metadata\n\n Parameters\n ----------\n array : np.ndarray or list of np.ndarray, optional\n The array for which to test the dimensions. If this is not\n specified, this method performs a self-consistency check of array\n dimensions and meta-data.\n " n_pop_ref = None if isinstance(array, SphericalPolarGridView): array = array.quantities[array.viewed_quantity] for quantity in self.quantities: if (array is None): (n_pop, shape) = single_grid_dims(self.quantities[quantity]) else: (n_pop, shape) = single_grid_dims(array) if (shape != self.shape): raise ValueError(('Quantity arrays do not have the right dimensions: %s instead of %s' % (shape, self.shape))) if (n_pop is not None): if (n_pop_ref is None): n_pop_ref = n_pop elif (n_pop != n_pop_ref): raise ValueError('Not all dust lists in the grid have the same size')
Check that a grid's array dimensions agree with this grid's metadata Parameters ---------- array : np.ndarray or list of np.ndarray, optional The array for which to test the dimensions. If this is not specified, this method performs a self-consistency check of array dimensions and meta-data.
hyperion/grid/spherical_polar_grid.py
_check_array_dimensions
keflavich/hyperion
37
python
def _check_array_dimensions(self, array=None): "\n Check that a grid's array dimensions agree with this grid's metadata\n\n Parameters\n ----------\n array : np.ndarray or list of np.ndarray, optional\n The array for which to test the dimensions. If this is not\n specified, this method performs a self-consistency check of array\n dimensions and meta-data.\n " n_pop_ref = None if isinstance(array, SphericalPolarGridView): array = array.quantities[array.viewed_quantity] for quantity in self.quantities: if (array is None): (n_pop, shape) = single_grid_dims(self.quantities[quantity]) else: (n_pop, shape) = single_grid_dims(array) if (shape != self.shape): raise ValueError(('Quantity arrays do not have the right dimensions: %s instead of %s' % (shape, self.shape))) if (n_pop is not None): if (n_pop_ref is None): n_pop_ref = n_pop elif (n_pop != n_pop_ref): raise ValueError('Not all dust lists in the grid have the same size')
def _check_array_dimensions(self, array=None): "\n Check that a grid's array dimensions agree with this grid's metadata\n\n Parameters\n ----------\n array : np.ndarray or list of np.ndarray, optional\n The array for which to test the dimensions. If this is not\n specified, this method performs a self-consistency check of array\n dimensions and meta-data.\n " n_pop_ref = None if isinstance(array, SphericalPolarGridView): array = array.quantities[array.viewed_quantity] for quantity in self.quantities: if (array is None): (n_pop, shape) = single_grid_dims(self.quantities[quantity]) else: (n_pop, shape) = single_grid_dims(array) if (shape != self.shape): raise ValueError(('Quantity arrays do not have the right dimensions: %s instead of %s' % (shape, self.shape))) if (n_pop is not None): if (n_pop_ref is None): n_pop_ref = n_pop elif (n_pop != n_pop_ref): raise ValueError('Not all dust lists in the grid have the same size')<|docstring|>Check that a grid's array dimensions agree with this grid's metadata Parameters ---------- array : np.ndarray or list of np.ndarray, optional The array for which to test the dimensions. If this is not specified, this method performs a self-consistency check of array dimensions and meta-data.<|endoftext|>
6188421b344d8910855cedefc318d27ade5a0905d342c2b9fc65a887e15b1dcf
def read(self, group, quantities='all'): "\n Read the geometry and physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid from. This group should contain\n groups named 'Geometry' and 'Quantities'.\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " self.read_geometry(group['Geometry']) self.read_quantities(group['Quantities'], quantities=quantities) self._check_array_dimensions()
Read the geometry and physical quantities from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid from. This group should contain groups named 'Geometry' and 'Quantities'. quantities : 'all' or list Which physical quantities to read in. Use 'all' to read in all quantities or a list of strings to read only specific quantities.
hyperion/grid/spherical_polar_grid.py
read
keflavich/hyperion
37
python
def read(self, group, quantities='all'): "\n Read the geometry and physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid from. This group should contain\n groups named 'Geometry' and 'Quantities'.\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " self.read_geometry(group['Geometry']) self.read_quantities(group['Quantities'], quantities=quantities) self._check_array_dimensions()
def read(self, group, quantities='all'): "\n Read the geometry and physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid from. This group should contain\n groups named 'Geometry' and 'Quantities'.\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " self.read_geometry(group['Geometry']) self.read_quantities(group['Quantities'], quantities=quantities) self._check_array_dimensions()<|docstring|>Read the geometry and physical quantities from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid from. This group should contain groups named 'Geometry' and 'Quantities'. quantities : 'all' or list Which physical quantities to read in. Use 'all' to read in all quantities or a list of strings to read only specific quantities.<|endoftext|>
806911f5082f9fe3254be7810788962dfbbdbd6f158548af6fdcfe886b530760
def read_geometry(self, group): '\n Read in geometry information from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid geometry from.\n ' if (group.attrs['grid_type'].decode('utf-8') != 'sph_pol'): raise ValueError('Grid is not spherical polar') self.set_walls(group['walls_1']['r'], group['walls_2']['t'], group['walls_3']['p']) if (group.attrs['geometry'].decode('utf-8') != self.get_geometry_id()): raise Exception('Calculated geometry hash does not match hash in file')
Read in geometry information from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid geometry from.
hyperion/grid/spherical_polar_grid.py
read_geometry
keflavich/hyperion
37
python
def read_geometry(self, group): '\n Read in geometry information from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid geometry from.\n ' if (group.attrs['grid_type'].decode('utf-8') != 'sph_pol'): raise ValueError('Grid is not spherical polar') self.set_walls(group['walls_1']['r'], group['walls_2']['t'], group['walls_3']['p']) if (group.attrs['geometry'].decode('utf-8') != self.get_geometry_id()): raise Exception('Calculated geometry hash does not match hash in file')
def read_geometry(self, group): '\n Read in geometry information from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid geometry from.\n ' if (group.attrs['grid_type'].decode('utf-8') != 'sph_pol'): raise ValueError('Grid is not spherical polar') self.set_walls(group['walls_1']['r'], group['walls_2']['t'], group['walls_3']['p']) if (group.attrs['geometry'].decode('utf-8') != self.get_geometry_id()): raise Exception('Calculated geometry hash does not match hash in file')<|docstring|>Read in geometry information from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid geometry from.<|endoftext|>
6c809ccf37b6878b8319b98ec0d7545c36efb7bedbe7e979040d21c8b8eec662
def read_quantities(self, group, quantities='all'): "\n Read in physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid quantities from\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " if (quantities is not None): for quantity in group: if ((quantities == 'all') or (quantity in quantities)): array = np.array(group[quantity]) if (array.ndim == 4): self.quantities[quantity] = [array[i] for i in range(array.shape[0])] else: self.quantities[quantity] = array self._check_array_dimensions()
Read in physical quantities from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid quantities from quantities : 'all' or list Which physical quantities to read in. Use 'all' to read in all quantities or a list of strings to read only specific quantities.
hyperion/grid/spherical_polar_grid.py
read_quantities
keflavich/hyperion
37
python
def read_quantities(self, group, quantities='all'): "\n Read in physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid quantities from\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " if (quantities is not None): for quantity in group: if ((quantities == 'all') or (quantity in quantities)): array = np.array(group[quantity]) if (array.ndim == 4): self.quantities[quantity] = [array[i] for i in range(array.shape[0])] else: self.quantities[quantity] = array self._check_array_dimensions()
def read_quantities(self, group, quantities='all'): "\n Read in physical quantities from a spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to read the grid quantities from\n quantities : 'all' or list\n Which physical quantities to read in. Use 'all' to read in all\n quantities or a list of strings to read only specific quantities.\n " if (quantities is not None): for quantity in group: if ((quantities == 'all') or (quantity in quantities)): array = np.array(group[quantity]) if (array.ndim == 4): self.quantities[quantity] = [array[i] for i in range(array.shape[0])] else: self.quantities[quantity] = array self._check_array_dimensions()<|docstring|>Read in physical quantities from a spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to read the grid quantities from quantities : 'all' or list Which physical quantities to read in. Use 'all' to read in all quantities or a list of strings to read only specific quantities.<|endoftext|>
28749e0ee7bb916c1d612acba934d516b367faecbddfee93258bd573dabccd1c
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float): "\n Write out the spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n quantities : 'all' or list\n Which physical quantities to write out. Use 'all' to write out all\n quantities or a list of strings to write only specific quantities.\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n " if ('Geometry' not in group): g_geometry = group.create_group('Geometry') else: g_geometry = group['Geometry'] if ('Quantities' not in group): g_quantities = group.create_group('Quantities') else: g_quantities = group['Quantities'] g_geometry.attrs['grid_type'] = np.string_('sph_pol'.encode('utf-8')) g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8')) dset = g_geometry.create_dataset('walls_1', data=np.array(list(zip(self.r_wall)), dtype=[('r', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('cm'.encode('utf-8')) dset = g_geometry.create_dataset('walls_2', data=np.array(list(zip(self.t_wall)), dtype=[('t', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) dset = g_geometry.create_dataset('walls_3', data=np.array(list(zip(self.p_wall)), dtype=[('p', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) self._check_array_dimensions() for quantity in self.quantities: if ((quantities == 'all') or (quantity in quantities)): if isinstance(self.quantities[quantity], h5py.ExternalLink): link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths) else: dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity], compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
Write out the spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to write the grid to quantities : 'all' or list Which physical quantities to write out. Use 'all' to write out all quantities or a list of strings to write only specific quantities. copy : bool Whether to copy external links, or leave them as links. absolute_paths : bool If copy is False, then this indicates whether to use absolute or relative paths for links. compression : bool Whether to compress the arrays in the HDF5 file wall_dtype : type The datatype to use to write the wall positions physics_dtype : type The datatype to use to write the physical quantities
hyperion/grid/spherical_polar_grid.py
write
keflavich/hyperion
37
python
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float): "\n Write out the spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n quantities : 'all' or list\n Which physical quantities to write out. Use 'all' to write out all\n quantities or a list of strings to write only specific quantities.\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n " if ('Geometry' not in group): g_geometry = group.create_group('Geometry') else: g_geometry = group['Geometry'] if ('Quantities' not in group): g_quantities = group.create_group('Quantities') else: g_quantities = group['Quantities'] g_geometry.attrs['grid_type'] = np.string_('sph_pol'.encode('utf-8')) g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8')) dset = g_geometry.create_dataset('walls_1', data=np.array(list(zip(self.r_wall)), dtype=[('r', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('cm'.encode('utf-8')) dset = g_geometry.create_dataset('walls_2', data=np.array(list(zip(self.t_wall)), dtype=[('t', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) dset = g_geometry.create_dataset('walls_3', data=np.array(list(zip(self.p_wall)), dtype=[('p', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) self._check_array_dimensions() for quantity in self.quantities: if ((quantities == 'all') or (quantity in quantities)): if isinstance(self.quantities[quantity], h5py.ExternalLink): link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths) else: dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity], compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float): "\n Write out the spherical polar grid\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n quantities : 'all' or list\n Which physical quantities to write out. Use 'all' to write out all\n quantities or a list of strings to write only specific quantities.\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n " if ('Geometry' not in group): g_geometry = group.create_group('Geometry') else: g_geometry = group['Geometry'] if ('Quantities' not in group): g_quantities = group.create_group('Quantities') else: g_quantities = group['Quantities'] g_geometry.attrs['grid_type'] = np.string_('sph_pol'.encode('utf-8')) g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8')) dset = g_geometry.create_dataset('walls_1', data=np.array(list(zip(self.r_wall)), dtype=[('r', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('cm'.encode('utf-8')) dset = g_geometry.create_dataset('walls_2', data=np.array(list(zip(self.t_wall)), dtype=[('t', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) dset = g_geometry.create_dataset('walls_3', data=np.array(list(zip(self.p_wall)), dtype=[('p', wall_dtype)]), compression=compression) dset.attrs['Unit'] = np.string_('rad'.encode('utf-8')) self._check_array_dimensions() for quantity in self.quantities: if ((quantities == 'all') or (quantity in quantities)): if isinstance(self.quantities[quantity], h5py.ExternalLink): link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths) else: dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity], compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))<|docstring|>Write out the spherical polar grid Parameters ---------- group : h5py.Group The HDF5 group to write the grid to quantities : 'all' or list Which physical quantities to write out. Use 'all' to write out all quantities or a list of strings to write only specific quantities. copy : bool Whether to copy external links, or leave them as links. absolute_paths : bool If copy is False, then this indicates whether to use absolute or relative paths for links. compression : bool Whether to compress the arrays in the HDF5 file wall_dtype : type The datatype to use to write the wall positions physics_dtype : type The datatype to use to write the physical quantities<|endoftext|>
1c37bec25387075724bccc86430d7dcccf3f52680060a098bd0c6ca652938b2a
def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float): '\n Write out a single quantity, checking for consistency with geometry\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n name : str\n The name of the array in the group\n array : np.ndarray\n The array to write out\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n ' self._check_array_dimensions(array) if isinstance(array, h5py.ExternalLink): link_or_copy(group, name, array, copy, absolute_paths=absolute_paths) else: dset = group.create_dataset(name, data=array, compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
Write out a single quantity, checking for consistency with geometry Parameters ---------- group : h5py.Group The HDF5 group to write the grid to name : str The name of the array in the group array : np.ndarray The array to write out copy : bool Whether to copy external links, or leave them as links. absolute_paths : bool If copy is False, then this indicates whether to use absolute or relative paths for links. compression : bool Whether to compress the arrays in the HDF5 file wall_dtype : type The datatype to use to write the wall positions physics_dtype : type The datatype to use to write the physical quantities
hyperion/grid/spherical_polar_grid.py
write_single_array
keflavich/hyperion
37
python
def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float): '\n Write out a single quantity, checking for consistency with geometry\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n name : str\n The name of the array in the group\n array : np.ndarray\n The array to write out\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n ' self._check_array_dimensions(array) if isinstance(array, h5py.ExternalLink): link_or_copy(group, name, array, copy, absolute_paths=absolute_paths) else: dset = group.create_dataset(name, data=array, compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float): '\n Write out a single quantity, checking for consistency with geometry\n\n Parameters\n ----------\n group : h5py.Group\n The HDF5 group to write the grid to\n name : str\n The name of the array in the group\n array : np.ndarray\n The array to write out\n copy : bool\n Whether to copy external links, or leave them as links.\n absolute_paths : bool\n If copy is False, then this indicates whether to use absolute or\n relative paths for links.\n compression : bool\n Whether to compress the arrays in the HDF5 file\n wall_dtype : type\n The datatype to use to write the wall positions\n physics_dtype : type\n The datatype to use to write the physical quantities\n ' self._check_array_dimensions(array) if isinstance(array, h5py.ExternalLink): link_or_copy(group, name, array, copy, absolute_paths=absolute_paths) else: dset = group.create_dataset(name, data=array, compression=compression, dtype=physics_dtype) dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))<|docstring|>Write out a single quantity, checking for consistency with geometry Parameters ---------- group : h5py.Group The HDF5 group to write the grid to name : str The name of the array in the group array : np.ndarray The array to write out copy : bool Whether to copy external links, or leave them as links. absolute_paths : bool If copy is False, then this indicates whether to use absolute or relative paths for links. compression : bool Whether to compress the arrays in the HDF5 file wall_dtype : type The datatype to use to write the wall positions physics_dtype : type The datatype to use to write the physical quantities<|endoftext|>
1ca52b3d6aa132f85010c83762f0becfa68f005bab4964d2a5d231a5f12a4053
def append(self, grid): '\n Used to append quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if isinstance(grid, SphericalPolarGridView): if (self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]): raise Exception('Calling append recursively') if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('Can only append a single grid') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity])) elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity].append(deepcopy(grid)) else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')
Used to append quantities from another grid Parameters ---------- grid : 3D Numpy array or SphericalPolarGridView instance The grid to copy the quantity from
hyperion/grid/spherical_polar_grid.py
append
keflavich/hyperion
37
python
def append(self, grid): '\n Used to append quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if isinstance(grid, SphericalPolarGridView): if (self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]): raise Exception('Calling append recursively') if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('Can only append a single grid') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity])) elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity].append(deepcopy(grid)) else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')
def append(self, grid): '\n Used to append quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if isinstance(grid, SphericalPolarGridView): if (self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]): raise Exception('Calling append recursively') if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('Can only append a single grid') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity])) elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity].append(deepcopy(grid)) else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')<|docstring|>Used to append quantities from another grid Parameters ---------- grid : 3D Numpy array or SphericalPolarGridView instance The grid to copy the quantity from<|endoftext|>
5ae061e71b6988472a47f4dc52222401fdcbbb53fc43b491f10728b8cc60ec00
def add(self, grid): '\n Used to add quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if (type(self.quantities[self.viewed_quantity]) is list): raise Exception('need to first specify the item to add to') if isinstance(grid, SphericalPolarGridView): if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('need to first specify the item to add') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity] elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity] += grid else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')
Used to add quantities from another grid Parameters ---------- grid : 3D Numpy array or SphericalPolarGridView instance The grid to copy the quantity from
hyperion/grid/spherical_polar_grid.py
add
keflavich/hyperion
37
python
def add(self, grid): '\n Used to add quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if (type(self.quantities[self.viewed_quantity]) is list): raise Exception('need to first specify the item to add to') if isinstance(grid, SphericalPolarGridView): if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('need to first specify the item to add') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity] elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity] += grid else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')
def add(self, grid): '\n Used to add quantities from another grid\n\n Parameters\n ----------\n grid : 3D Numpy array or SphericalPolarGridView instance\n The grid to copy the quantity from\n ' if (type(self.quantities[self.viewed_quantity]) is list): raise Exception('need to first specify the item to add to') if isinstance(grid, SphericalPolarGridView): if (type(grid.quantities[grid.viewed_quantity]) is list): raise Exception('need to first specify the item to add') self._check_array_dimensions(grid.quantities[grid.viewed_quantity]) self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity] elif isinstance(grid, np.ndarray): self._check_array_dimensions(grid) self.quantities[self.viewed_quantity] += grid else: raise ValueError('grid should be a Numpy array or a SphericalPolarGridView instance')<|docstring|>Used to add quantities from another grid Parameters ---------- grid : 3D Numpy array or SphericalPolarGridView instance The grid to copy the quantity from<|endoftext|>
899702169f05214365679538c3664fa126c5956f8720729a501a3a4e10dd0926
def set(self, paramters): ' @paramters: ordered dict ' if (paramters is None): return header = list(paramters) self.setColNames(header) self[0] = [(x if ((x is None) or isinstance(x, str) or isinstance(x, int) or isinstance(x, float)) else str(x)) for x in paramters.values()] for (i, name) in enumerate(header): if ('权重' in name): self.setItemBackground(0, i, Qt.yellow) self.setItemForeground(0, i, Qt.black)
@paramters: ordered dict
Stock/Select/Ui/Basic/Param/DyStockSelectStrategyParamWidget.py
set
emaiqi/DevilYuan
135
python
def set(self, paramters): ' ' if (paramters is None): return header = list(paramters) self.setColNames(header) self[0] = [(x if ((x is None) or isinstance(x, str) or isinstance(x, int) or isinstance(x, float)) else str(x)) for x in paramters.values()] for (i, name) in enumerate(header): if ('权重' in name): self.setItemBackground(0, i, Qt.yellow) self.setItemForeground(0, i, Qt.black)
def set(self, paramters): ' ' if (paramters is None): return header = list(paramters) self.setColNames(header) self[0] = [(x if ((x is None) or isinstance(x, str) or isinstance(x, int) or isinstance(x, float)) else str(x)) for x in paramters.values()] for (i, name) in enumerate(header): if ('权重' in name): self.setItemBackground(0, i, Qt.yellow) self.setItemForeground(0, i, Qt.black)<|docstring|>@paramters: ordered dict<|endoftext|>
e6be1093054da03ddae68736ca81a04693c853af481d036990ea77771a8a976f
def align_tensor(new_inputs, x, expand=False): '\n Permute and add dims to a tensor to match desired ``new_inputs``.\n\n :param OrderedDict new_inputs: A target set of inputs.\n :param funsor.terms.Funsor x: A :class:`Tensor` or\n :class:`~funsor.terms.Number` .\n :param bool expand: If False (default), set result size to 1 for any input\n of ``x`` not in ``new_inputs``; if True expand to ``new_inputs`` size.\n :return: a number or :class:`torch.Tensor` or :class:`np.ndarray` that can be broadcast to other\n tensors with inputs ``new_inputs``.\n :rtype: int or float or torch.Tensor or np.ndarray\n ' assert isinstance(new_inputs, OrderedDict) assert isinstance(x, (Number, Tensor)) assert all((isinstance(d.dtype, int) for d in x.inputs.values())) data = x.data if isinstance(x, Number): return data old_inputs = x.inputs if (old_inputs == new_inputs): return data x_keys = tuple(old_inputs) data = ops.permute(data, (tuple((x_keys.index(k) for k in new_inputs if (k in old_inputs))) + tuple(range(len(old_inputs), len(data.shape))))) data = data.reshape((tuple(((old_inputs[k].dtype if (k in old_inputs) else 1) for k in new_inputs)) + x.output.shape)) if expand: data = ops.expand(data, (tuple((d.dtype for d in new_inputs.values())) + x.output.shape)) return data
Permute and add dims to a tensor to match desired ``new_inputs``. :param OrderedDict new_inputs: A target set of inputs. :param funsor.terms.Funsor x: A :class:`Tensor` or :class:`~funsor.terms.Number` . :param bool expand: If False (default), set result size to 1 for any input of ``x`` not in ``new_inputs``; if True expand to ``new_inputs`` size. :return: a number or :class:`torch.Tensor` or :class:`np.ndarray` that can be broadcast to other tensors with inputs ``new_inputs``. :rtype: int or float or torch.Tensor or np.ndarray
funsor/tensor.py
align_tensor
ordabayevy/funsor
0
python
def align_tensor(new_inputs, x, expand=False): '\n Permute and add dims to a tensor to match desired ``new_inputs``.\n\n :param OrderedDict new_inputs: A target set of inputs.\n :param funsor.terms.Funsor x: A :class:`Tensor` or\n :class:`~funsor.terms.Number` .\n :param bool expand: If False (default), set result size to 1 for any input\n of ``x`` not in ``new_inputs``; if True expand to ``new_inputs`` size.\n :return: a number or :class:`torch.Tensor` or :class:`np.ndarray` that can be broadcast to other\n tensors with inputs ``new_inputs``.\n :rtype: int or float or torch.Tensor or np.ndarray\n ' assert isinstance(new_inputs, OrderedDict) assert isinstance(x, (Number, Tensor)) assert all((isinstance(d.dtype, int) for d in x.inputs.values())) data = x.data if isinstance(x, Number): return data old_inputs = x.inputs if (old_inputs == new_inputs): return data x_keys = tuple(old_inputs) data = ops.permute(data, (tuple((x_keys.index(k) for k in new_inputs if (k in old_inputs))) + tuple(range(len(old_inputs), len(data.shape))))) data = data.reshape((tuple(((old_inputs[k].dtype if (k in old_inputs) else 1) for k in new_inputs)) + x.output.shape)) if expand: data = ops.expand(data, (tuple((d.dtype for d in new_inputs.values())) + x.output.shape)) return data
def align_tensor(new_inputs, x, expand=False): '\n Permute and add dims to a tensor to match desired ``new_inputs``.\n\n :param OrderedDict new_inputs: A target set of inputs.\n :param funsor.terms.Funsor x: A :class:`Tensor` or\n :class:`~funsor.terms.Number` .\n :param bool expand: If False (default), set result size to 1 for any input\n of ``x`` not in ``new_inputs``; if True expand to ``new_inputs`` size.\n :return: a number or :class:`torch.Tensor` or :class:`np.ndarray` that can be broadcast to other\n tensors with inputs ``new_inputs``.\n :rtype: int or float or torch.Tensor or np.ndarray\n ' assert isinstance(new_inputs, OrderedDict) assert isinstance(x, (Number, Tensor)) assert all((isinstance(d.dtype, int) for d in x.inputs.values())) data = x.data if isinstance(x, Number): return data old_inputs = x.inputs if (old_inputs == new_inputs): return data x_keys = tuple(old_inputs) data = ops.permute(data, (tuple((x_keys.index(k) for k in new_inputs if (k in old_inputs))) + tuple(range(len(old_inputs), len(data.shape))))) data = data.reshape((tuple(((old_inputs[k].dtype if (k in old_inputs) else 1) for k in new_inputs)) + x.output.shape)) if expand: data = ops.expand(data, (tuple((d.dtype for d in new_inputs.values())) + x.output.shape)) return data<|docstring|>Permute and add dims to a tensor to match desired ``new_inputs``. :param OrderedDict new_inputs: A target set of inputs. :param funsor.terms.Funsor x: A :class:`Tensor` or :class:`~funsor.terms.Number` . :param bool expand: If False (default), set result size to 1 for any input of ``x`` not in ``new_inputs``; if True expand to ``new_inputs`` size. :return: a number or :class:`torch.Tensor` or :class:`np.ndarray` that can be broadcast to other tensors with inputs ``new_inputs``. :rtype: int or float or torch.Tensor or np.ndarray<|endoftext|>
418bac1229213af8e7a3b0c4f846d1eb4397d36c26b11eefe38e71163f90c054
def align_tensors(*args, **kwargs): '\n Permute multiple tensors before applying a broadcasted op.\n\n This is mainly useful for implementing eager funsor operations.\n\n :param funsor.terms.Funsor \\*args: Multiple :class:`Tensor` s and\n :class:`~funsor.terms.Number` s.\n :param bool expand: Whether to expand input tensors. Defaults to False.\n :return: a pair ``(inputs, tensors)`` where tensors are all\n :class:`torch.Tensor` s or :class:`np.ndarray` s\n that can be broadcast together to a single data\n with given ``inputs``.\n :rtype: tuple\n ' expand = kwargs.pop('expand', False) assert (not kwargs) inputs = OrderedDict() for x in args: inputs.update(x.inputs) tensors = [align_tensor(inputs, x, expand=expand) for x in args] return (inputs, tensors)
Permute multiple tensors before applying a broadcasted op. This is mainly useful for implementing eager funsor operations. :param funsor.terms.Funsor \*args: Multiple :class:`Tensor` s and :class:`~funsor.terms.Number` s. :param bool expand: Whether to expand input tensors. Defaults to False. :return: a pair ``(inputs, tensors)`` where tensors are all :class:`torch.Tensor` s or :class:`np.ndarray` s that can be broadcast together to a single data with given ``inputs``. :rtype: tuple
funsor/tensor.py
align_tensors
ordabayevy/funsor
0
python
def align_tensors(*args, **kwargs): '\n Permute multiple tensors before applying a broadcasted op.\n\n This is mainly useful for implementing eager funsor operations.\n\n :param funsor.terms.Funsor \\*args: Multiple :class:`Tensor` s and\n :class:`~funsor.terms.Number` s.\n :param bool expand: Whether to expand input tensors. Defaults to False.\n :return: a pair ``(inputs, tensors)`` where tensors are all\n :class:`torch.Tensor` s or :class:`np.ndarray` s\n that can be broadcast together to a single data\n with given ``inputs``.\n :rtype: tuple\n ' expand = kwargs.pop('expand', False) assert (not kwargs) inputs = OrderedDict() for x in args: inputs.update(x.inputs) tensors = [align_tensor(inputs, x, expand=expand) for x in args] return (inputs, tensors)
def align_tensors(*args, **kwargs): '\n Permute multiple tensors before applying a broadcasted op.\n\n This is mainly useful for implementing eager funsor operations.\n\n :param funsor.terms.Funsor \\*args: Multiple :class:`Tensor` s and\n :class:`~funsor.terms.Number` s.\n :param bool expand: Whether to expand input tensors. Defaults to False.\n :return: a pair ``(inputs, tensors)`` where tensors are all\n :class:`torch.Tensor` s or :class:`np.ndarray` s\n that can be broadcast together to a single data\n with given ``inputs``.\n :rtype: tuple\n ' expand = kwargs.pop('expand', False) assert (not kwargs) inputs = OrderedDict() for x in args: inputs.update(x.inputs) tensors = [align_tensor(inputs, x, expand=expand) for x in args] return (inputs, tensors)<|docstring|>Permute multiple tensors before applying a broadcasted op. This is mainly useful for implementing eager funsor operations. :param funsor.terms.Funsor \*args: Multiple :class:`Tensor` s and :class:`~funsor.terms.Number` s. :param bool expand: Whether to expand input tensors. Defaults to False. :return: a pair ``(inputs, tensors)`` where tensors are all :class:`torch.Tensor` s or :class:`np.ndarray` s that can be broadcast together to a single data with given ``inputs``. :rtype: tuple<|endoftext|>
551768af662168de6e5f57e5a3f8096574dc5e461a13f1ae4d5e4a9ab895b11c
def function(*signature): '\n Decorator to wrap a PyTorch/NumPy function, using either type hints or\n explicit type annotations.\n\n Example::\n\n # Using type hints:\n @funsor.tensor.function\n def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]:\n return torch.matmul(x, y)\n\n # Using explicit type annotations:\n @funsor.tensor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5])\n def matmul(x, y):\n return torch.matmul(x, y)\n\n @funsor.tensor.function(Reals[10], Reals[10, 10], Reals[10], Real)\n def mvn_log_prob(loc, scale_tril, x):\n d = torch.distributions.MultivariateNormal(loc, scale_tril)\n return d.log_prob(x)\n\n To support functions that output nested tuples of tensors, specify a nested\n :py:class:`~typing.Tuple` of output types, for example::\n\n @funsor.tensor.function\n def max_and_argmax(x: Reals[8]) -> Tuple[Real, Bint[8]]:\n return torch.max(x, dim=-1)\n\n :param \\*signature: A sequence if input domains followed by a final output\n domain or nested tuple of output domains.\n ' assert signature if (len(signature) == 1): fn = signature[0] if (callable(fn) and (not isinstance(fn, ArrayType))): inputs = typing.get_type_hints(fn) output = inputs.pop('return') assert all((isinstance(d, ArrayType) for d in inputs.values())) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return _function(inputs, output, fn) (inputs, output) = (signature[:(- 1)], signature[(- 1)]) output = _tuple_to_Tuple(output) assert all((isinstance(d, ArrayType) for d in inputs)) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return functools.partial(_function, inputs, output)
Decorator to wrap a PyTorch/NumPy function, using either type hints or explicit type annotations. Example:: # Using type hints: @funsor.tensor.function def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]: return torch.matmul(x, y) # Using explicit type annotations: @funsor.tensor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5]) def matmul(x, y): return torch.matmul(x, y) @funsor.tensor.function(Reals[10], Reals[10, 10], Reals[10], Real) def mvn_log_prob(loc, scale_tril, x): d = torch.distributions.MultivariateNormal(loc, scale_tril) return d.log_prob(x) To support functions that output nested tuples of tensors, specify a nested :py:class:`~typing.Tuple` of output types, for example:: @funsor.tensor.function def max_and_argmax(x: Reals[8]) -> Tuple[Real, Bint[8]]: return torch.max(x, dim=-1) :param \*signature: A sequence if input domains followed by a final output domain or nested tuple of output domains.
funsor/tensor.py
function
ordabayevy/funsor
0
python
def function(*signature): '\n Decorator to wrap a PyTorch/NumPy function, using either type hints or\n explicit type annotations.\n\n Example::\n\n # Using type hints:\n @funsor.tensor.function\n def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]:\n return torch.matmul(x, y)\n\n # Using explicit type annotations:\n @funsor.tensor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5])\n def matmul(x, y):\n return torch.matmul(x, y)\n\n @funsor.tensor.function(Reals[10], Reals[10, 10], Reals[10], Real)\n def mvn_log_prob(loc, scale_tril, x):\n d = torch.distributions.MultivariateNormal(loc, scale_tril)\n return d.log_prob(x)\n\n To support functions that output nested tuples of tensors, specify a nested\n :py:class:`~typing.Tuple` of output types, for example::\n\n @funsor.tensor.function\n def max_and_argmax(x: Reals[8]) -> Tuple[Real, Bint[8]]:\n return torch.max(x, dim=-1)\n\n :param \\*signature: A sequence if input domains followed by a final output\n domain or nested tuple of output domains.\n ' assert signature if (len(signature) == 1): fn = signature[0] if (callable(fn) and (not isinstance(fn, ArrayType))): inputs = typing.get_type_hints(fn) output = inputs.pop('return') assert all((isinstance(d, ArrayType) for d in inputs.values())) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return _function(inputs, output, fn) (inputs, output) = (signature[:(- 1)], signature[(- 1)]) output = _tuple_to_Tuple(output) assert all((isinstance(d, ArrayType) for d in inputs)) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return functools.partial(_function, inputs, output)
def function(*signature): '\n Decorator to wrap a PyTorch/NumPy function, using either type hints or\n explicit type annotations.\n\n Example::\n\n # Using type hints:\n @funsor.tensor.function\n def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]:\n return torch.matmul(x, y)\n\n # Using explicit type annotations:\n @funsor.tensor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5])\n def matmul(x, y):\n return torch.matmul(x, y)\n\n @funsor.tensor.function(Reals[10], Reals[10, 10], Reals[10], Real)\n def mvn_log_prob(loc, scale_tril, x):\n d = torch.distributions.MultivariateNormal(loc, scale_tril)\n return d.log_prob(x)\n\n To support functions that output nested tuples of tensors, specify a nested\n :py:class:`~typing.Tuple` of output types, for example::\n\n @funsor.tensor.function\n def max_and_argmax(x: Reals[8]) -> Tuple[Real, Bint[8]]:\n return torch.max(x, dim=-1)\n\n :param \\*signature: A sequence if input domains followed by a final output\n domain or nested tuple of output domains.\n ' assert signature if (len(signature) == 1): fn = signature[0] if (callable(fn) and (not isinstance(fn, ArrayType))): inputs = typing.get_type_hints(fn) output = inputs.pop('return') assert all((isinstance(d, ArrayType) for d in inputs.values())) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return _function(inputs, output, fn) (inputs, output) = (signature[:(- 1)], signature[(- 1)]) output = _tuple_to_Tuple(output) assert all((isinstance(d, ArrayType) for d in inputs)) assert (isinstance(output, (ArrayType, tuple)) or (output.__origin__ in (tuple, typing.Tuple))) return functools.partial(_function, inputs, output)<|docstring|>Decorator to wrap a PyTorch/NumPy function, using either type hints or explicit type annotations. Example:: # Using type hints: @funsor.tensor.function def matmul(x: Reals[3, 4], y: Reals[4, 5]) -> Reals[3, 5]: return torch.matmul(x, y) # Using explicit type annotations: @funsor.tensor.function(Reals[3, 4], Reals[4, 5], Reals[3, 5]) def matmul(x, y): return torch.matmul(x, y) @funsor.tensor.function(Reals[10], Reals[10, 10], Reals[10], Real) def mvn_log_prob(loc, scale_tril, x): d = torch.distributions.MultivariateNormal(loc, scale_tril) return d.log_prob(x) To support functions that output nested tuples of tensors, specify a nested :py:class:`~typing.Tuple` of output types, for example:: @funsor.tensor.function def max_and_argmax(x: Reals[8]) -> Tuple[Real, Bint[8]]: return torch.max(x, dim=-1) :param \*signature: A sequence if input domains followed by a final output domain or nested tuple of output domains.<|endoftext|>
be52de52644bb365699030a9be380bf2b57e424e9126430b88355a8fe27ff26d
def tensordot(x, y, dims): '\n Wrapper around :func:`torch.tensordot` or :func:`np.tensordot`\n to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To perform sum-product\n contractions on named dimensions, instead use ``+`` and\n :class:`~funsor.terms.Reduce`.\n\n Arguments should satisfy::\n\n len(x.shape) >= dims\n len(y.shape) >= dims\n dims == 0 or x.shape[-dims:] == y.shape[:dims]\n\n :param Funsor x: A left hand argument.\n :param Funsor y: A y hand argument.\n :param int dims: The number of dimension of overlap of output shape.\n :rtype: Funsor\n ' assert (dims >= 0) assert (len(x.shape) >= dims) assert (len(y.shape) >= dims) assert ((dims == 0) or (x.shape[(- dims):] == y.shape[:dims])) (x_start, x_end) = (0, len(x.output.shape)) y_start = (x_end - dims) y_end = (y_start + len(y.output.shape)) symbols = 'abcdefghijklmnopqrstuvwxyz' equation = '{},{}->{}'.format(symbols[x_start:x_end], symbols[y_start:y_end], (symbols[x_start:y_start] + symbols[x_end:y_end])) return Einsum(equation, (x, y))
Wrapper around :func:`torch.tensordot` or :func:`np.tensordot` to operate on real-valued Funsors. Note this operates only on the ``output`` tensor. To perform sum-product contractions on named dimensions, instead use ``+`` and :class:`~funsor.terms.Reduce`. Arguments should satisfy:: len(x.shape) >= dims len(y.shape) >= dims dims == 0 or x.shape[-dims:] == y.shape[:dims] :param Funsor x: A left hand argument. :param Funsor y: A y hand argument. :param int dims: The number of dimension of overlap of output shape. :rtype: Funsor
funsor/tensor.py
tensordot
ordabayevy/funsor
0
python
def tensordot(x, y, dims): '\n Wrapper around :func:`torch.tensordot` or :func:`np.tensordot`\n to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To perform sum-product\n contractions on named dimensions, instead use ``+`` and\n :class:`~funsor.terms.Reduce`.\n\n Arguments should satisfy::\n\n len(x.shape) >= dims\n len(y.shape) >= dims\n dims == 0 or x.shape[-dims:] == y.shape[:dims]\n\n :param Funsor x: A left hand argument.\n :param Funsor y: A y hand argument.\n :param int dims: The number of dimension of overlap of output shape.\n :rtype: Funsor\n ' assert (dims >= 0) assert (len(x.shape) >= dims) assert (len(y.shape) >= dims) assert ((dims == 0) or (x.shape[(- dims):] == y.shape[:dims])) (x_start, x_end) = (0, len(x.output.shape)) y_start = (x_end - dims) y_end = (y_start + len(y.output.shape)) symbols = 'abcdefghijklmnopqrstuvwxyz' equation = '{},{}->{}'.format(symbols[x_start:x_end], symbols[y_start:y_end], (symbols[x_start:y_start] + symbols[x_end:y_end])) return Einsum(equation, (x, y))
def tensordot(x, y, dims): '\n Wrapper around :func:`torch.tensordot` or :func:`np.tensordot`\n to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To perform sum-product\n contractions on named dimensions, instead use ``+`` and\n :class:`~funsor.terms.Reduce`.\n\n Arguments should satisfy::\n\n len(x.shape) >= dims\n len(y.shape) >= dims\n dims == 0 or x.shape[-dims:] == y.shape[:dims]\n\n :param Funsor x: A left hand argument.\n :param Funsor y: A y hand argument.\n :param int dims: The number of dimension of overlap of output shape.\n :rtype: Funsor\n ' assert (dims >= 0) assert (len(x.shape) >= dims) assert (len(y.shape) >= dims) assert ((dims == 0) or (x.shape[(- dims):] == y.shape[:dims])) (x_start, x_end) = (0, len(x.output.shape)) y_start = (x_end - dims) y_end = (y_start + len(y.output.shape)) symbols = 'abcdefghijklmnopqrstuvwxyz' equation = '{},{}->{}'.format(symbols[x_start:x_end], symbols[y_start:y_end], (symbols[x_start:y_start] + symbols[x_end:y_end])) return Einsum(equation, (x, y))<|docstring|>Wrapper around :func:`torch.tensordot` or :func:`np.tensordot` to operate on real-valued Funsors. Note this operates only on the ``output`` tensor. To perform sum-product contractions on named dimensions, instead use ``+`` and :class:`~funsor.terms.Reduce`. Arguments should satisfy:: len(x.shape) >= dims len(y.shape) >= dims dims == 0 or x.shape[-dims:] == y.shape[:dims] :param Funsor x: A left hand argument. :param Funsor y: A y hand argument. :param int dims: The number of dimension of overlap of output shape. :rtype: Funsor<|endoftext|>
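A hedged usage sketch for the tensordot wrapper above (assuming the torch backend and that Tensor and tensordot are importable from funsor.tensor, as the record's path suggests)::

    import torch
    import funsor
    from funsor.tensor import Tensor, tensordot

    funsor.set_backend("torch")  # assumed backend selection

    x = Tensor(torch.randn(3, 4))
    y = Tensor(torch.randn(4, 5))
    # dims=1 contracts the last output dim of x with the first output dim
    # of y, giving output shape (3, 5), as with torch.tensordot.
    z = tensordot(x, y, 1)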
0f12deb34439897988174505f83e9891e25ea783a954544748585ba6b0ef70c6
def stack(parts, dim=0): '\n Wrapper around :func:`torch.stack` or :func:`np.stack` to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To stack funsors in a\n new named dim, instead use :class:`~funsor.terms.Stack`.\n\n :param tuple parts: A tuple of funsors.\n :param int dim: A torch dim along which to stack.\n :rtype: Funsor\n ' assert isinstance(dim, int) assert isinstance(parts, tuple) assert (len(set((x.output for x in parts))) == 1) shape = parts[0].output.shape if (dim >= 0): dim = ((dim - len(shape)) - 1) assert (dim < 0) split = ((dim + len(shape)) + 1) shape = ((shape[:split] + (len(parts),)) + shape[split:]) output = Array[(parts[0].dtype, shape)] fn = functools.partial(ops.stack, dim) return Function(fn, output, parts)
Wrapper around :func:`torch.stack` or :func:`np.stack` to operate on real-valued Funsors. Note this operates only on the ``output`` tensor. To stack funsors in a new named dim, instead use :class:`~funsor.terms.Stack`. :param tuple parts: A tuple of funsors. :param int dim: A torch dim along which to stack. :rtype: Funsor
funsor/tensor.py
stack
ordabayevy/funsor
0
python
def stack(parts, dim=0): '\n Wrapper around :func:`torch.stack` or :func:`np.stack` to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To stack funsors in a\n new named dim, instead use :class:`~funsor.terms.Stack`.\n\n :param tuple parts: A tuple of funsors.\n :param int dim: A torch dim along which to stack.\n :rtype: Funsor\n ' assert isinstance(dim, int) assert isinstance(parts, tuple) assert (len(set((x.output for x in parts))) == 1) shape = parts[0].output.shape if (dim >= 0): dim = ((dim - len(shape)) - 1) assert (dim < 0) split = ((dim + len(shape)) + 1) shape = ((shape[:split] + (len(parts),)) + shape[split:]) output = Array[(parts[0].dtype, shape)] fn = functools.partial(ops.stack, dim) return Function(fn, output, parts)
def stack(parts, dim=0): '\n Wrapper around :func:`torch.stack` or :func:`np.stack` to operate on real-valued Funsors.\n\n Note this operates only on the ``output`` tensor. To stack funsors in a\n new named dim, instead use :class:`~funsor.terms.Stack`.\n\n :param tuple parts: A tuple of funsors.\n :param int dim: A torch dim along which to stack.\n :rtype: Funsor\n ' assert isinstance(dim, int) assert isinstance(parts, tuple) assert (len(set((x.output for x in parts))) == 1) shape = parts[0].output.shape if (dim >= 0): dim = ((dim - len(shape)) - 1) assert (dim < 0) split = ((dim + len(shape)) + 1) shape = ((shape[:split] + (len(parts),)) + shape[split:]) output = Array[(parts[0].dtype, shape)] fn = functools.partial(ops.stack, dim) return Function(fn, output, parts)<|docstring|>Wrapper around :func:`torch.stack` or :func:`np.stack` to operate on real-valued Funsors. Note this operates only on the ``output`` tensor. To stack funsors in a new named dim, instead use :class:`~funsor.terms.Stack`. :param tuple parts: A tuple of funsors. :param int dim: A torch dim along which to stack. :rtype: Funsor<|endoftext|>
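A hedged usage sketch for the stack wrapper above (same assumptions as before: torch backend, names importable from funsor.tensor)::

    import torch
    import funsor
    from funsor.tensor import Tensor, stack

    funsor.set_backend("torch")  # assumed backend selection

    parts = (Tensor(torch.zeros(3)), Tensor(torch.ones(3)))
    # Stacking happens along a new output dim, so the result has output
    # shape (2, 3); named (input) dims are left untouched.
    out = stack(parts)  # dim defaults to 0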
2ae5590055f847de32277eb99c8f3a1bd32c811cd8b1920924b64310b0bf4c46
def new_arange(self, name, *args, **kwargs): '\n Helper to create a named :func:`torch.arange` or :func:`np.arange` funsor.\n In some cases this can be replaced by a symbolic\n :class:`~funsor.terms.Slice` .\n\n :param str name: A variable name.\n :param int start:\n :param int stop:\n :param int step: Three args following :py:class:`slice` semantics.\n :param int dtype: An optional bounded integer type of this slice.\n :rtype: Tensor\n ' start = 0 step = 1 dtype = None if (len(args) == 1): stop = args[0] dtype = kwargs.pop('dtype', stop) elif (len(args) == 2): (start, stop) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 3): (start, stop, step) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 4): (start, stop, step, dtype) = args else: raise ValueError if (step <= 0): raise ValueError stop = min(dtype, max(start, stop)) data = ops.new_arange(self.data, start, stop, step) inputs = OrderedDict([(name, Bint[len(data)])]) return Tensor(data, inputs, dtype=dtype)
Helper to create a named :func:`torch.arange` or :func:`np.arange` funsor. In some cases this can be replaced by a symbolic :class:`~funsor.terms.Slice` . :param str name: A variable name. :param int start: :param int stop: :param int step: Three args following :py:class:`slice` semantics. :param int dtype: An optional bounded integer type of this slice. :rtype: Tensor
funsor/tensor.py
new_arange
ordabayevy/funsor
0
python
def new_arange(self, name, *args, **kwargs): '\n Helper to create a named :func:`torch.arange` or :func:`np.arange` funsor.\n In some cases this can be replaced by a symbolic\n :class:`~funsor.terms.Slice` .\n\n :param str name: A variable name.\n :param int start:\n :param int stop:\n :param int step: Three args following :py:class:`slice` semantics.\n :param int dtype: An optional bounded integer type of this slice.\n :rtype: Tensor\n ' start = 0 step = 1 dtype = None if (len(args) == 1): stop = args[0] dtype = kwargs.pop('dtype', stop) elif (len(args) == 2): (start, stop) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 3): (start, stop, step) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 4): (start, stop, step, dtype) = args else: raise ValueError if (step <= 0): raise ValueError stop = min(dtype, max(start, stop)) data = ops.new_arange(self.data, start, stop, step) inputs = OrderedDict([(name, Bint[len(data)])]) return Tensor(data, inputs, dtype=dtype)
def new_arange(self, name, *args, **kwargs): '\n Helper to create a named :func:`torch.arange` or :func:`np.arange` funsor.\n In some cases this can be replaced by a symbolic\n :class:`~funsor.terms.Slice` .\n\n :param str name: A variable name.\n :param int start:\n :param int stop:\n :param int step: Three args following :py:class:`slice` semantics.\n :param int dtype: An optional bounded integer type of this slice.\n :rtype: Tensor\n ' start = 0 step = 1 dtype = None if (len(args) == 1): stop = args[0] dtype = kwargs.pop('dtype', stop) elif (len(args) == 2): (start, stop) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 3): (start, stop, step) = args dtype = kwargs.pop('dtype', stop) elif (len(args) == 4): (start, stop, step, dtype) = args else: raise ValueError if (step <= 0): raise ValueError stop = min(dtype, max(start, stop)) data = ops.new_arange(self.data, start, stop, step) inputs = OrderedDict([(name, Bint[len(data)])]) return Tensor(data, inputs, dtype=dtype)<|docstring|>Helper to create a named :func:`torch.arange` or :func:`np.arange` funsor. In some cases this can be replaced by a symbolic :class:`~funsor.terms.Slice` . :param str name: A variable name. :param int start: :param int stop: :param int step: Three args following :py:class:`slice` semantics. :param int dtype: An optional bounded integer type of this slice. :rtype: Tensor<|endoftext|>
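A hedged usage sketch for new_arange above, assuming it is exposed as a method of funsor.tensor.Tensor (the self parameter and self.data access in the record suggest this, but the owning class is not shown)::

    import torch
    import funsor
    from funsor.tensor import Tensor

    funsor.set_backend("torch")  # assumed backend selection

    proto = Tensor(torch.zeros(()))
    i = proto.new_arange("i", 5)        # values 0..4, input "i" of type Bint[5]
    j = proto.new_arange("j", 1, 9, 2)  # slice-like start/stop/step -> 1, 3, 5, 7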
86b77c51f2437b4876d849ce38858f16e60abd6d9db41867de95d43f60e4ae53
def materialize(self, x): '\n Attempt to convert a Funsor to a :class:`~funsor.terms.Number` or\n :class:`Tensor` by substituting :func:`arange` s into its free variables.\n\n :arg Funsor x: A funsor.\n :rtype: Funsor\n ' assert isinstance(x, Funsor) if isinstance(x, (Number, Tensor)): return x subs = [] for (name, domain) in x.inputs.items(): if isinstance(domain.dtype, int): subs.append((name, self.new_arange(name, domain.dtype))) subs = tuple(subs) return substitute(x, subs)
Attempt to convert a Funsor to a :class:`~funsor.terms.Number` or :class:`Tensor` by substituting :func:`arange` s into its free variables. :arg Funsor x: A funsor. :rtype: Funsor
funsor/tensor.py
materialize
ordabayevy/funsor
0
python
def materialize(self, x): '\n Attempt to convert a Funsor to a :class:`~funsor.terms.Number` or\n :class:`Tensor` by substituting :func:`arange` s into its free variables.\n\n :arg Funsor x: A funsor.\n :rtype: Funsor\n ' assert isinstance(x, Funsor) if isinstance(x, (Number, Tensor)): return x subs = [] for (name, domain) in x.inputs.items(): if isinstance(domain.dtype, int): subs.append((name, self.new_arange(name, domain.dtype))) subs = tuple(subs) return substitute(x, subs)
def materialize(self, x): '\n Attempt to convert a Funsor to a :class:`~funsor.terms.Number` or\n :class:`Tensor` by substituting :func:`arange` s into its free variables.\n\n :arg Funsor x: A funsor.\n :rtype: Funsor\n ' assert isinstance(x, Funsor) if isinstance(x, (Number, Tensor)): return x subs = [] for (name, domain) in x.inputs.items(): if isinstance(domain.dtype, int): subs.append((name, self.new_arange(name, domain.dtype))) subs = tuple(subs) return substitute(x, subs)<|docstring|>Attempt to convert a Funsor to a :class:`~funsor.terms.Number` or :class:`Tensor` by substituting :func:`arange` s into its free variables. :arg Funsor x: A funsor. :rtype: Funsor<|endoftext|>
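For the materialize record above, a hedged sketch of the substitution it performs; since the owning class of the method is not shown in the record, the arange substitution is reproduced by hand rather than by calling materialize directly::

    import torch
    import funsor
    from funsor import Bint, Variable
    from funsor.tensor import Tensor

    funsor.set_backend("torch")  # assumed backend selection

    x = Variable("i", Bint[3])                      # a funsor with a free Bint input
    i = Tensor(torch.zeros(())).new_arange("i", 3)  # arange for that input
    concrete = x(i=i)                               # substitution yields the arange Tensor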
f7306d372926930a05ba52fd9995ff1759f8d3635d92d8864b4fa9f03b554838
def test_smoke(self): 'Smoke test for KeyUtil.' with UnitTestContext() as context: self.assertTrue((KeyUtil.remove_prefix('MyType=A;B;C', 'MyType') == 'A;B;C')) with self.assertRaises(Exception): KeyUtil.remove_prefix('MyType=A;B;C', 'OtherType') self.assertTrue((KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 1) == 'B')) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'OtherType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B', 'MyType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 3)
Smoke test for KeyUtil.
py/datacentric/test/storage/test_key_util.py
test_smoke
datacentricorg/datacentric-py
1
python
def test_smoke(self): with UnitTestContext() as context: self.assertTrue((KeyUtil.remove_prefix('MyType=A;B;C', 'MyType') == 'A;B;C')) with self.assertRaises(Exception): KeyUtil.remove_prefix('MyType=A;B;C', 'OtherType') self.assertTrue((KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 1) == 'B')) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'OtherType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B', 'MyType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 3)
def test_smoke(self): with UnitTestContext() as context: self.assertTrue((KeyUtil.remove_prefix('MyType=A;B;C', 'MyType') == 'A;B;C')) with self.assertRaises(Exception): KeyUtil.remove_prefix('MyType=A;B;C', 'OtherType') self.assertTrue((KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 1) == 'B')) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'OtherType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B', 'MyType', 3, 1) with self.assertRaises(Exception): KeyUtil.get_token('MyType=A;B;C', 'MyType', 3, 3)<|docstring|>Smoke test for KeyUtil.<|endoftext|>
b3af549fe04c108dd5714ecd7127742b8617e48b59bee2539209fd4616f93309
def test_vacuum_model_vacuums_test_model(vacuum: vacuum.Vacuum, test_project_name, test_model, test_unused_explores): 'vacuum.models() should return unused explores in a used model.' result1 = vacuum.models(project=test_project_name, model=test_model['name']) result2 = vacuum.models(model=test_model['name']) assert (result1 == result2) assert isinstance(result1, list) assert (len(result1) == 1) result = result1[0] assert (result['Model'] == test_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_explores)) assert (result['Model Query Count'] > 0)
vacuum.models() should return unused explores in a used model.
tests/test_vacuum.py
test_vacuum_model_vacuums_test_model
iamaziz/henry
42
python
def test_vacuum_model_vacuums_test_model(vacuum: vacuum.Vacuum, test_project_name, test_model, test_unused_explores): result1 = vacuum.models(project=test_project_name, model=test_model['name']) result2 = vacuum.models(model=test_model['name']) assert (result1 == result2) assert isinstance(result1, list) assert (len(result1) == 1) result = result1[0] assert (result['Model'] == test_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_explores)) assert (result['Model Query Count'] > 0)
def test_vacuum_model_vacuums_test_model(vacuum: vacuum.Vacuum, test_project_name, test_model, test_unused_explores): result1 = vacuum.models(project=test_project_name, model=test_model['name']) result2 = vacuum.models(model=test_model['name']) assert (result1 == result2) assert isinstance(result1, list) assert (len(result1) == 1) result = result1[0] assert (result['Model'] == test_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_explores)) assert (result['Model Query Count'] > 0)<|docstring|>vacuum.models() should return unused explores in a used model.<|endoftext|>
85b35a73abbb6d0dbec040a9235e28212fa20bd5173e969adc08a19ae2ded3da
def test_vacuum_models_vacuums_unused_test_model(vacuum: vacuum.Vacuum, test_project_name, test_unused_model, test_unused_model_explore_names): 'vacuum.models() should return all explores in unused models.' result = vacuum.models(model=test_unused_model['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_unused_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_model_explore_names)) assert (result['Model Query Count'] == 0)
vacuum.models() should return all explores in unused models.
tests/test_vacuum.py
test_vacuum_models_vacuums_unused_test_model
iamaziz/henry
42
python
def test_vacuum_models_vacuums_unused_test_model(vacuum: vacuum.Vacuum, test_project_name, test_unused_model, test_unused_model_explore_names): result = vacuum.models(model=test_unused_model['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_unused_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_model_explore_names)) assert (result['Model Query Count'] == 0)
def test_vacuum_models_vacuums_unused_test_model(vacuum: vacuum.Vacuum, test_project_name, test_unused_model, test_unused_model_explore_names): result = vacuum.models(model=test_unused_model['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_unused_model['name']) assert (result['Unused Explores'] == '\n'.join(test_unused_model_explore_names)) assert (result['Model Query Count'] == 0)<|docstring|>vacuum.models() should return all explores in unused models.<|endoftext|>
a1961aa2ad10194791f065c40d84d8e841dc81206438f54187a66337c519124c
@pytest.mark.parametrize('project, model, msg', [('BadProject', 'henry_qa', 'error occured while getting projects.'), ('henry', 'BadModel', 'error occured while getting models.'), ('BadProject', 'BadModel', 'error occured while getting projects.')]) def test_vacuum_models_throws_for_bad_filters(vacuum: vacuum.Vacuum, project, model, msg): 'vacuum.models() should error for bad project/model filter values.' with pytest.raises(exceptions.NotFoundError) as exc: vacuum.models(project=project, model=model) assert (msg in str(exc.value))
vacuum.models() should error for bad project/model filter values.
tests/test_vacuum.py
test_vacuum_models_throws_for_bad_filters
iamaziz/henry
42
python
@pytest.mark.parametrize('project, model, msg', [('BadProject', 'henry_qa', 'error occured while getting projects.'), ('henry', 'BadModel', 'error occured while getting models.'), ('BadProject', 'BadModel', 'error occured while getting projects.')]) def test_vacuum_models_throws_for_bad_filters(vacuum: vacuum.Vacuum, project, model, msg): with pytest.raises(exceptions.NotFoundError) as exc: vacuum.models(project=project, model=model) assert (msg in str(exc.value))
@pytest.mark.parametrize('project, model, msg', [('BadProject', 'henry_qa', 'error occured while getting projects.'), ('henry', 'BadModel', 'error occured while getting models.'), ('BadProject', 'BadModel', 'error occured while getting projects.')]) def test_vacuum_models_throws_for_bad_filters(vacuum: vacuum.Vacuum, project, model, msg): with pytest.raises(exceptions.NotFoundError) as exc: vacuum.models(project=project, model=model) assert (msg in str(exc.value))<|docstring|>vacuum.models() should error for bad project/model filter values.<|endoftext|>
80d9e1b7a57e3bd3a410643fd0eb7a56756eee29781500c320034b1837b74cd5
def test_vacuum_explores_filters(vacuum: vacuum.Vacuum, test_project_name, test_model, test_explores_stats): 'vacuum.explores() should be able to filter models and explores.' result = vacuum.explores(model=test_model['name']) assert isinstance(result, list) assert (len(result) == len(test_explores_stats)) assert all(((r['Model'] == test_model['name']) for r in result)) test_explore_names = [e['name'] for e in test_explores_stats] assert all(((r['Explore'] in test_explore_names) for r in result)) test_explore = test_explores_stats[0] result = vacuum.explores(model=test_model['name'], explore=test_explore['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore['name']) assert (result['Unused Joins'] == test_explore['unused_joins']) assert (result['Unused Fields'] == test_explore['unused_fields'])
vacuum.explores() should be able to filter models and explores.
tests/test_vacuum.py
test_vacuum_explores_filters
iamaziz/henry
42
python
def test_vacuum_explores_filters(vacuum: vacuum.Vacuum, test_project_name, test_model, test_explores_stats): result = vacuum.explores(model=test_model['name']) assert isinstance(result, list) assert (len(result) == len(test_explores_stats)) assert all(((r['Model'] == test_model['name']) for r in result)) test_explore_names = [e['name'] for e in test_explores_stats] assert all(((r['Explore'] in test_explore_names) for r in result)) test_explore = test_explores_stats[0] result = vacuum.explores(model=test_model['name'], explore=test_explore['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore['name']) assert (result['Unused Joins'] == test_explore['unused_joins']) assert (result['Unused Fields'] == test_explore['unused_fields'])
def test_vacuum_explores_filters(vacuum: vacuum.Vacuum, test_project_name, test_model, test_explores_stats): result = vacuum.explores(model=test_model['name']) assert isinstance(result, list) assert (len(result) == len(test_explores_stats)) assert all(((r['Model'] == test_model['name']) for r in result)) test_explore_names = [e['name'] for e in test_explores_stats] assert all(((r['Explore'] in test_explore_names) for r in result)) test_explore = test_explores_stats[0] result = vacuum.explores(model=test_model['name'], explore=test_explore['name']) assert isinstance(result, list) assert (len(result) == 1) result = result[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore['name']) assert (result['Unused Joins'] == test_explore['unused_joins']) assert (result['Unused Fields'] == test_explore['unused_fields'])<|docstring|>vacuum.explores() should be able to filter models and explores.<|endoftext|>
06994d0c0a053422110f111960b9e1ca9685bb73fdc4b12c438ac953cefc7167
@pytest.mark.parametrize('test_explore', ['explore_2_joins_all_used', 'explore_2_joins_1_used', 'unused_explore_2_joins', 'unused_explore_no_joins']) def test_vacuum_explores_vacuums(vacuum: vacuum.Vacuum, test_model, test_explore, test_explores_stats): 'vacuum.explores() should return the unused joins and fields for a given\n explore.\n ' result = vacuum.explores(model=test_model['name'], explore=test_explore) assert isinstance(result, list) assert (len(result) == 1) result = result[0] test_explore_stats = list(filter((lambda e: (e['name'] == test_explore)), test_explores_stats))[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore) assert (result['Unused Joins'] == '\n'.join(test_explore_stats['unused_joins'])) assert (result['Unused Fields'] == '\n'.join(test_explore_stats['unused_fields']))
vacuum.explores() should return the unused joins and fields for a given explore.
tests/test_vacuum.py
test_vacuum_explores_vacuums
iamaziz/henry
42
python
@pytest.mark.parametrize('test_explore', ['explore_2_joins_all_used', 'explore_2_joins_1_used', 'unused_explore_2_joins', 'unused_explore_no_joins']) def test_vacuum_explores_vacuums(vacuum: vacuum.Vacuum, test_model, test_explore, test_explores_stats): 'vacuum.explores() should return the unused joins and fields for a given\n explore.\n ' result = vacuum.explores(model=test_model['name'], explore=test_explore) assert isinstance(result, list) assert (len(result) == 1) result = result[0] test_explore_stats = list(filter((lambda e: (e['name'] == test_explore)), test_explores_stats))[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore) assert (result['Unused Joins'] == '\n'.join(test_explore_stats['unused_joins'])) assert (result['Unused Fields'] == '\n'.join(test_explore_stats['unused_fields']))
@pytest.mark.parametrize('test_explore', ['explore_2_joins_all_used', 'explore_2_joins_1_used', 'unused_explore_2_joins', 'unused_explore_no_joins']) def test_vacuum_explores_vacuums(vacuum: vacuum.Vacuum, test_model, test_explore, test_explores_stats): 'vacuum.explores() should return the unused joins and fields for a given\n explore.\n ' result = vacuum.explores(model=test_model['name'], explore=test_explore) assert isinstance(result, list) assert (len(result) == 1) result = result[0] test_explore_stats = list(filter((lambda e: (e['name'] == test_explore)), test_explores_stats))[0] assert (result['Model'] == test_model['name']) assert (result['Explore'] == test_explore) assert (result['Unused Joins'] == '\n'.join(test_explore_stats['unused_joins'])) assert (result['Unused Fields'] == '\n'.join(test_explore_stats['unused_fields']))<|docstring|>vacuum.explores() should return the unused joins and fields for a given explore.<|endoftext|>
09241af2747d827cbac12ffde7ba8d3e3c9ea5faea0c209153bd109365ae3610
@pytest.mark.parametrize('model, explore, msg', [('BadModel', None, 'error occured while getting models.'), ('BadModel', 'explore_2_joins_used', 'error occured while getting models/explores.'), ('BadModel', 'BadExplore', 'error occured while getting models/explores'), ('henry_qa', 'BadExplore', 'error occured while getting models/explores')]) def test_vacuum_explores_throws_for_bad_filters(vacuum: vacuum.Vacuum, model, explore, msg): 'vacuum.explores() should error for bad model/explore filter values.' with pytest.raises(exceptions.NotFoundError) as exc: vacuum.explores(model=model, explore=explore) assert (msg in str(exc.value))
vacuum.explores() should error for bad model/explore filter values.
tests/test_vacuum.py
test_vacuum_explores_throws_for_bad_filters
iamaziz/henry
42
python
@pytest.mark.parametrize('model, explore, msg', [('BadModel', None, 'error occured while getting models.'), ('BadModel', 'explore_2_joins_used', 'error occured while getting models/explores.'), ('BadModel', 'BadExplore', 'error occured while getting models/explores'), ('henry_qa', 'BadExplore', 'error occured while getting models/explores')]) def test_vacuum_explores_throws_for_bad_filters(vacuum: vacuum.Vacuum, model, explore, msg): with pytest.raises(exceptions.NotFoundError) as exc: vacuum.explores(model=model, explore=explore) assert (msg in str(exc.value))
@pytest.mark.parametrize('model, explore, msg', [('BadModel', None, 'error occured while getting models.'), ('BadModel', 'explore_2_joins_used', 'error occured while getting models/explores.'), ('BadModel', 'BadExplore', 'error occured while getting models/explores'), ('henry_qa', 'BadExplore', 'error occured while getting models/explores')]) def test_vacuum_explores_throws_for_bad_filters(vacuum: vacuum.Vacuum, model, explore, msg): with pytest.raises(exceptions.NotFoundError) as exc: vacuum.explores(model=model, explore=explore) assert (msg in str(exc.value))<|docstring|>vacuum.explores() should error for bad model/explore filter values.<|endoftext|>
d65e98b664618585fe244086a439c020d436f21261e55339013b05a1fcd9e238
def sort_items(self): 'Organises each item according to category' for item in self.items: if ('Aged Brie' in item.name): self.aged_brie.append(item) self.organised_items['Aged Brie'] = self.aged_brie elif ('Sulfuras' in item.name): self.sulfuras.append(item) self.organised_items['Sulfuras'] = self.sulfuras elif ('Backstage' in item.name): self.backstage.append(item) self.organised_items['Backstage'] = self.backstage elif ('Conjured' in item.name): self.conjured.append(item) self.organised_items['Conjured'] = self.conjured elif (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name) and ('Conjured' not in item.name)): self.general_items.append(item) self.organised_items['General Items'] = self.general_items return self.organised_items
Organises each item according to category
python/gilded_rose.py
sort_items
Mayo-Theodore/GildedRose-Refactoring-Kata
0
python
def sort_items(self): for item in self.items: if ('Aged Brie' in item.name): self.aged_brie.append(item) self.organised_items['Aged Brie'] = self.aged_brie elif ('Sulfuras' in item.name): self.sulfuras.append(item) self.organised_items['Sulfuras'] = self.sulfuras elif ('Backstage' in item.name): self.backstage.append(item) self.organised_items['Backstage'] = self.backstage elif ('Conjured' in item.name): self.conjured.append(item) self.organised_items['Conjured'] = self.conjured elif (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name) and ('Conjured' not in item.name)): self.general_items.append(item) self.organised_items['General Items'] = self.general_items return self.organised_items
def sort_items(self): for item in self.items: if ('Aged Brie' in item.name): self.aged_brie.append(item) self.organised_items['Aged Brie'] = self.aged_brie elif ('Sulfuras' in item.name): self.sulfuras.append(item) self.organised_items['Sulfuras'] = self.sulfuras elif ('Backstage' in item.name): self.backstage.append(item) self.organised_items['Backstage'] = self.backstage elif ('Conjured' in item.name): self.conjured.append(item) self.organised_items['Conjured'] = self.conjured elif (('Aged Brie' not in item.name) and ('Sulfuras' not in item.name) and ('Backstage' not in item.name) and ('Conjured' not in item.name)): self.general_items.append(item) self.organised_items['General Items'] = self.general_items return self.organised_items<|docstring|>Organises each item according to category<|endoftext|>
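A hedged usage sketch for the sort_items record above; the Item(name, sell_in, quality) class, the GildedRose(items) constructor, and the gilded_rose module path are all assumptions based on the standard kata, not on this record::

    from gilded_rose import GildedRose, Item  # assumed import

    items = [
        Item("Aged Brie", 2, 0),
        Item("Sulfuras, Hand of Ragnaros", 0, 80),
        Item("Backstage passes to a TAFKAL80ETC concert", 15, 20),
        Item("Elixir of the Mongoose", 5, 7),
    ]
    shop = GildedRose(items)
    by_category = shop.sort_items()
    # e.g. {'Aged Brie': [...], 'Sulfuras': [...], 'Backstage': [...],
    #       'General Items': [...]}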
89e49207fac292a50d8f506ef9249a8cd4c8b965aad6bd7a7d8408395def8d72
def list_items(self): 'Displays available items in gilded rose' return self.organised_items
Displays available items in gilded rose
python/gilded_rose.py
list_items
Mayo-Theodore/GildedRose-Refactoring-Kata
0
python
def list_items(self): return self.organised_items
def list_items(self): return self.organised_items<|docstring|>Displays available items in gilded rose<|endoftext|>
7cf4a70407951379fd9407723d1b6a80884e54d67f5ece9d2461b067bdb56a7c
def update_sell_in(self): 'Update sell in value for all items' for (key, values) in self.organised_items.items(): for item in values: if ('Sulfuras' in item.name): item.sell_in = None else: item.sell_in -= 1 return self.organised_items
Update sell in value for all items
python/gilded_rose.py
update_sell_in
Mayo-Theodore/GildedRose-Refactoring-Kata
0
python
def update_sell_in(self): for (key, values) in self.organised_items.items(): for item in values: if ('Sulfuras' in item.name): item.sell_in = None else: item.sell_in -= 1 return self.organised_items
def update_sell_in(self): for (key, values) in self.organised_items.items(): for item in values: if ('Sulfuras' in item.name): item.sell_in = None else: item.sell_in -= 1 return self.organised_items<|docstring|>Update sell in value for all items<|endoftext|>