Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def setup_page(choice, pagepanel, frame):
    """
    Creates a :class:`Page` inside a :class:`Notebook`.

    :Args:
        - choice (tuple)
            A tuple of (name, module path, module alias)
        - pagepanel
    """
    if isinstance(choice.module, str):
        try:
            __import__(choice.module)
        except ImportError as e:
            wx.MessageBox('%s' % e, 'Info', wx.OK | wx.ICON_ERROR)
            return False
        else:
            class_aliases, class_obj = _get_classes(sys.modules[choice.module],
                                                    class_order=choice.order)
    else:
        class_aliases, class_obj = _get_classes(choice.module, class_order=choice.order)
    nb = wx.Notebook(pagepanel)
    for class_alias, class_obj in class_aliases:
        nb.AddPage(Page(nb, class_obj, choice.alias, class_alias, frame, pagepanel), class_alias)
    panelsizer = wx.BoxSizer()
    panelsizer.Add(nb, 1, wx.EXPAND | wx.ALL)
    pagepanel.SetSizer(panelsizer)
    pagepanel.Layout()
    pagepanel.Fit()
    return True
5,354,200
def customization_data(client=None): """Produce any customization definitions (types, fields, message destinations, etc) that should be installed by `resilient-circuits customize` """ # This import data contains: # Function inputs: # artifact_id # artifact_type # artifact_value # incident_id # DataTables: # bluecoat_categorization_results # Message Destinations: # bluecoat_site_review # Functions: # bluecoat_site_review_lookup # Workflows: # bluecoat_site_review_search yield ImportDefinition(u""" eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogImVlOTE2NGRlLTBmN2Ut NGZkOC1iNjVmLWUxMTBlODRjZTFjOSIsICJkZXNjcmlwdGlvbiI6ICJUaGlzIHdvcmtmbG93IGRl bW9uc3RyYXRlcyB0aGUgQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgbG9vayB1cCBmdW5jdGlvbiBhbmQg cHVsbHMgYmFjayByZXN1bHRzLiIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJleHBvcnRf a2V5IjogImJsdWVjb2F0X3NpdGVfcmV2aWV3X3NlYXJjaCIsICJ3b3JrZmxvd19pZCI6IDE0NCwg Imxhc3RfbW9kaWZpZWRfYnkiOiAiZ2VyYWxkLnRyb3RtYW5AaWJtLmNvbSIsICJjb250ZW50Ijog eyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmlu aXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RF TFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQv RElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RD XCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwi IHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6 eHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRw Oi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1c Imh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiYmx1ZWNvYXRfc2l0 ZV9yZXZpZXdfc2VhcmNoXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJCbHVlY29hdCBT aXRlIFJldmlldyBTZWFyY2hcIj48ZG9jdW1lbnRhdGlvbj5UaGlzIHdvcmtmbG93IGRlbW9uc3Ry YXRlcyB0aGUgQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgbG9vayB1cCBmdW5jdGlvbiBhbmQgcHVsbHMg YmFjayByZXN1bHRzLjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRf MTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMHVueGwydDwvb3V0Z29pbmc+PC9zdGFy dEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFpYmVmdjdcIiBuYW1lPVwiQmx1 ZWNvYXQgU2l0ZSBSZXZpZXcgTG9va3VwXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxl eHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCIyYjI2OTMyYS02MGRm LTQwNDUtYWU4ZC0wZWIzMTlkZTYwMWRcIj57XCJpbnB1dHNcIjp7fSxcInByZV9wcm9jZXNzaW5n X3NjcmlwdFwiOlwiaW5wdXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5pbnB1dHMuYXJ0 aWZhY3RfdmFsdWUgPSBhcnRpZmFjdC52YWx1ZVxcblwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwv ZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wdW54bDJ0PC9pbmNvbWlu Zz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzFncnNjb3Q8L291dGdvaW5nPjwvc2VydmljZVRhc2s+ PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wdW54bDJ0XCIgc291cmNlUmVmPVwiU3Rh cnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMWliZWZ2N1wiLz48ZW5k RXZlbnQgaWQ9XCJFbmRFdmVudF8wN3FqOXgzXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18xZ3Jz Y290PC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df MWdyc2NvdFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzFpYmVmdjdcIiB0YXJnZXRSZWY9XCJF bmRFdmVudF8wN3FqOXgzXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr eHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3Rh dGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwi U3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwi Lz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBt bmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFc 
Ij48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlk PVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdp ZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6 Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9i cG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u RWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr eHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwi OTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5F bGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9k aVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9 XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50 XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl bWVudD1cIlNlcnZpY2VUYXNrXzFpYmVmdjdcIiBpZD1cIlNlcnZpY2VUYXNrXzFpYmVmdjdfZGlc Ij48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzU4XCIgeT1c IjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c IlNlcXVlbmNlRmxvd18wdW54bDJ0XCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMHVueGwydF9kaVwiPjxv bWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZc Ii8+PG9tZ2RpOndheXBvaW50IHg9XCIzNThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c IjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lk dGg9XCIwXCIgeD1cIjI3OFwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRp OkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMDdxajl4 M1wiIGlkPVwiRW5kRXZlbnRfMDdxajl4M19kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZc IiB3aWR0aD1cIjM2XCIgeD1cIjYxMlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9t Z2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI2MzBcIiB5PVwiMjI3XCIv PjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBi cG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xZ3JzY290XCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMWdy c2NvdF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNDU4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu dFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2MTJcIiB4c2k6dHlwZT1cIm9tZ2Rj OlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdo dD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjUzNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxh YmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlh Z3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJibHVlY29hdF9zaXRlX3Jldmll d19zZWFyY2giLCAidmVyc2lvbiI6IDIxfSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NDMzNDc3 NDM2MTgsICJjcmVhdG9yX2lkIjogImdlcmFsZC50cm90bWFuQGlibS5jb20iLCAiYWN0aW9ucyI6 IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiYmx1ZWNvYXRfc2l0ZV9yZXZpZXdfc2VhcmNoIiwg Im5hbWUiOiAiQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgU2VhcmNoIn1dLCAiYWN0aW9ucyI6IFtdLCAi bGF5b3V0cyI6IFtdLCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9uIjogMiwgImlkIjogMzcsICJpbmR1 c3RyaWVzIjogbnVsbCwgInBoYXNlcyI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJnZW9zIjog bnVsbCwgImxvY2FsZSI6IG51bGwsICJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgInZl cnNpb24iOiAiMzEuMC40MjU0IiwgImJ1aWxkX251bWJlciI6IDQyNTQsICJtaW5vciI6IDB9LCAi dGltZWZyYW1lcyI6IG51bGwsICJ3b3Jrc3BhY2VzIjogW10sICJhdXRvbWF0aWNfdGFza3MiOiBb XSwgImZ1bmN0aW9ucyI6IFt7ImRpc3BsYXlfbmFtZSI6ICJCbHVlY29hdCBTaXRlIFJldmlldyBM b29rdXAiLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAiVGhpcyBmdW5jdGlvbiB0YWtlcyBh biBhcnRpZmFjdCBvZiB0eXBlIFVSTCBvciBETlMgbmFtZSBhbmQgcmV0dXJucyB0aG9zZSByZXN1 bHRzIGFzIGEganNvbiBvYmplY3QuIiwgImZvcm1hdCI6ICJ0ZXh0In0sICJjcmVhdG9yIjogeyJk aXNwbGF5X25hbWUiOiAiR2VyYWxkIFRyb3RtYW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogNCwg 
Im5hbWUiOiAiZ2VyYWxkLnRyb3RtYW5AaWJtLmNvbSJ9LCAidmlld19pdGVtcyI6IFt7InNob3df aWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIi OiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImVhZDIxNGMyLTEz ZmUtNDNmNi1hM2M3LTY3NmE4ODMzOGRiYiIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19p ZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6 IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiNWU5NGIxZDMtNDhh MS00MThhLTljZTMtNDQzM2M2ODg5ZTE0IiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lm IjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjog ZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICIzYTJlMzQ3Yi02NzJl LTQyNjMtODc4Ny1hM2U5ZWJhNGFjOTEiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYi OiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBm YWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjliYTQ5ODg3LTBkY2Yt NDBjZS1hNWVhLTljMGM0M2Y4MzFiZiIsICJzdGVwX2xhYmVsIjogbnVsbH1dLCAiZXhwb3J0X2tl eSI6ICJibHVlY29hdF9zaXRlX3Jldmlld19sb29rdXAiLCAidXVpZCI6ICIyYjI2OTMyYS02MGRm LTQwNDUtYWU4ZC0wZWIzMTlkZTYwMWQiLCAibGFzdF9tb2RpZmllZF9ieSI6IHsiZGlzcGxheV9u YW1lIjogIkdlcmFsZCBUcm90bWFuIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDQsICJuYW1lIjog ImdlcmFsZC50cm90bWFuQGlibS5jb20ifSwgInZlcnNpb24iOiAyMCwgIndvcmtmbG93cyI6IFt7 ImRlc2NyaXB0aW9uIjogbnVsbCwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImFjdGlvbnMi OiBbXSwgIm5hbWUiOiAiQmx1ZWNvYXQgU2l0ZSBSZXZpZXcgU2VhcmNoIiwgIndvcmtmbG93X2lk IjogMTQ0LCAicHJvZ3JhbW1hdGljX25hbWUiOiAiYmx1ZWNvYXRfc2l0ZV9yZXZpZXdfc2VhcmNo IiwgInV1aWQiOiBudWxsfV0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTQzMzQ5NzI1MjM0LCAi ZGVzdGluYXRpb25faGFuZGxlIjogImJsdWVjb2F0X3NpdGVfcmV2aWV3IiwgImlkIjogMTY0LCAi bmFtZSI6ICJibHVlY29hdF9zaXRlX3Jldmlld19sb29rdXAifV0sICJub3RpZmljYXRpb25zIjog bnVsbCwgInJlZ3VsYXRvcnMiOiBudWxsLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJjcmVhdGVfZGF0 ZSI6IDE1NDMzNTc5MDE2NzAsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2Vz IChpbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRl cm5hbCkiLCAiaWQiOiAwLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h bCkiLCAidXBkYXRlX2RhdGUiOiAxNTQzMzU3OTAxNjcwLCAidXVpZCI6ICJiZmVlYzJkNC0zNzcw LTExZTgtYWQzOS00YTAwMDQwNDRhYTAiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFs c2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2V9XSwgInNjcmlwdHMiOiBbXSwg InR5cGVzIjogW3siZGlzcGxheV9uYW1lIjogIkJsdWVjb2F0IENhdGVnb3JpemF0aW9uIFJlc3Vs dHMiLCAidXVpZCI6ICIzNDUwYWJkZS0xOGU3LTRkOTEtODBhOC0zYzVjMjRjMWY3OWIiLCAidHlw ZV9pZCI6IDgsICJmaWVsZHMiOiB7InVybCI6IHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6 IDEwMDQsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiVVJMIiwgImJsYW5rX29wdGlv biI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAyNTks ICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiY2I4NmViMzgtOTAzNC00YWIyLTkzOWQtZGI0 YzdmZmY0Njk4IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0 aXAiOiAiIiwgIndpZHRoIjogMjI1LCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZh bHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogImJsdWVjb2F0X2NhdGVnb3JpemF0 aW9uX3Jlc3VsdHMvdXJsIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRl ciI6ICIiLCAibmFtZSI6ICJ1cmwiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9z ZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXSwgIm9yZGVyIjogMH0sICJjYXRlZ29y aXphdGlvbl9uYW1lIjogeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTAwNCwgIm9wZXJh dGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJDYXRlZ29yaXphdGlvbiBOYW1lIiwgImJsYW5rX29w dGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAy NjAsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiMGUyYzExMmMtNDYyNS00MjU2LWE0MTgt 
NjVmYTBjMDE3Y2ZjIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRv b2x0aXAiOiAiIiwgIndpZHRoIjogMjY0LCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6 IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogImJsdWVjb2F0X2NhdGVnb3Jp emF0aW9uX3Jlc3VsdHMvY2F0ZWdvcml6YXRpb25fbmFtZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6 IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAiY2F0ZWdvcml6YXRpb25fbmFtZSIs ICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwg InZhbHVlcyI6IFtdLCAib3JkZXIiOiAxfSwgInV0Y2VzdF90aW1lc3RhbXAiOiB7Im9wZXJhdGlv bnMiOiBbXSwgInR5cGVfaWQiOiAxMDA0LCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0Ijog IlVUQy9FU1QgdGltZXN0YW1wIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVs bCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAyNjEsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1 aWQiOiAiZTBkMGQ3NDktNmRhZS00Y2Q5LWExMjMtZDliOWJjYjY3ZjJkIiwgImNob3NlbiI6IGZh bHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAiIiwgIndpZHRoIjogMjU1LCAi aW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJl eHBvcnRfa2V5IjogImJsdWVjb2F0X2NhdGVnb3JpemF0aW9uX3Jlc3VsdHMvdXRjZXN0X3RpbWVz dGFtcCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5h bWUiOiAidXRjZXN0X3RpbWVzdGFtcCIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2No b3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdLCAib3JkZXIiOiAyfX0sICJwYXJl bnRfdHlwZXMiOiBbImluY2lkZW50Il0sICJ0eXBlX25hbWUiOiAiYmx1ZWNvYXRfY2F0ZWdvcml6 YXRpb25fcmVzdWx0cyIsICJleHBvcnRfa2V5IjogImJsdWVjb2F0X2NhdGVnb3JpemF0aW9uX3Jl c3VsdHMiLCAiZm9yX2N1c3RvbV9maWVsZHMiOiBmYWxzZSwgImFjdGlvbnMiOiBbXSwgImlkIjog bnVsbCwgImZvcl9hY3Rpb25zIjogZmFsc2UsICJmb3Jfbm90aWZpY2F0aW9ucyI6IGZhbHNlLCAi c2NyaXB0cyI6IFtdLCAicHJvcGVydGllcyI6IHsiZm9yX3dobyI6IFtdLCAiY2FuX2Rlc3Ryb3ki OiBmYWxzZSwgImNhbl9jcmVhdGUiOiBmYWxzZX19XSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjog W3sidXVpZCI6ICJjODliY2U2ZC0wMWY4LTQ3ZTctOGQ0Yy01ZDc5M2IxZWE2YjYiLCAiZXhwb3J0 X2tleSI6ICJibHVlY29hdF9zaXRlX3JldmlldyIsICJuYW1lIjogIkJsdWVjb2F0IFNpdGUgUmV2 aWV3IiwgImRlc3RpbmF0aW9uX3R5cGUiOiAwLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiYmx1ZWNv YXRfc2l0ZV9yZXZpZXciLCAiZXhwZWN0X2FjayI6IHRydWUsICJ1c2VycyI6IFsiZ2VyYWxkLnRy b3RtYW5AaWJtLmNvbSJdfV0sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAicm9sZXMi OiBbXSwgImZpZWxkcyI6IFt7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAwLCAib3BlcmF0 aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAiYmxhbmtfb3B0aW9uIjogZmFs c2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDM3LCAicmVhZF9v bmx5IjogdHJ1ZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4Y2Nh IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRvb2x0aXAiOiAi V2hldGhlciB0aGUgaW5jaWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVu dC4gIFRoaXMgZmllbGQgaXMgcmVhZC1vbmx5LiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90 ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW5j X3RyYWluaW5nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJuYW1lIjogImluY190cmFp bmluZyIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBm YWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJv cGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiaW5jaWRlbnRfaWQiLCAiYmxhbmtfb3B0aW9u IjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDExNSwg InJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJlYWQyMTRjMi0xM2ZlLTQzZjYtYTNjNy02NzZh ODgzMzhkYmIiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJ0b29s dGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0 ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9pbmNpZGVudF9pZCIsICJoaWRlX25v dGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAiaW5jaWRlbnRf 
aWQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFs c2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3Bl cmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogImFydGlmYWN0X3R5cGUiLCAiYmxhbmtfb3B0aW9u IjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDE1MCwg InJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICIzYTJlMzQ3Yi02NzJlLTQyNjMtODc4Ny1hM2U5 ZWJhNGFjOTEiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogInRleHQiLCAidG9vbHRp cCI6ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVz IjogW10sICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vYXJ0aWZhY3RfdHlwZSIsICJoaWRlX25v dGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAiYXJ0aWZhY3Rf dHlwZSIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBm YWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJv cGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiYXJ0aWZhY3RfaWQiLCAiYmxhbmtfb3B0aW9u IjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDI2Miwg InJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICI1ZTk0YjFkMy00OGExLTQxOGEtOWNlMy00NDMz YzY4ODllMTQiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJ0b29s dGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0 ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9hcnRpZmFjdF9pZCIsICJoaWRlX25v dGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAiYXJ0aWZhY3Rf aWQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFs c2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3Bl cmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogImFydGlmYWN0X3ZhbHVlIiwgImJsYW5rX29wdGlv biI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAxNDks ICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiOWJhNDk4ODctMGRjZi00MGNlLWE1ZWEtOWMw YzQzZjgzMWJmIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0 aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRl cyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2FydGlmYWN0X3ZhbHVlIiwgImhpZGVf bm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJhcnRpZmFj dF92YWx1ZSIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi OiBmYWxzZSwgInZhbHVlcyI6IFtdfV0sICJvdmVycmlkZXMiOiBbXSwgImV4cG9ydF9kYXRlIjog MTU0MzM0OTg1MzIwMH0= """ )
5,354,201
def init_app(app):
    """
    This will be called by the application initializer.
    """
    global backup_server
    backup_server = BackupServer()
    app.register_blueprint(blueprint)
5,354,202
def test_picorv32_quartus_cyclone10_timing(picorv32_cyclone10_data):
    """Check timing tables"""
    timing = picorv32_cyclone10_data["timing"]
    clocks = timing["Clocks"].set_index("Clock Name")
    fmax = timing["Slow 900mV 100C Model Fmax Summary"].set_index("Clock Name")
    assert clocks.loc["clk", "Frequency"] == "175.0 MHz"
    assert fmax.loc["clk", "Fmax"] == "131.58 MHz"
5,354,203
def taillight_detect(image): """ Takes in a road image, re-sizes for the model, predicts the lane to be drawn from the model in G color, recreates an RGB image of a lane and merges with the original road image. """ model = load_model('full_CNN_model.h5') #image1=image #image1=np.array(image1) #objects=np.squeeze(image,2) #rows,cols=objects.shape rows, cols,_ = image.shape #cols, rows = image.size #cols=160 #rows=80 # Get image ready for feeding into model small_img = cv2.resize(image, (160, 80)) #img_y_cr_cb = cv2.cvtColor(small_img, cv2.COLOR_BGR2YCrCb) #y, cr, cb = cv2.split(img_y_cr_cb) # Applying equalize Hist operation on Y channel. #y_eq = cv2.equalizeHist(y) #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #y_eq = clahe.apply(y) #img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb)) #small_img = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR) #small_img = imresize(image, (80, 160, 3)) small_img = np.array(small_img) small_img = small_img[None,:,:,:] # Make prediction with neural network (un-normalize value by multiplying by 255) prediction = model.predict(small_img)[0] * 255 #new_image = imresize(prediction, (rows, cols, 3)) mask = cv2.resize(prediction, (cols, rows)) img_y_cr_cb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb) y, cr, cb = cv2.split(img_y_cr_cb) # Applying equalize Hist operation on Y channel. #y_eq = cv2.equalizeHist(y) clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) y_eq = clahe.apply(y) img_y_cr_cb_eq = cv2.merge((y_eq, cr, cb)) image_he = cv2.cvtColor(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR) gray = cv2.cvtColor(image_he, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (3, 3), 0) auto = auto_canny(blurred) for i in range(rows): for j in range(cols): if auto[i,j] >0 and mask [i,j]>100: auto[i,j]=255 else: auto[i,j]=0 cv2.imshow('histogram equalisation', auto) cv2.waitKey(0) #h, w = edges.shape[:2] filled_from_bottom = np.zeros((rows, cols)) for col in range(cols): for row in reversed(range(rows)): if auto[row][col] < 255: filled_from_bottom[row][col] = 255 else: break filled_from_top = np.zeros((rows, cols)) for col in range(cols): for row in range(rows): if auto[row][col] < 255: filled_from_top[row][col] = 255 else: break filled_from_left = np.zeros((rows, cols)) for row in range(rows): for col in range(cols): if auto[row][col] < 255: filled_from_left[row][col] = 255 else: break filled_from_right = np.zeros((rows, cols)) for row in range(rows): for col in reversed(range(cols)): if auto[row][col] < 255: filled_from_right[row][col] = 255 else: break for i in range(rows): for j in range(cols): if filled_from_bottom[i,j] ==0 and filled_from_top[i,j]==0 and filled_from_right[i,j] ==0 and filled_from_left[i,j]==0: auto[i,j]=255 else: auto[i,j]=0 kernel = np.ones((5,5),np.uint8) opening = cv2.morphologyEx(auto, cv2.MORPH_OPEN, kernel) closing = cv2.morphologyEx(auto, cv2.MORPH_CLOSE, kernel) mask = np.expand_dims(mask, 2) mask = np.repeat(mask, 3, axis=2) # give the mask the same shape as your image colors = {"red": [0.0,1.0,1.0], "blue": [0.,0.,0.1]} # a dictionary for your colors, experiment with the values colored_mask = np.multiply(mask, colors["red"]) # broadcast multiplication (thanks to the multiplication by 0, you'll end up with values different from 0 only on the relevant channels and the right regions) image = image+colored_mask # element-wise sum (sinc img and mask have the same shape) #return image.astype(float) / 255 #return new_image return auto
5,354,204
def generate_random_number(rng, length):
    """Return random number with predefined length."""
    return crypto.random_generate(rng, length)
5,354,205
def update_old_names():
    """Fetches the list of old tz names and returns a mapping"""
    url = urlparse(ZONEINFO_URL)
    log.info('Connecting to %s' % url.netloc)
    ftp = ftplib.FTP(url.netloc)
    ftp.login()
    gzfile = BytesIO()

    log.info('Fetching zoneinfo database')
    ftp.retrbinary('RETR ' + url.path, gzfile.write)
    gzfile.seek(0)

    log.info('Extracting backwards data')
    archive = tarfile.open(mode="r:gz", fileobj=gzfile)

    backward = {}
    for line in archive.extractfile('backward').readlines():
        # Lines read from the tar member are bytes, so compare against bytes.
        if line.startswith(b'#'):
            continue
        if len(line.strip()) == 0:
            continue
        parts = line.split()
        if parts[0] != b'Link':
            continue
        backward[parts[2].decode('ascii')] = parts[1].decode('ascii')
    return backward
5,354,206
def get_namespace(location: Optional[str] = None,
                  namespace_id: Optional[str] = None,
                  project: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
    """
    Gets a namespace.
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['namespaceId'] = namespace_id
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:servicedirectory/v1:getNamespace',
                                    __args__, opts=opts, typ=GetNamespaceResult).value

    return AwaitableGetNamespaceResult(
        labels=__ret__.labels,
        name=__ret__.name)
5,354,207
def build_headers(access_token, client_id):
    """
    :param access_token: Access token granted when the user links their account
    :param client_id: This is the api key for your own app
    :return: Dict of headers
    """
    return {'Content-Type': 'application/json',
            'Authorization': f'Bearer {access_token}',
            'trakt-api-version': '2',
            'trakt-api-key': client_id}
5,354,208
def get_args():
    """Get command-line arguments"""

    parser = argparse.ArgumentParser(
        description='sum numbers',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Positional arg
    parser.add_argument('int',
                        metavar='INT',
                        type=int,
                        nargs='+',
                        help='Numbers to add')

    return parser.parse_args()
5,354,209
def odd_subgraph_centrality(i, lam, u):
    """
    Calculates the number of odd length closed walks that a node participates in
    :cite:`estrada2005spectral`. Used in the calculation of spectral scaling and
    generalized robustness index.

    :param i: node index
    :param lam: eigenvalues (array)
    :param u: eigenvectors (matrix)
    :return: a float
    """
    sc = 0
    for j in range(len(lam)):
        sc += np.power(u[i, j], 2) * np.sinh(lam[j])
    return sc
5,354,210
def PolyAreasToModel(polyareas, bevel_amount, bevel_pitch, quadrangulate):
    """Convert a PolyAreas into a Model object.

    Assumes polyareas are in xy plane.

    Args:
      polyareas: geom.PolyAreas
      bevel_amount: float - if > 0, amount of bevel
      bevel_pitch: float - if > 0, angle in radians of bevel
      quadrangulate: bool - should n-gons be quadrangulated?
    Returns:
      geom.Model
    """

    m = geom.Model()
    if not polyareas:
        return m
    polyareas.points.AddZCoord(0.0)
    m.points = polyareas.points
    for pa in polyareas.polyareas:
        PolyAreaToModel(m, pa, bevel_amount, bevel_pitch, quadrangulate)
    return m
5,354,211
def preprocessing(train_data, test_data):
    """
    * The method first eliminates constant features from both train and test data.
    * Then, it splits the training data into features and labels.
    * Finally, it performs PCA on the training and testing data sets to reduce the
      dimension and overcome the curse of dimensionality.

    Parameters
    ----------
    train_data: training data set in data frame format
    test_data: testing data set in data frame format
    """
    # constant feature elimination
    train_data = train_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1)
    train_data = np.asarray(train_data)
    test_data = test_data.drop(['X3', 'X31', 'X32', 'X127', 'X128', 'X590'], axis=1)
    test_data = np.asarray(test_data)

    # training data is split into features and labels
    train_x = train_data[:, :train_data.shape[1] - 1]
    train_y = train_data[:, train_data.shape[1] - 1]
    train_y.shape = (np.size(train_y), 1)

    # principal component analysis
    pca = PCA(n_components=60)
    train_x_pca = pca.fit_transform(train_x)
    test_pca = pca.transform(test_data)

    return train_x_pca, train_y, test_pca
5,354,212
def generate_videos_from_events(response, video_model): """Creates the video containers/representations for this given response. We should only really invoke this as part of a migration as of right now (2/8/2019), but it's quite possible we'll have the need for dynamic upsertion later. """ seen_ids = set() video_objects = [] Video = video_model # Using a constructive approach here, but with an ancillary seen_ids list b/c Django models without # primary keys are unhashable for some dumb reason (even though they have unique fields...) for frame_id, event_data in response.exp_data.items(): if event_data.get("videoList", None) and event_data.get("videoId", None): # We've officially captured video here! events = event_data.get("eventTimings", []) for event in events: video_id = event["videoId"] pipe_name = event["pipeId"] # what we call "ID" they call "name" stream_time = event["streamTime"] if ( video_id not in seen_ids and pipe_name and stream_time and stream_time > 0 ): # Try looking for the regular ID first. file_obj = S3_RESOURCE.Object( settings.BUCKET_NAME, f"{video_id}.mp4" ) try: s3_response = file_obj.get() except ClientError: try: # If that doesn't work, use the pipe name. file_obj = S3_RESOURCE.Object( settings.BUCKET_NAME, f"{pipe_name}.mp4" ) s3_response = file_obj.get() except ClientError: logger.warning( f"could not find {video_id} or {pipe_name} in S3!" ) continue # Read first 32 bytes from streaming body (file header) to get actual filetype. streaming_body = s3_response["Body"] file_header_buffer: bytes = streaming_body.read(32) file_info = fleep.get(file_header_buffer) streaming_body.close() video_objects.append( Video( pipe_name=pipe_name, created_at=date_parser.parse(event["timestamp"]), date_modified=s3_response["LastModified"], # Can't get the *actual* pipe id property, it's in the webhook payload... frame_id=frame_id, full_name=f"{video_id}.{file_info.extension[0]}", study=response.study, response=response, is_consent_footage=frame_id in VALID_CONSENT_FRAMES, ) ) seen_ids.add(video_id) return Video.objects.bulk_create(video_objects)
5,354,213
def datetime_at_midnight(dt: DateTime, tz: TimeZone) -> DateTime:
    """ Returns a DateTime for the requested DateTime at midnight in the specified time zone.

    Args:
        dt (DateTime): the DateTime for which the new value at midnight should be calculated
        tz (TimeZone): the TimeZone to use when interpreting the DateTime

    Returns:
        DateTime

    Raises:
        DHError
    """
    try:
        return _JDateTimeUtils.dateAtMidnight(dt, tz.value)
    except Exception as e:
        raise DHError(e) from e
5,354,214
def count_dcm(logger, top):
    """
    This function recursively walks through a given directory (`top`) using
    depth-first search (bottom up) and counts the number of .dcm files present.

    Parameters
    ----------
    top : {str}
        The directory to count.

    Returns
    -------
    count : {int}
        The number of .dcm files in `top`.
    """
    try:
        count = 0
        # Count the number of .dcm files under `top`.
        for _, _, files in os.walk(top):
            for f in files:
                if f.endswith(".dcm"):
                    count += 1
    except Exception as e:
        # logger.error(f'Unable to count_dcm!\n{e}')
        print(f"Unable to count_dcm!\n{e}")

    return count
5,354,215
def convert2sametype(dict_, formula):
    """Utility function for internal use. Convert string/dict/DataFrame to dict

    Parameters
    ----------
    dict_ : dict
    formula : string/dict/DataFrame

    Returns
    -------
    type(formula)
    """
    return convert2type(dict_, type(formula))
5,354,216
def setup_data() -> None:
    """Load test dicom files to the test Orthanc server instance"""
    headers = {'content-type': 'application/dicom'}

    list_of_dicom_file_paths = [f'./tests/data/dicom_files/{i}'
                                for i in os.listdir('./tests/data/dicom_files/')]

    for file_path in list_of_dicom_file_paths:
        with open(file_path, 'rb') as file_handler:
            data = file_handler.read()

        requests.post(
            f'{ORTHANC_URL}/instances',
            data=data,
            headers=headers
        )
5,354,217
def from_sdf(sdf_content: str = None, file_path: str = None, ignore_hydrogens=False) -> List[Graph]:
    """
    parse graph from_sdf

    Reads chemical files and parses them into instances of `Graph`.
    As this function is not meant to be called in a loop, inner functions
    relative only to chemical file parsing are declared here.

    Type Aliases :
        Atom = str
        Bond = List[str]
    """
    if file_path:
        if file_path.endswith('.gz'):
            fp = gzip.open(file_path, 'rt', encoding='utf-8')
            sdf_content = fp.read()
        else:
            with open(file_path, 'r') as content_file:
                sdf_content = content_file.read()

    return [
        Mol_to_Graph(mol[0], mol[1])
        for mol in [
            parse_Mol(mol_file, ignore_hydrogens)
            for mol_file in [
                part[0]
                for part in [
                    compound.split('M END')
                    for compound in sdf_content.split("$$$$")
                    if compound.strip(' \t\n\r') != ''
                ]
                if is_Mol(part)
            ]
        ]
    ]
5,354,218
def dispatcher_connect(
    opp: OpenPeerPower, signal: str, target: Callable[..., None]
) -> Callable[[], None]:
    """Connect a callable function to a signal."""
    async_unsub = run_callback_threadsafe(
        opp.loop, async_dispatcher_connect, opp, signal, target
    ).result()

    def remove_dispatcher() -> None:
        """Remove signal listener."""
        run_callback_threadsafe(opp.loop, async_unsub).result()

    return remove_dispatcher
5,354,219
def test():
    """
    Test method for module.
    """
    server = Server(host='orka-node-1', port='22', user='vagrant', key=VAGRANT_KEY)
    print command(server, 'uname -s')
    print git_status(server, '/opt/orka')
    copy_file(server, '/etc/hosts', '/tmp/remote_hosts')
    print command(server, 'cat /tmp/remote_hosts')
5,354,220
def get_last_error():
    """
    Get the last error value, then turn it into a nice string. Return the string.
    """
    error_id = kernel32.GetLastError()

    # No actual error
    if error_id == 0:
        return None

    # Gonna need a string pointer
    buf = ffi.new("LPWSTR")
    chars = kernel32.FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                                    FORMAT_MESSAGE_FROM_SYSTEM |
                                    FORMAT_MESSAGE_IGNORE_INSERTS,
                                    ffi.NULL,
                                    error_id,
                                    0,
                                    buf,
                                    0,
                                    ffi.NULL)

    return ffi.string(ffi.cast("char **", buf)[0][0:chars]).decode('utf-8').strip("\r\n")
5,354,221
def load_params_from_pkl(params_dump_file_path):
    """
    Loads parameters from a pickle dump file.

    :param params_dump_file_path: self-explanatory
    :return dict of param_name => param
    """
    coll = {}
    f = open(params_dump_file_path, 'rb')
    while True:
        try:
            param_name, param_val = pickle.load(f)
            coll[param_name] = param_val
        except (EOFError, UnpicklingError):
            break
    f.close()
    return coll
5,354,222
def _extract_filename_from_filepath(strFilePath=""):
    """
    Function which extracts the file name from the given filepath
    """
    if strFilePath:
        try:
            strFileName = Path(strFilePath).name
            strFileName = str(strFileName).split(".")[0]
            return strFileName
        except Exception as ex:
            self.crash_report(traceback.format_exception(*sys.exc_info(), limit=None, chain=True))
            print("Error in _extract_filename_from_filepath=" + str(ex))
    else:
        print("Please enter the value=" + str(strFilePath))
5,354,223
def plot_by_term(term, df, kind='go', q=0.1, swarm=True, x='genotype', y='b', gene='ens_gene'): """ Plot ontology terms by a given column. Params: term - term to look for in melted_df df - a tidy dataframe with columns x and y kind - the ontology to use q - q-value for statistical significance. defaults to 0.1 swarm - if True, plots a swarmplot. Else, plots a violinplot. x - column to plot on x axis y - column to plot on y axis gene - column in the given df where gene WBIDs are provided Output: ax - an axis object containing a graph genes - a list of genes obtained from the melted df """ if type(kind) is not str: raise ValueError('`kind` variable must be a string.') if kind.lower() not in ['tissue', 'phenotype', 'go']: raise ValueError('`kind` must be one of `tissue`, `phenotype` or `go`') if type(term) is not str: raise ValueError('`term` must be a string.') if kind.lower() == 'tissue': onto_df = tea.fetch_dictionary() elif kind.lower() == 'phenotype': onto_df = pd.read_csv('../input/phenotype_ontology.csv') else: onto_df = pd.read_csv('../input/go_dictionary.csv') # melt the df: melted_df = pd.melt(onto_df, id_vars='wbid', var_name='term', value_name='expressed') melted_df = melted_df[melted_df.expressed == 1] # warnings and bells: sel = melted_df.term.str.contains(term) if len(melted_df[sel].term.unique()) > 1: print('Warning: Provided term matches more than one ontological term.') genes = melted_df[sel].wbid if len(genes) == 0: raise ValueError('Provided term is not in ontology dictionary') ind = (df.qval < q) & (df[gene].isin(genes)) fig, ax = plt.subplots() if swarm: ax = sns.swarmplot(x='genotype', y='b', data=df[ind]) else: ax = sns.violinplot(x='genotype', y='b', data=df[ind]) return ax, genes
5,354,224
def add_emote_command_handler(update, context):
    """
    CommandHandler that adds emotes from a specific channel to the bots cache.
    Format: /add <channelid>.
    Emotes are determined with queries to the twitchemotes.com API.
    """
    try:
        channel_id = int(context.args[0])
        cached_channels, _ = create_or_get_emote_data(context)
    except ValueError:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Argument channel id must be a whole number.",
        )
        # Bail out: channel_id is not defined past this point.
        return
    except IndexError:
        context.bot.send_message(
            chat_id=update.effective_chat.id, text="Missing argument: channel id."
        )
        return

    if channel_id in cached_channels:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Channel was already added to the bot, but I'm gonna check if there are new emotes.",
        )

    channel_api_url = f"https://api.twitchemotes.com/api/v4/channels/{channel_id}"
    resp = requests.get(channel_api_url)
    if resp.status_code == 404:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text=f"Error: channel with id {channel_id} not found.",
        )
        return
    if resp.status_code != 200:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Error: can't reach twitchemotes API.",
        )
        return

    try:
        resp_emotes = resp.json()["emotes"]
    except KeyError:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Error: can't read response from twitchemotes API.",
        )
        return

    context.job_queue.run_repeating(
        cache_stickers,
        interval=5,
        context={"resp_emotes": resp_emotes, "chat_id": update.message.chat_id},
    )
    cached_channels.append(channel_id)
5,354,225
def lab_equality(lab1, lab2):
    """
    Check if two labs are identical
    """
    if lab1["ncolumns"] != lab2["ncolumns"] or lab1["nlines"] != lab2["nlines"]:
        return False
    return all(set(lab1[cell]) == set(lab2[cell])
               for cell in lab1.keys() if type(cell) != type("a"))
5,354,226
def read_wwm(filename_or_fileglob, chunks={}, convert_wind_vectors=True): """Read Spectra from SWAN native netCDF format. Args: - filename_or_fileglob (str): filename or fileglob specifying multiple files to read. - chunks (dict): chunk sizes for dimensions in dataset. By default dataset is loaded using single chunk for all dimensions (see xr.open_mfdataset documentation). - convert_wind_vectors (bool): choose it to convert wind vectors into speed / direction data arrays. Returns: - dset (SpecDataset): spectra dataset object read from ww3 file. Note: - If file is large to fit in memory, consider specifying chunks for 'time' and/or 'station' dims. """ dset = xr.open_mfdataset(filename_or_fileglob, chunks=chunks) _units = dset.AC.attrs.get("units", "") dset = dset.rename( { "nfreq": attrs.FREQNAME, "ndir": attrs.DIRNAME, "nbstation": attrs.SITENAME, "AC": attrs.SPECNAME, "lon": attrs.LONNAME, "lat": attrs.LATNAME, "DEP": attrs.DEPNAME, "ocean_time": attrs.TIMENAME, } ) # Calculating wind speeds and directions if convert_wind_vectors and "Uwind" in dset and "Vwind" in dset: dset[attrs.WSPDNAME], dset[attrs.WDIRNAME] = uv_to_spddir( dset["Uwind"], dset["Vwind"], coming_from=True ) # Setting standard names and storing original file attributes set_spec_attributes(dset) dset[attrs.SPECNAME].attrs.update( {"_units": _units, "_variable_name": attrs.SPECNAME} ) # Assigning spectral coordinates #import ipdb; ipdb.set_trace() dset[attrs.FREQNAME] = dset.spsig / (2 * np.pi) # convert rad to Hz dset[attrs.DIRNAME] = dset.spdir # converting Action to Energy density and adjust density to Hz dset[attrs.SPECNAME] = dset[attrs.SPECNAME] * dset.spsig * (2 * np.pi) # Converting from radians dset[attrs.DIRNAME] *= R2D dset[attrs.SPECNAME] /= R2D # Returns only selected variables, transposed to_drop = [ dvar for dvar in dset.data_vars if dvar not in [ attrs.SPECNAME, attrs.WSPDNAME, attrs.WDIRNAME, attrs.DEPNAME, attrs.LONNAME, attrs.LATNAME, ] ] dims = [d for d in ["time", "site", "freq", "dir"] if d in dset.efth.dims] return dset.drop(to_drop).transpose(*dims)
5,354,227
def p_require_key_lst(p):
    """require_key_lst : require_key require_key_lst
                       | require_key"""
    if len(p) == 2:
        p[0] = [p[1]]
    elif len(p) == 3:
        p[0] = [p[1]] + p[2]
5,354,228
def is_sorted(t):
    """Checks whether a list is sorted.

    t: list

    returns: boolean
    """
    return t == sorted(t)
5,354,229
def create_container_port_mappings(container):
    """
    Create the port mappings for the given container.

    :param container: The container to create the mappings for.
    """
    ports = []

    image = None
    if container.is_image_based():
        image = container.image
    elif container.is_clone() and container.clone_of.is_image_based():
        image = container.clone_of.image

    if image:
        protected_port = image.protected_port
        public_ports = image.public_ports

        if protected_port:
            mapping = PortMapping(
                server=container.server,
                container=container,
                external_port=PortMapping.get_available_server_port(container.server),
                internal_port=protected_port
            )
            mapping.save()
            ports.append({
                ContainerBackend.PORT_MAPPING_KEY_ADDRESS: mapping.server.internal_ip,
                ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
                ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
            })

        if public_ports:
            for port in public_ports.split(','):
                mapping = PortMapping(
                    server=container.server,
                    container=container,
                    external_port=PortMapping.get_available_server_port(container.server),
                    internal_port=port
                )
                mapping.save()
                ports.append({
                    ContainerBackend.PORT_MAPPING_KEY_ADDRESS: '0.0.0.0',
                    ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
                    ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
                })

    return ports
5,354,230
async def test_cleanup_nopbar():
    """Do nothing if no tqdm pbar has been created"""
    state = Namespace(progressbar={})
    cleanup = TQDMCleanup(None, state)
    await cleanup(42)
    assert cleanup._prev is None
5,354,231
def _get_dep_for_package(package, platform):
    """
    Convert arguments in the `package` parameter to actual deps.
    """
    if is_list(package) or is_tuple(package):
        package, _ = package

    # TODO: ghc-8.4.4
    if (package == "compact" and
            _get_ghc_version(platform) == "8.4.4"):
        package = "ghc-compact"

    if package in _get_internal_ghc_packages(platform):
        project = "ghc"
    else:
        project = "stackage-lts"

    return target_utils.ThirdPartyRuleTarget(project, package)
5,354,232
def get_caster(typehint: TypeHint) -> Callable[..., Any]:
    """Returns a conversion class most appropriate for the supplied type hint.

    Potential matches are checked in order from most to least specific to
    account for overlapping types (e.g. ABCs).
    """
    if typehint in (Any, None):
        return untyped_caster

    origin = get_origin(typehint)
    if origin in (Union, UnionType):
        return union_caster(typehint)

    typecasters: Dict[TypeHintTuple, Callable[..., Any]] = {
        (bytes,): str.encode,
        (str,): str,
        (dict,): json_caster(typehint),
        (bool,): bool_caster,
        (Sequence, Set): collection_caster(typehint),
        (date, time): datetime_caster(typehint),
        (Pattern,): pattern_caster(typehint),
        (IO, IOBase): io_caster(typehint),
        (Literal,): literal_caster(typehint),
        (Enum,): enum_caster(typehint),
    }

    for cls, caster in typecasters.items():
        if typehint in cls:
            return caster
        if origin in cls and origin is not None:
            return caster
        if issubtype(typehint, cls):
            return caster

    return generic_caster(typehint)
5,354,233
def values(series):
    """Count the values and sort.

    series: pd.Series

    returns: series mapping from values to frequencies
    """
    return series.value_counts(dropna=False).sort_index()
5,354,234
def build_conda(force_conda, render, repo, use_pythonpath, output_folder): """Build the conda package for the current source tree.""" build_subcommand = "mambabuild" if force_conda or not has_mambabuild(): build_subcommand = "build" if render: build_subcommand = "render" requirements_data, _ = katana_requirements.load() # Approach: Generate a variant file with the versions we need in it and explicitly reference the versions from the # meta.yaml file. This will make adding dependencies less clean that I would like, but see failed approaches. # Failed approaches: # 1. Use an append or clobber file to override parts of meta.yaml. This either does not override enough (the former # doesn't allow making version requirement stricter), or overrides too much (replaces whole lists, so jinja # expression would need to be rerun, but cannot be.) # 2. Patch meta.yaml itself. This does not work easily because pyyaml outputs yaml in a format that conda doesn't # support. Conda does some regex parsing on the yaml, so it doesn't support yaml in full generality, and requires # white space in cases where pyyaml doesn't put it. # 3. Call the conda build pipeline directly using Python. This does not work because conda does not provide a hook # (that I could find) where I need it: between yaml loading and dependency resolution. I think this is partly # because the two phases are weirdly intertwined with several cases where jinja is reexecuted based on # information from a solve done using a previous parse. # Basically, this is a total mess, and I spent way too much time on it. I hate everything. variant_config = {} for p in requirements_data.select_packages(["conda", "conda/dev"], OutputFormat.CONDA): variant_config[p.name_for(requirements_data.packaging_systems["conda"]).replace("-", "_")] = [ p.version_for(requirements_data.packaging_systems["conda"]) ] # Several variant variables have special handling. Remove them. They are set manually as needed. for k in ("python", "numpy", "cxx_compiler"): del variant_config[k] with tempfile.NamedTemporaryFile(mode="wt", prefix="variant-file-", suffix=".yaml") as variant_file: yaml.dump(variant_config, variant_file, Dumper=yaml.SafeDumper) variant_file.flush() build_command_line = [ "conda", build_subcommand, "--channel", "conda-forge", "--channel", "katanagraph", "--variant-config-files", variant_file.name, repo / "conda_recipe", ] if output_folder: build_command_line += ["--output-folder", output_folder] os.environ["KATANA_VERSION"] = katana_version.version.format_version_pep440( katana_version.version.get_version() ) os.environ["KATANA_TEST_DATASETS"] = str(find_test_datasets_root(repo)) if not use_pythonpath: # Clear python path because if it is set, it will leak into conda build potentially causing issues. os.environ["PYTHONPATH"] = "" try: subprocess.check_call(build_command_line, cwd=SCRIPTS_DIR_PATH.parent.parent) except subprocess.CalledProcessError: print(open(variant_file.name).read()) raise
5,354,235
def use_id(type):
    """Declare that this configuration option should point to an ID with the given type."""

    def validator(value):
        check_not_templatable(value)
        if value is None:
            return core.ID(None, is_declaration=False, type=type)
        if (
            isinstance(value, core.ID)
            and value.is_declaration is False
            and value.type is type
        ):
            return value

        return core.ID(validate_id_name(value), is_declaration=False, type=type)

    return validator
5,354,236
def social_auth_user(backend, uid, user=None, *args, **kwargs):
    """Return UserSocialAuth account for backend/uid pair or None if it
    doesn't exist.

    Raise AuthAlreadyAssociated if the UserSocialAuth entry belongs to another
    user.
    """
    social_user = UserSocialAuth.get_social_auth(backend.name, uid)
    if social_user:
        if user and social_user.user != user:
            msg = ugettext('This %(provider)s account is already in use.')
            raise AuthAlreadyAssociated(backend, msg % {
                'provider': backend.name
            })
        elif not user:
            user = social_user.user
    return {'social_user': social_user, 'user': user}
5,354,237
def bbox_mask(t_arr, x_arr, limits):
    """ Just a wrapper for np.where """
    # NOTE: t_arr is included but no longer used
    mask = np.where(
        (x_arr >= limits[0]) &
        (x_arr <= limits[1]))[0]
    return mask
5,354,238
def kdeplot_2d_clevels(xs, ys, levels=11, **kwargs): """ Plot contours at specified credible levels. Arguments --------- xs: array samples of the first variable. ys: array samples of the second variable, drawn jointly with `xs`. levels: float, array if float, interpreted as number of credible levels to be equally spaced between (0, 1); if array, interpreted as list of credible levels. xlow: float lower bound for abscissa passed to Bounded_2d_kde (optional). xigh: float upper bound for abscissa passed to Bounded_2d_kde (optional). ylow: float lower bound for ordinate passed to Bounded_2d_kde (optional). yhigh: float upper bound for ordinate passed to Bounded_2d_kde (optional). ax: Axes matplotlib axes on which to plot (optional). kwargs: additional arguments passed to plt.contour(). """ try: xs = xs.values.astype(float) ys = ys.values.astype(float) except AttributeError: pass if all(~isfinite(xs)) or all(~isfinite(ys)): return None try: len(levels) f = 1 - np.array(levels) except TypeError: f = linspace(0, 1, levels+2)[1:-1] if kwargs.get('auto_bound', False): kwargs['xlow'] = min(xs) kwargs['xhigh'] = max(xs) kwargs['ylow'] = min(ys) kwargs['yhigh'] = max(ys) kde_kws = {k: kwargs.pop(k, None) for k in ['xlow', 'xhigh', 'ylow', 'yhigh']} k = Bounded_2d_kde(np.column_stack((xs, ys)), **kde_kws) size = max(10*(len(f)+2), 500) c = np.random.choice(len(xs), size=size) p = k(np.column_stack((xs[c], ys[c]))) i = argsort(p) l = array([p[i[int(round(ff*len(i)))]] for ff in f]) Dx = np.percentile(xs, 99) - np.percentile(xs, 1) Dy = np.percentile(ys, 99) - np.percentile(ys, 1) x = linspace(np.percentile(xs, 1)-0.1*Dx, np.percentile(xs, 99)+0.1*Dx, 128) y = linspace(np.percentile(ys, 1)-0.1*Dy, np.percentile(ys, 99)+0.1*Dy, 128) XS, YS = meshgrid(x, y, indexing='ij') ZS = k(np.column_stack((XS.flatten(), YS.flatten()))).reshape(XS.shape) ax = kwargs.pop('ax', gca()) kwargs['colors'] = kwargs.get('colors', [kwargs.pop('color', None),]) ax.contour(XS, YS, ZS, levels=l, **kwargs)
5,354,239
def test_gumbel_prob():
    """
    Test probability functions: passing value through construct.
    """
    net = GumbelProb()
    value = Tensor([0.5, 1.0], dtype=dtype.float32)
    ans = net(value)
    assert isinstance(ans, Tensor)
5,354,240
def test_addtional_connections(create):
    """Test additional connections to assembly connection."""
    conn1 = create(ConnectorItem)
    conn2 = create(ConnectorItem)
    conn3 = create(ConnectorItem)

    c1 = create(ComponentItem, UML.Component)
    c2 = create(ComponentItem, UML.Component)
    c3 = create(ComponentItem, UML.Component)

    iface = create(InterfaceItem, UML.Interface)
    iface.folded = Folded.ASSEMBLY

    # provide and require interface by components
    provide(c1.subject, iface.subject)
    require(c2.subject, iface.subject)
    require(c3.subject, iface.subject)

    # connect components
    connect(conn1, conn1.head, c1)
    connect(conn2, conn2.head, c2)
    connect(conn3, conn3.head, c3)

    # create assembly
    connect(conn1, conn1.tail, iface)
    connect(conn2, conn2.tail, iface)

    # test precondition
    assert conn1.subject and conn2.subject

    # additional connection
    connect(conn3, conn3.tail, iface)

    # test UML data model
    assert conn3.subject is conn1.subject
    assembly = conn1.subject
    assert 3 == len(assembly.end)
5,354,241
def normalization_reg_loss(input):
    """
    input: [..., 3]

    It computes the length of each vector and uses the L2 loss
    between the lengths and 1.
    """
    lengths = (input ** 2).sum(dim=-1).sqrt()
    loss_norm_reg = ((lengths - 1) ** 2).mean()
    return loss_norm_reg
5,354,242
def _get_window_size(offset, step_size, image_size):
    """
    Calculate window width or height. Usually the same as the block size,
    except at the end of the image, where only a fraction of the block size remains.

    :param offset: start column / row
    :param step_size: block width / height
    :param image_size: image width / height
    :return: window width / height
    """
    if offset + step_size > image_size:
        return image_size - offset
    else:
        return step_size
5,354,243
def osc_server(ip=ip_osc_server, port=port_server):
    """ sets up and runs the OSC server. """
    dispatch = dispatcher.Dispatcher()
    """
    dispatch.map("/surface-sentiments", surface_handler)
    dispatch.map("/reset", reset_handler)
    dispatch.map("/silent", silent_handler)
    """
    dispatch.map("/surface-sentiments", surface_handler)
    dispatch.map("/reset", reset_handler)
    dispatch.map("/silent", silent_handler)
    dispatch.map("/answer", answer_handler)
    dispatch.map("/refresh", refresh_handler)
    dispatch.map("/talking", talking_handler)
    dispatch.map("/end", end_handler)
    dispatch.map("/question", question_handler)
    dispatch.map("/thinking", thinking_handler)
    dispatch.map("/startsurface", surfacestart_handler)
    dispatch.map("/closesurface", surfacestop_handler)
    dispatch.map("/resetsurface", surfacereset_handler)
    dispatch.map("/newstate", new_state_handler)

    server = pythonosc.osc_server.ThreadingOSCUDPServer(
        (ip, port), dispatch)
    logger.info("Serving on {}".format(server.server_address))
    server.serve_forever()
5,354,244
def _ListCtrl_IsSelected(self, idx):
    """
    Returns ``True`` if the item is selected.
    """
    return (self.GetItemState(idx, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED) != 0
5,354,245
def sturm_liouville_function(x, y, p, p_x, q, f, alpha=0, nonlinear_exp=2):
    """Second order Sturm-Liouville Function defining y'' for Lu=f.

    This form is used because it is expected for Scipy's solve_ivp method.

    Keyword arguments:
    x -- independent variable
    y -- dependent variable
    p -- p(x) parameter
    p_x -- derivative of p wrt x
    q -- q(x) parameter
    f -- forcing function f(x)
    alpha -- nonlinear parameter
    nonlinear_exp -- exponent of nonlinear term
    """
    y_x = y[1]
    y_xx = -1*(p_x/p)*y[1] + (q/p)*y[0] + (q/p)*alpha*y[0]**nonlinear_exp - f/p
    return [y_x, y_xx]
5,354,246
def bn_update(loader, model, verbose=False, subset=None, **kwargs):
    """BatchNorm buffers update (if any).

    Performs 1 epoch to estimate buffers average using the train dataset.

    :param loader: train dataset loader for buffers average estimation.
    :param model: model being updated
    :return: None
    """
    if not check_bn(model):
        return
    model.train()
    device = model.model_device
    momenta = {}
    model.apply(reset_bn)
    model.apply(lambda module: _get_momenta(module, momenta))
    n = 0
    num_batches = len(loader)

    with torch.no_grad():
        if subset is not None:
            num_batches = int(num_batches * subset)
            loader = itertools.islice(loader, num_batches)
        if verbose:
            loader = tqdm.tqdm(loader, total=num_batches)

        for input, _ in loader:
            if device != 'cpu':
                input = input.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input)
            b = input_var.data.size(0)

            momentum = b / (n + b)
            for module in momenta.keys():
                module.momentum = momentum

            model(input_var, **kwargs)
            n += b

    model.apply(lambda module: _set_momenta(module, momenta))
5,354,247
def StopMasterDaemons():
    """Stop the master daemons on this node.

    Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

    @rtype: None

    """
    # TODO: log and report back to the caller the error failures; we
    # need to decide in which case we fail the RPC for this

    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
    if result.failed:
        logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                      " and error %s",
                      result.cmd, result.exit_code, result.output)
5,354,248
def write_detected_issue_summaries(document, speech_segments): """ Scans the speech segments for any detected issues, and if there are any then a new table is added to the document. :param document: Word document structure to write the table into :param speech_segments: Call transcript structures """ # Scan through the segments and extract the issues issues_detected = [] for turn in speech_segments: for issue in turn.segmentIssuesDetected: new_issue = {"Speaker": turn.segmentSpeaker} new_issue["Timestamp"] = turn.segmentStartTime new_issue["Text"] = turn.segmentText[issue["Begin"]:issue["End"]] # May need a prefix or suffix for partial text if issue["Begin"] > 0: new_issue["Text"] = "..." + new_issue["Text"] if issue["End"] < len(turn.segmentText): new_issue["Text"] = new_issue["Text"] + "..." issues_detected.append(new_issue) if issues_detected: # Start with a new single-column section document.add_section(WD_SECTION.CONTINUOUS) section_ptr = document.sections[-1]._sectPr cols = section_ptr.xpath('./w:cols')[0] cols.set(qn('w:num'), '1') write_custom_text_header(document, "Issues Detected") # Table header information table = document.add_table(rows=1, cols=3) table.style = document.styles[TABLE_STYLE_STANDARD] hdr_cells = table.rows[0].cells hdr_cells[0].text = "Speaker" hdr_cells[1].text = "Turn Time" hdr_cells[2].text = "Detected Text" # Output each row for issue in issues_detected: # First column is the speaker row_cells = table.add_row().cells row_cells[0].text = issue["Speaker"] row_cells[1].text = convert_timestamp(issue["Timestamp"]) row_cells[2].text = issue["Text"] # Formatting transcript table widths widths = (Cm(2.2), Cm(2.2), Cm(12.8)) for row in table.rows: for idx, width in enumerate(widths): row.cells[idx].width = width # Finish with some spacing document.add_paragraph()
5,354,249
def item_view_mouse_click(item_view, row, column=0, button=QtCore.Qt.LeftButton, modifier=QtCore.Qt.NoModifier):
    """
    Helper method version of 'item_view_index_mouse_click' using a row, column instead of a QModelIndex

    item_view: The QAbstractItemView instance
    row: The requested row index
    column: The requested column index (defaults to 0 in case of single column)
    """
    index = get_item_view_index(item_view, row, column)
    item_view_index_mouse_click(item_view, index, button, modifier)
5,354,250
def logged(func):
    """Pipes exceptions through root logger"""
    @wraps(func)
    def deco(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            logging.exception(f"{func.__name__}:\n{e}")
            print(f"Exception logged to {LOGFILE}")
            sys.exit(1)
        return result
    return deco
5,354,251
def hamming_set(index: str, d: int = 1, include_N: bool = True):
    """Given an index of bases in {ACGTN}, generate all indexes within
    hamming distance d of the input

    :param index: string representing the index sequence
    :param d: maximum distance to allow
    :param include_N: include N when generating possible indexes
    :return: set of indexes within hamming distance d
    """
    base_d = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}

    new_base = [i * np.eye(len(index), dtype=np.uint8) for i in range(4 + include_N)]
    other_bases = 1 - np.eye(len(index), dtype=np.uint8)

    h_set = {tuple(base_d[c] for c in index)}
    for _ in range(d):
        for a in list(map(np.array, h_set)):
            h_set.update(t for i in new_base for t in map(tuple, a * other_bases + i))

    h_set = {"".join("ACGTN"[i] for i in h) for h in h_set}

    return h_set
5,354,252
def _get_epochs_info(raw_fname):
    """Get epoch info."""
    from mne import read_epochs

    epochs = read_epochs(raw_fname)
    return epochs.info
5,354,253
def _load_specs(ctx):
    """ Helper function to find all specs stored in _WAF_/specs/*.json """
    if hasattr(ctx, 'loaded_specs_dict'):
        return

    ctx.loaded_specs_dict = {}
    spec_file_folder = ctx.root.make_node(Context.launch_dir).make_node('/_WAF_/specs')
    spec_files = spec_file_folder.ant_glob('**/*.json')

    for file in spec_files:
        try:
            spec = ctx.parse_json_file(file)
            spec_name = str(file).split('.')[0]
            ctx.loaded_specs_dict[spec_name] = spec
        except Exception as e:
            ctx.cry_file_error(str(e), file.abspath())

    # For any enabled game project, see if it has a WAFSpec subfolder as well
    enabled_game_projects_list = split_comma_delimited_string(ctx.options.enabled_game_projects, True)
    for project in enabled_game_projects_list:
        game_project_spec = ctx.path.make_node(project).make_node('WAFSpec').make_node('{}.json'.format(project))
        if os.path.exists(game_project_spec.abspath()):
            try:
                spec = ctx.parse_json_file(game_project_spec)
                if project in ctx.loaded_specs_dict:
                    Logs.warn("[WARN] Game project WAF spec '{}' in game project folder '{}' collides with a spec in _WAF_/specs. "
                              "Overriding the _WAF_/specs version with the one in the game project"
                              .format(project, project))
                ctx.loaded_specs_dict[project] = spec
            except Exception as e:
                ctx.cry_file_error(str(e), game_project_spec.abspath())
5,354,254
def test_bounding_rect_of_boxes(mock_gui): """Test that calculating the bounding rect of a set of Boxes works correctly.""" boxes = [] for i in range(1, 4): box = make_dummy_box() box.position = 100 * i, 100 * i boxes.append(box) rect = bounding_rect_of_rects((box.world_rect for box in boxes)) assert rect == cocos.rect.Rect(100, 100, 300, 300)
5,354,255
def is_private(key): """ Returns whether or not an attribute is private. A private attribute looks like: __private_attribute__. :param key: The attribute key :return: bool """ return key.startswith("__") and key.endswith("__")
5,354,256
async def img(filename) -> Response: """Image static endpoint.""" return await send_from_directory("img", filename)
5,354,257
def ref_genome_info(info, config, dirs): """Retrieve reference genome information from configuration variables. """ genome_build = info.get("genome_build", None) (_, sam_ref) = get_genome_ref(genome_build, config["algorithm"]["aligner"], dirs["galaxy"]) return genome_build, sam_ref
5,354,258
def load_text(file_arg):
    """
    General function used to load data from a text file
    """
    file_handle = validate_file_for_reading(file_arg)
    try:
        df = pd.io.parsers.read_csv(file_handle, delim_whitespace=True,
                                    comment='#', skip_blank_lines=True,
                                    engine='c')
    except Exception:
        raise SortSeqError(
            'Could not interpret text file %s as dataframe.' % repr(file_handle))
    return df.dropna(axis=0, how='all')
5,354,259
def entropy_from_mnemonic(mnemonic: Mnemonic, lang: str = "en") -> BinStr: """Convert mnemonic sentence to Electrum versioned entropy.""" # verify that it is a valid Electrum mnemonic sentence _ = version_from_mnemonic(mnemonic) indexes = _indexes_from_mnemonic(mnemonic, lang) entropy = _entropy_from_indexes(indexes, lang) return entropy
5,354,260
def main(argv): """Delete/split/merge/... labels in a labelvolume.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser = parse.parse_remap_labels(parser) parser = parse.parse_common(parser) args = parser.parse_args() remap_labels( args.inputfile, args.delete_labels, args.delete_files, args.except_files, args.merge_labels, args.merge_files, args.split_labels, args.split_files, args.aux_labelvolume, args.min_labelsize, args.min_segmentsize, args.keep_only_largest, args.conncomp, args.nifti_output, args.nifti_transpose, args.outputfile, args.save_steps, args.protective, )
5,354,261
def delete_data_analysis(analysis_id: UUID, token: HTTPAuthorizationCredentials = Depends(auth)): """ Delete a data analysis record. You may only delete records in your private space, or that are associated with a collab of which you are an administrator. """ return delete_computation(omcmp.DataAnalysis, analysis_id, token)
5,354,262
def get_current_blk_file(current_file_number) -> str: """ Returns the current blk file name with file format. """ return get_current_file_name(blk_file_format(current_file_number))
5,354,263
def run(vhd_list, output_file): """Runs this backend.""" for vhd in vhd_list.order: output_file.write('%s %s %04d %s\n' % ( 'top' if vhd in vhd_list.top else 'dep', vhd.lib, vhd.version, vhd.fname))
5,354,264
def exec_cmd_status(ceph_installer, commands): """ Execute command Args: ceph_installer: installer object to exec cmd commands: list of commands to be executed Returns: Boolean """ for cmd in commands: out, err = ceph_installer.exec_command(sudo=True, cmd=cmd) out, err = out.read().decode().strip(), err.read().decode().strip() logger.info("Command Response : {} {}".format(out, err)) return True
5,354,265
def BuildIsAvailable(bucket_name, remote_path):
    """Checks whether a build is currently archived at some place."""
    logging.info('Checking existence: gs://%s/%s' % (bucket_name, remote_path))
    try:
        exists = cloud_storage.Exists(bucket_name, remote_path)
        logging.info('Exists? %s' % exists)
        return exists
    except cloud_storage.CloudStorageError:
        return False
5,354,266
def _channel_name(row, prefix="", suffix=""): """Formats a usable name for the repeater.""" length = 16 - len(prefix) name = prefix + " ".join((row["CALL"], row["CITY"]))[:length] if suffix: length = 16 - len(suffix) name = ("{:%d.%d}" % (length, length)).format(name) + suffix return name
5,354,267
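A small illustration of `_channel_name` with a hypothetical repeater row (the `CALL`/`CITY` keys follow the field names used in the function).

row = {"CALL": "W1AW", "CITY": "Newington"}   # hypothetical CSV row
print(_channel_name(row))                     # 'W1AW Newington'
print(_channel_name(row, prefix="A-"))        # 'A-W1AW Newington' (capped at 16 chars)
print(_channel_name(row, suffix="+"))         # name padded to 15 chars, then '+'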
def analyze1127_and_output(ps: List[List[RunningParticle]], gantry_number: int, total_particle_number: int,
                           momentum_dispersion_list: List[float]) -> None:
    """
    Analyze the tracking results and write them to output.txt

    The output looks like:
    1 2.9106590546670255 3.9272244111035284 1.9234584254384846 0.45806934921638964

    Parameters
    ----------
    ps  all particles after tracking
    gantry_number  number of gantries / groups
    total_particle_number  total number of particles
    momentum_dispersion_list  list of momentum dispersions

    Returns
    -------
    None
    """
    if gantry_number != len(ps):
        raise ValueError(f"Data error: gantry_number {gantry_number} != len(ps) {len(ps)}")
    if int(total_particle_number) != int(len(ps[0]) * gantry_number):
        raise ValueError(
            f"Data error: total_particle_number({total_particle_number})!=len(ps[0])*gantry_number({len(ps[0]) * gantry_number})")

    particle_number_per_plane_per_dp: int = total_particle_number // len(momentum_dispersion_list) // 2 // gantry_number

    result: List[List[float]] = []

    # for each gantry / group
    for gid in range(gantry_number):
        # the first column stores the gantry index
        result_per_group: List[float] = [gid + 1]

        # the List[RunningParticle] obtained by tracking this gantry group
        particle_group = ps[gid]

        # convert to phase-space particles
        pp_group = PhaseSpaceParticle.create_from_running_particles(
            IP_ISOC, IP_ISOC.get_natural_coordinate_system(), particle_group)

        # project onto the x-xp and y-yp planes
        ppx_track = PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(pp_group)
        ppy_track = PhaseSpaceParticle.phase_space_particles_project_to_yyp_plane(pp_group)

        # for the x plane, use the first half of the List[RunningParticle]
        statistic = Statistic()
        outer_statistic = Statistic()

        # x plane
        for i in range(0, particle_number_per_plane_per_dp * len(momentum_dispersion_list),
                       particle_number_per_plane_per_dp):
            for j in range(0, particle_number_per_plane_per_dp):
                pt = ppx_track[i + j]
                statistic.add(pt[0])
            outer_statistic.add((statistic.max() - statistic.min()) / MM / 2.0)
            statistic.clear()

        # y plane
        for i in range(particle_number_per_plane_per_dp * len(momentum_dispersion_list),
                       particle_number_per_plane_per_dp * len(momentum_dispersion_list) * 2,
                       particle_number_per_plane_per_dp):
            for j in range(0, particle_number_per_plane_per_dp):
                pt = ppy_track[i + j]
                statistic.add(pt[0])
            outer_statistic.add((statistic.max() - statistic.min()) / MM / 2.0)
            statistic.clear()

        result_per_group.append(outer_statistic.var())
        result_per_group.append(outer_statistic.max())
        outer_statistic.clear()

        result.append(result_per_group)

    np.savetxt('output.txt', np.array(result))
5,354,268
def wordsinunit(unit): """Counts the words in the unit's source and target, taking plurals into account. The target words are only counted if the unit is translated.""" (sourcewords, targetwords) = (0, 0) if isinstance(unit.source, multistring): sourcestrings = unit.source.strings else: sourcestrings = [unit.source or ""] for s in sourcestrings: sourcewords += wordcount(s) if not unit.istranslated(): return sourcewords, targetwords if isinstance(unit.target, multistring): targetstrings = unit.target.strings else: targetstrings = [unit.target or ""] for s in targetstrings: targetwords += wordcount(s) return sourcewords, targetwords
5,354,269
def check_instance(arg, types, allow_none=False, message='Argument "%(string)s" is not of type %(expected)s, but of type %(actual)s', level=1): """ >>> check_instance(1, int) 1 >>> check_instance(3.5, float) 3.5 >>> check_instance('hello', str) 'hello' >>> check_instance([1, 2, 3], list) [1, 2, 3] >>> check_instance(1, (int, float)) 1 >>> check_instance(3.5, (int, float)) 3.5 >>> check_instance('hello', (str, list)) 'hello' >>> check_instance([1, 2, 3], (str, list)) [1, 2, 3] >>> check_instance(1, float) Traceback (most recent call last): ... AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'> >>> check_instance(3.5, int) Traceback (most recent call last): ... AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'> >>> check_instance('hello', list) Traceback (most recent call last): ... AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'> >>> check_instance([1, 2, 3], str) Traceback (most recent call last): ... AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'> >>> check_instance(1, (list, str)) Traceback (most recent call last): ... AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'> >>> check_instance(3.5, (list, str)) Traceback (most recent call last): ... AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'> >>> check_instance('hello', (int, float)) Traceback (most recent call last): ... AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'> >>> check_instance([1, 2, 3], (int, float)) Traceback (most recent call last): ... AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'> >>> check_instance(None, int) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type <class 'int'>, but of type <class 'NoneType'> >>> check_instance(None, float) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type <class 'float'>, but of type <class 'NoneType'> >>> check_instance(None, str) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type <class 'str'>, but of type <class 'NoneType'> >>> check_instance(None, list) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type <class 'list'>, but of type <class 'NoneType'> >>> check_instance(None, (int, float)) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type (<class 'int'>, <class 'float'>), but of type <class 'NoneType'> >>> check_instance(None, (str, list)) Traceback (most recent call last): ... AssertionError: Argument "None" is not of type (<class 'str'>, <class 'list'>), but of type <class 'NoneType'> >>> check_instance(1, int, allow_none=True) 1 >>> check_instance(3.5, float, allow_none=True) 3.5 >>> check_instance('hello', str, allow_none=True) 'hello' >>> check_instance([1, 2, 3], list, allow_none=True) [1, 2, 3] >>> check_instance(1, (int, float), allow_none=True) 1 >>> check_instance(3.5, (int, float), allow_none=True) 3.5 >>> check_instance('hello', (str, list), allow_none=True) 'hello' >>> check_instance([1, 2, 3], (str, list), allow_none=True) [1, 2, 3] >>> check_instance(1, float, allow_none=True) Traceback (most recent call last): ... 
AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'> >>> check_instance(3.5, int, allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'> >>> check_instance('hello', list, allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'> >>> check_instance([1, 2, 3], str, allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'> >>> check_instance(1, (list, str), allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'> >>> check_instance(3.5, (list, str), allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'> >>> check_instance('hello', (int, float), allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'> >>> check_instance([1, 2, 3], (int, float), allow_none=True) Traceback (most recent call last): ... AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'> >>> check_instance(None, int, allow_none=True) >>> check_instance(None, float, allow_none=True) >>> check_instance(None, str, allow_none=True) >>> check_instance(None, list, allow_none=True) >>> check_instance(None, (int, float), allow_none=True) >>> check_instance(None, (int, float), allow_none=True) >>> check_instance(None, (str, list), allow_none=True) >>> check_instance(None, (str, list), allow_none=True) """ check(is_instance(arg, types, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg), 'expected': types}, level) return arg
5,354,270
def spec_lnlike(params, labels, grid_param_list, lbda_obs, spec_obs, err_obs, dist, model_grid=None, model_reader=None, em_lines={}, em_grid={}, dlbda_obs=None, instru_corr=None, instru_fwhm=None, instru_idx=None, filter_reader=None, AV_bef_bb=False, units_obs='si', units_mod='si', interp_order=1): """ Define the likelihood log-function. Parameters ---------- params : tuple Set of models parameters for which the model grid has to be interpolated. labels: Tuple of strings Tuple of labels in the same order as initial_state, that is: - first all parameters related to loaded models (e.g. 'Teff', 'logg') - then the planet photometric radius 'R', in Jupiter radius - (optionally) the flux of emission lines (labels should match those in the em_lines dictionary), in units of the model spectrum (times mu) - (optionally) the optical extinction 'Av', in mag - (optionally) the ratio of total to selective optical extinction 'Rv' - (optionally) 'Tbb1', 'Rbb1', 'Tbb2', 'Rbb2', etc. for each extra bb contribution. grid_param_list : list of 1d numpy arrays/lists OR None - If list, should contain list/numpy 1d arrays with available grid of model parameters. - Set to None for a pure n-blackbody fit, n=1,2,... - Note1: model grids should not contain grids on radius and Av, but these should still be passed in initial_state (Av optional). - Note2: for a combined grid model + black body, just provide the grid parameter list here, and provide values for 'Tbbn' and 'Rbbn' in initial_state, labels and bounds. lbda_obs : numpy 1d ndarray or list Wavelength of observed spectrum. If several instruments, should be ordered per instrument, not necessarily as monotonically increasing wavelength. Hereafter, n_ch = len(lbda_obs). spec_obs : numpy 1d ndarray or list Observed spectrum for each value of lbda_obs. err_obs : numpy 1d/2d ndarray or list Uncertainties on the observed spectrum. If 2d array, should be [2,n_ch] where the first (resp. second) column corresponds to lower (upper) uncertainty, and n_ch is the length of lbda_obs and spec_obs. dist : float Distance in parsec, used for flux scaling of the models. model_grid : numpy N-d array, optional If provided, should contain the grid of model spectra for each free parameter of the given grid. I.e. for a grid of n_T values of Teff and n_g values of Logg, the numpy array should be n_T x n_g x n_ch x 2, where n_ch is the number of wavelengths for the observed spectrum, and the last 2 dims are for wavelength and fluxes respectively. If provided, takes precedence over model_name/model_reader. model_reader : python routine, opt External routine that reads a model file and returns a 2D numpy array, where the first column corresponds to wavelengths, and the second contains model values. See example routine in model_interpolation() description. em_lines: dictionary, opt Dictionary of emission lines to be added on top of the model spectrum. Each dict entry should be the name of the line, assigned to a tuple of 4 values: 1) the wavelength (in mu); 2) a string indicating whether line intensity is expressed in flux ('F'), luminosity ('L') or log(L/LSun) ("LogL"); 3) the FWHM of the gaussian (or None if to be set automatically); 4) whether the FWHM is expressed in 'nm', 'mu' or 'km/s'. The third and fourth can also be set to None. In that case, the FWHM of the gaussian will automatically be set to the equivalent width of the line, calculated from the flux to be injected and the continuum level (measured in the grid model to which the line is injected). 
        Examples: em_lines = {'BrG':(2.1667,'F',None, None)};
        em_lines = {'BrG':(2.1667,'LogL', 100, 'km/s')}
    em_grid: dictionary pointing to lists, opt
        Dictionary where each entry corresponds to an emission line and points
        to a list of values to inject for emission line fluxes. For computation
        efficiency, interpolation will be performed between the points of this
        grid during the MCMC sampling. Dict entries should match labels and
        em_lines.
    dlbda_obs: numpy 1d ndarray or list, optional
        Spectral channel width for the observed spectrum. It should be
        provided IF one wants to weigh each point based on the spectral
        resolution of the respective instruments (as in Olofsson et al. 2016).
    instru_corr : numpy 2d ndarray or list, optional
        Spectral correlation throughout post-processed images in which the
        spectrum is measured. It is specific to the combination of instrument,
        algorithm and radial separation of the companion from the central star.
        Can be computed using distances.spectral_correlation(). In case of
        a spectrum obtained with different instruments, build it with
        distances.combine_corrs(). If not provided, it will consider the
        uncertainties in each spectral channel to be independent. See Greco &
        Brandt (2017) for details.
    instru_fwhm : float or list, optional
        The instrumental spectral fwhm provided in nm. This is used to convolve
        the model spectrum. If several instruments are used, provide a list of
        instru_fwhm values, one for each instrument whose spectral resolution
        is coarser than the model - including broad band filter FWHM if
        relevant.
    instru_idx: numpy 1d array, optional
        1d array containing an index representing each instrument used
        to obtain the spectrum, label them from 0 to n_instru. Zero for points
        that don't correspond to any instru_fwhm provided above, and i in
        [1,n_instru] for points associated to instru_fwhm[i-1]. This parameter
        must be provided if the spectrum consists of points obtained with
        different instruments.
    filter_reader: python routine, optional
        External routine that reads a filter file and returns a 2D numpy array,
        where the first column corresponds to wavelengths, and the second
        contains transmission values. Important: if not provided, but strings
        are detected in instru_fwhm, the default format assumed for the files:
        - first row containing header
        - starting from 2nd row: 1st column: WL in mu, 2nd column: transmission
        Note: files should all have the same format and wavelength units.
    AV_bef_bb: bool, optional
        If both extinction and an extra bb component are free parameters,
        whether to apply extinction before adding the BB component (e.g.
        extinction mostly from circumplanetary dust) or after the BB component
        (e.g. mostly interstellar extinction).
    units_obs : str, opt {'si','cgs','jy'}
        Units of observed spectrum. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu
        or 'jy' for janskys.
    units_mod: str, opt {'si','cgs','jy'}
        Units of the model. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu or 'jy'
        for janskys. If different to units_obs, the spectrum units will be
        converted.
    interp_order: int, opt, {-1,0,1}
        Interpolation mode for model interpolation.
        -1: log interpolation (i.e. linear interpolation on log(Flux))
        0: nearest neighbour model.
        1: Order 1 spline interpolation.

    Returns
    -------
    out: float
        The log of the likelihood.
""" if grid_param_list is not None: if model_grid is None and model_reader is None: msg = "model_name and model_reader must be provided" raise TypeError(msg) lbda_mod, spec_mod = make_model_from_params(params, labels, grid_param_list, dist, lbda_obs, model_grid, model_reader, em_lines, em_grid, dlbda_obs, instru_fwhm, instru_idx, filter_reader, AV_bef_bb, units_obs, units_mod, interp_order) # evaluate the goodness of fit indicator chi = goodness_of_fit(lbda_obs, spec_obs, err_obs, lbda_mod, spec_mod, dlbda_obs=dlbda_obs, instru_corr=instru_corr, instru_fwhm=instru_fwhm, instru_idx=instru_idx, filter_reader=filter_reader, plot=False, outfile=None) # log likelihood lnlikelihood = -0.5 * chi return lnlikelihood
5,354,271
def generic_list(request): """Returns a list of all of the document IDs in the matched DocStore.""" return umbrella_from_request(request).get_doc_ids()
5,354,272
def japan_results(request):
    """
    view function returns template that displays Japan-specific photos
    """
    images = Image.filter_images_by_location(location_id=12)
    return render(request, "all_pictures/japan.html", {"images": images})
5,354,273
def add_yaml_literal_block(yaml_object):
    """
    Register a representer on the given yaml object so that `literal_block`
    strings are dumped as yaml literal blocks ('|' style) during yaml dumping.

    yaml docs: see "Block mappings" in https://pyyaml.org/wiki/PyYAMLDocumentation
    """
    def literal_str_representer(dumper, data):
        return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")

    return yaml_object.add_representer(literal_block, literal_str_representer)
5,354,274
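A hedged usage sketch for `add_yaml_literal_block`; it assumes `literal_block` is a `str` subclass defined in the same module (it is referenced above but not shown, so a stand-in is declared here).

import yaml

class literal_block(str):
    # Marker type for strings that should be dumped with the '|' literal style.
    pass

add_yaml_literal_block(yaml)

print(yaml.dump({"script": literal_block("echo hello\necho world\n")}))
# script: |
#   echo hello
#   echo world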
def _spans_to_array(
    doc: Doc,
    sources: List[str],
    label2idx: Dict[str, int],
    labels_without_prefix: Set[str],
    prefixes: Optional[Set[str]] = None,
    warn_missing_labels: bool = False
) -> np.ndarray:
    """Convert the annotations of a spacy document into a 2D array.
    Each row corresponds to a token, and each column to a labelling
    source. In other words, the value at (i,j) represents the prediction
    of source j for token i. This prediction is expressed as the
    index of the label in the labels.

    NB:
    - Sources should be a list of labelling sources. If None, all sources
      are employed.
    - If `prefixes` are provided (e.g., [I, B, L]), it is assumed that the
      labels in `label2idx` contain the prefixes (e.g., I-PERSON, B-PERSON).
    - If `prefixes` are not provided, it is assumed that the labels in
      `label2idx` do not contain prefixes (e.g., PERSON).
    - We also assume the O label is at position 0.
    """
    if sources is None:
        sources = list(doc.spans.keys())

    if warn_missing_labels:
        missing_labels = set()

    # Creating the numpy array itself
    data = np.zeros((len(doc), len(sources)), dtype=np.int16)

    for source_index, source in enumerate(sources):
        for span in doc.spans.get(source, []):

            if span.label_ not in labels_without_prefix:
                if warn_missing_labels:
                    missing_labels.add(span.label_)
                continue

            if prefixes is None:
                # Do not use prefix labels (e.g., use PER instead of
                # B-PER, I-PER, etc.)
                data[span.start:span.end, source_index] = label2idx[
                    span.label_
                ]
            else:
                # If the span is a single token, we can use U
                if "U" in prefixes and len(span) == 1:
                    data[span.start, source_index] = label2idx[
                        "U-%s" % span.label_
                    ]
                    continue

                # Otherwise, we use B, I and L
                if "B" in prefixes:
                    data[span.start, source_index] = label2idx[
                        "B-%s" % span.label_
                    ]
                if "I" in prefixes:
                    start_i = (span.start+1) if "B" in prefixes else span.start
                    end_i = (span.end-1) if "L" in prefixes else span.end
                    data[start_i:end_i, source_index] = label2idx[
                        "I-%s" % span.label_
                    ]
                if "L" in prefixes:
                    data[span.end-1, source_index] = label2idx[
                        "L-%s" % span.label_
                    ]

    if warn_missing_labels:
        print(
            "WARNING: span labels were found in the dataset that were not "
            "provided in `labels_without_prefix`: {}".format(missing_labels)
        )

    return data
5,354,275
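A minimal, hedged example of calling `_spans_to_array` on a spaCy doc; the label map and source name are invented for illustration, and spaCy plus numpy are assumed to be installed.

import spacy
from spacy.tokens import Span

nlp = spacy.blank("en")
doc = nlp("Barack Obama visited Paris")
# One labelling source with a single PERSON span over the first two tokens.
doc.spans["source_a"] = [Span(doc, 0, 2, label="PERSON")]

label2idx = {"O": 0, "PERSON": 1}   # O at index 0, as the docstring assumes
arr = _spans_to_array(doc, ["source_a"], label2idx, {"PERSON"}, prefixes=None)
print(arr.shape)    # (4, 1): one row per token, one column per source
print(arr[:, 0])    # [1 1 0 0]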
def d_out_dist_cooler(P_mass, rho_dist_cool, w_drift):
    """
    Calculates the diameter of the tube carrying distillate from the
    distillate cooler to the distillate tank.
    Parameters
    ----------
    P_mass : float
        The mass flow rate of distillate, [kg/s]
    rho_dist_cool : float
        The density of the liquid at the cooling temperature, [kg/m**3]
    w_drift : float
        The flow velocity in the tube, [m/s]
    Returns
    -------
    d_out_dist_cooler : float
        The diameter of the tube carrying distillate from the distillate
        cooler to the distillate tank, [m]
    References
    ----------
    &&&
    """
    # d = sqrt(4*G / (pi*rho*w)); 0.785 is pi/4
    return (P_mass / (0.785 * rho_dist_cool * w_drift)) ** 0.5
5,354,276
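A quick numeric check of `d_out_dist_cooler` as corrected above (illustrative values only; it assumes the intended formula is d = sqrt(4*G / (pi*rho*w))).

# 1.5 kg/s of distillate, density 950 kg/m**3, flow velocity 1 m/s.
d = d_out_dist_cooler(P_mass=1.5, rho_dist_cool=950, w_drift=1.0)
print(round(d, 4))   # ~0.0448 m, i.e. roughly a 45 mm pipe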
def is_port_in_use(port): """ test if a port is being used or is free to use. :param port: :return: """ import socket with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: return s.connect_ex(('localhost', port)) == 0
5,354,277
@contextmanager  # from contextlib; needed so the generator below works as a context manager
def redirect_output_to_logger(logger):
    """Context manager which redirects stdout and stderr to a logger"""
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr
    sys.stdout = StreamToLog(logger, logging.INFO)
    sys.stderr = StreamToLog(logger, logging.WARNING)
    try:
        yield
    finally:
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
5,354,278
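A short usage sketch for `redirect_output_to_logger`; it assumes the `StreamToLog` adapter referenced above exists and that the function is wrapped with `contextlib.contextmanager` as shown.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("captured")

with redirect_output_to_logger(logger):
    print("this line is logged at INFO level")
    # anything written to stderr inside the block is logged at WARNING level
print("back on the normal stdout")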
def create_variables_eagerly(getter, initial_value, **kwargs): """Attempts to force variable creation to be eager.""" eager_initial_value = None if isinstance(initial_value, tf.Tensor): if _is_eager_tensor(initial_value): eager_initial_value = initial_value else: # Try to compute the static value (e.g. if the user used `tf.ones`). eager_initial_value = tf.get_static_value(initial_value) if eager_initial_value is not None: # If we have an eager initial value we can create variables in eager mode. with tf.init_scope(): return getter(initial_value=eager_initial_value, **kwargs) else: # Fall back to creating in whatever context we're in with user input. return getter(initial_value=initial_value, **kwargs)
5,354,279
def _Run(vm): """See base method. Args: vm: The vm to run the benchmark on. Returns: A list of sample.Sample objects. """ # Make changes e.g. compiler flags to spec config file. if 'gcc' in FLAGS.runspec_config: _OverwriteGccO3(vm) # swap only if necessary; free local node memory and avoid remote memory; # reset caches; set stack size to unlimited # Also consider setting enable_transparent_hugepages flag to true cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && ' 'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && ' 'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && ' 'ulimit -s unlimited && ') cmd += 'runcpu ' if FLAGS.spec17_build_only: cmd += '--action build ' if FLAGS.spec17_rebuild: cmd += '--rebuild ' version_specific_parameters = [] # rate runs require 2 GB minimum system main memory per copy, # not including os overhead. Refer to: # https://www.spec.org/cpu2017/Docs/system-requirements.html#memory copies = min(vm.NumCpusForBenchmark(), vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER)) version_specific_parameters.append(' --copies=%s ' % (FLAGS.spec17_copies or copies)) version_specific_parameters.append( ' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark())) if FLAGS.spec17_fdo: version_specific_parameters.append('--feedback ') vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles') start_time = time.time() stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset), version_specific_parameters) if FLAGS.spec17_build_only: if 'Error' in stdout and 'Please review this file' in stdout: raise errors.Benchmarks.RunError('Error during SPEC compilation.') return [ sample.Sample( 'compilation_time', time.time() - start_time, 's', { 'spec17_subset': FLAGS.spec17_subset, 'gcc_version': build_tools.GetVersion(vm, 'gcc') }) ] partial_results = True # Do not allow partial results if any benchmark subset is a full suite. for benchmark_subset in FLAGS.benchmark_subset: if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']: partial_results = False log_files = set() for test in FLAGS.spec17_subset: if test in LOG_FILENAME: log_files.add(LOG_FILENAME[test]) else: if test in INTSPEED_SUITE: log_files.add(LOG_FILENAME['intspeed']) elif test in INTRATE_SUITE: log_files.add(LOG_FILENAME['intrate']) elif test in FPSPEED_SUITE: log_files.add(LOG_FILENAME['fpspeed']) elif test in FPRATE_SUITE: log_files.add(LOG_FILENAME['fprate']) for log_file in log_files: vm.RemoteCommand( f'cp {vm.GetScratchDir()}/cpu2017/result/{log_file} ~/{log_file}.log') vm.PullFile(vm_util.GetTempDir(), f'~/{log_file}.log') samples = speccpu.ParseOutput(vm, log_files, partial_results, None) for item in samples: item.metadata['vm_name'] = vm.name item.metadata['spec17_gcc_flags'] = FLAGS.spec17_gcc_flags return samples
5,354,280
def mixin_method(ufunc, rhs=None, transpose=True):
    """Decorator to register a mixin class method

    Using this decorator ensures that derived classes that are declared with the
    `mixin_class` decorator will also have the behaviors that this class has.

    ufunc : numpy.ufunc
        A universal function (or NEP18 callable) that is hooked in awkward1,
        i.e. it can be the first argument of a behavior
    rhs : Set[type] or None
        List of right-hand side argument types (leave None if unary function)
        The left-hand side is expected to always be ``self`` of the parent class
        If the function is not unary or binary, call for help :)
    transpose : bool
        Automatically create a transpose signature (only makes sense for binary ufuncs)
    """
    def register(method):
        if not isinstance(rhs, (set, type(None))):
            raise ValueError("Expected a set of right-hand-side argument types")
        if transpose and rhs is not None:
            def transposed(left, right):
                return method(right, left)

            method._awkward_mixin = (ufunc, rhs, transposed)
        else:
            method._awkward_mixin = (ufunc, rhs, None)
        return method

    return register
5,354,281
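A small, hedged sketch showing what `mixin_method` actually does: it only annotates the method with metadata that a companion `mixin_class` decorator (not shown here) is expected to read when registering behaviors.

import numpy as np

class PointMethods:
    @mixin_method(np.add, {"Point"})   # right-hand side given as a behavior name
    def point_add(self, other):
        ...

print(PointMethods.point_add._awkward_mixin[0] is np.add)   # True
print(PointMethods.point_add._awkward_mixin[1])             # {'Point'}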
def set_def_quick_print(setting): """ Set the global default (henceforth) behavior whether to quick print when stamping or stopping. Args: setting: Passed through bool(). Returns: bool: Implemented setting value. """ setting = bool(setting) SET['QP'] = setting return setting
5,354,282
def test_main_restore_cancel(builtin_input, builtin_print, parse_config, restore): """should cancel restore procedure when user cancels confirmation""" with nose.assert_raises(Exception): swb.main() restore.assert_not_called()
5,354,283
def _filter_mandatory_attributes(zeep_axl_factory_object):
    """Inspect the AXL schema and return a generator of an API endpoint's mandatory attributes.

    Intended use is for local validation prior to submitting an 'add' AXL request to reduce
    the cost of remote error responses from the AXL server.

    Note: EXPERIMENTAL ONLY. Inconsistencies noted for determinations on 'minOccurs' and 'nillable'.
    Suggested NOT to be used.

    :param zeep_axl_factory_object: zeep AXL object generated from a 'get_type' factory call
    :return: generator of mandatory axl elements
    """
    for element in serialize_object(zeep_axl_factory_object).elements:
        # filter on minimum occurrence and no default value
        if element[1].min_occurs >= 1 \
                and not element[1].is_optional \
                and not element[1].nillable \
                and not element[1].default:
            yield element[1]
5,354,284
def company_instance(): """ Mock Company instance """ with patch(PATCH_METHOD) as req: req.return_value = COMPANY_TABLE_DATA instance = get_company_instance() instance.set_company_id("uuid") instance.set_company_name("co") instance.set_company_external_id("external id") instance.save() yield instance
5,354,285
def periodize_cylinders(xi, xf, d): """A generator for each of the copies of the cylinder that we have to check for collisions.""" # of course the cylinder itself must be used, so we return that first in # case anybody only cares to do one, then they do the most important one yield xi, xf # now for each edge adjacent to the face that the tip of the line defining # the center of the cylinder is hitting, we can check if the cylinder # spills over onto the next face in that direction by just checking if the # points of closest approach from the cylinder to the edge are within r of # each other xip, xfp = get_true_xif_periodic(xi, xf) periodizers = periodizers_given_face[get_face_given_point(xip)] for periodizer in periodizers: yield periodizer+xip, periodizer+xfp
5,354,286
async def test_ignore_terminate_fail(conf, game): """Test exit if script stops""" cmd = ["bash", "-c", 'trap "" SIGTERM && sleep 20'] async with simsched.simsched(game, conf, cmd): # Wait for term to be captured await asyncio.sleep(1)
5,354,287
def time_handler(start_time, start_fmt, elaps_fmt, today): """return StartTime, ElapsedTime tuple using start/sub time string""" start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S') start_time = StartTime(start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second) start_time.fmt = start_fmt delta = today - start_time delta = ElapsedTime(delta.days, delta.seconds, 0) delta.fmt = elaps_fmt return start_time, delta
5,354,288
def lgb_multi_weighted_logloss_exgal(y_preds, train_data):
    """
    @author olivier https://www.kaggle.com/ogrellier
    https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
    multi logloss for PLAsTiCC challenge
    """
    # class_weights taken from Giba's topic : https://www.kaggle.com/titericz
    # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
    # with Kyle Boone's post https://www.kaggle.com/kyleboone
    y_true = train_data.get_label()
    if len(np.unique(y_true)) > 14:
        classes_exgal.append(99)
        class_weight_exgal[99] = 2

    y_p = y_preds.reshape(y_true.shape[0], len(classes_exgal), order='F')
    # normalize
    y_p /= y_p.sum(1)[:, None]

    # Transform y_true into dummies
    y_ohe = pd.get_dummies(y_true)
    # Normalize rows and limit y_preds to 1e-15, 1-1e-15
    y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
    # Transform to log
    y_p_log = np.log(y_p)
    # Get the log for ones, .values is used to drop the index of DataFrames
    # Exclude class 99 for now, since there is no class 99 in the training set;
    # we apply a special treatment for that class
    y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
    # Get the number of positives for each class
    nb_pos = y_ohe.sum(axis=0).values.astype(float)
    # Weight average and divide by the number of positives
    class_arr = np.array([class_weight_exgal[k] for k in sorted(class_weight_exgal.keys())])
    y_w = y_log_ones * class_arr / nb_pos

    loss = - np.sum(y_w) / np.sum(class_arr)
    return 'wloss', loss, False
5,354,289
def makedirs(path, mode=0o777, exist_ok=False):
    """Recursive directory creation function.

    :type path: bytes | unicode
    :type mode: int
    :type exist_ok: bool
    :rtype: None
    """
    pass
5,354,290
def api_owner_required(f):
    """
    Authorization decorator for api requests that require the record's owner

    Ensure a user is an admin or the actual user who created the record;
    if not, send a 400 error.

    :return: Function
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if current_user.is_admin():
            return f(*args, **kwargs)
        else:
            user_id = kwargs['user_id']
            if current_user.id != user_id:
                abort(400)
            return f(*args, **kwargs)
    return decorated_function
5,354,291
def save_prediction_image(stacked_img, im_name, epoch, save_folder_name="result_images", save_im=True): """save images to save_path Args: stacked_img (numpy): stacked cropped images save_folder_name (str): saving folder name """ div_arr = division_array(388, 2, 2, 512, 512) img_cont = image_concatenate(stacked_img.cpu().data.numpy(), 2, 2, 512, 512) img_cont = polarize((img_cont)/div_arr)*255 img_cont_np = img_cont.astype('uint8') img_cont = Image.fromarray(img_cont_np) # organize images in every epoch desired_path = save_folder_name + '/epoch_' + str(epoch) + '/' # Create the path if it does not exist if not os.path.exists(desired_path): os.makedirs(desired_path) # Save Image! export_name = str(im_name) + '.png' img_cont.save(desired_path + export_name) return img_cont_np
5,354,292
def flatten_probas_ori(probas, labels, ignore=None): """ Flattens predictions in the batch """ if probas.dim() == 3: # assumes output of a sigmoid layer B, H, W = probas.size() probas = probas.view(B, 1, H, W) B, C, H, W = probas.size() probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C labels = labels.view(-1) if ignore is None: return probas, labels valid = (labels != ignore) vprobas = probas[valid.nonzero().squeeze()] vlabels = labels[valid] return vprobas, vlabels
5,354,293
def get_new_file_number(pat, destdir, startnum=1, endnum=10000): """Substitute the integers from startnum to endnum into pat and return the first one that doesn't exist. The file name that is searched for is os.path.join(destdir, pat % i).""" for i in range(startnum, endnum): temp = pat % i if not os.path.exists(os.path.join(destdir, temp)): return i
5,354,294
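A small example of `get_new_file_number` with a hypothetical file pattern and a temporary directory.

import os
import tempfile

destdir = tempfile.mkdtemp()
# Pretend snapshot_001.png and snapshot_002.png already exist.
for i in (1, 2):
    open(os.path.join(destdir, "snapshot_%03d.png" % i), "w").close()

print(get_new_file_number("snapshot_%03d.png", destdir))   # 3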
def VisionTransformer_small(pretrained=False, input_shape=(3,224,224), patch_size=16, num_classes=1000,
                            depth=8, drop_rate=0.2, **kwargs):
    """ My custom 'small' ViT model. depth=8, num_heads=12, mlp_ratio=3."""
    vit = VisionTransformer(
        patch_size=patch_size, num_classes=num_classes, depth=depth, num_heads=12, mlp_ratio=3.,
        qkv_bias=False, qk_scale=768 ** -0.5, representation_size=None, drop_rate=drop_rate,
        attn_drop_rate=drop_rate, drop_path_rate=drop_rate, hybrid_backbone=None)
    model = ImageClassificationModel(input_shape=input_shape, output=vit)
    if pretrained:
        # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
        vit.qk_scale = 768 ** -0.5
    return model
5,354,295
def downgrade_database( alembic_config_filename: str, destination_revision: str, alembic_base_dir: str = None, starting_revision: str = None, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE, as_sql: bool = False) -> None: """ Use Alembic to downgrade our database. USE WITH EXTREME CAUTION. "revision" is the destination revision. See http://alembic.readthedocs.org/en/latest/api/runtime.html but also, in particular, ``site-packages/alembic/command.py`` Arguments: alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work starting_revision: revision to start at (typically ``None`` to ask the database) destination_revision: revision to aim for version_table: table name for Alembic versions as_sql: run in "offline" mode: print the migration SQL, rather than modifying the database. See http://alembic.zzzcomputing.com/en/latest/offline.html """ if alembic_base_dir is None: alembic_base_dir = os.path.dirname(alembic_config_filename) os.chdir(alembic_base_dir) # so the directory in the config file works config = Config(alembic_config_filename) script = ScriptDirectory.from_config(config) # noinspection PyUnusedLocal,PyProtectedMember def downgrade(rev, context): return script._downgrade_revs(destination_revision, rev) log.info("Downgrading database to revision {!r} using Alembic", destination_revision) with EnvironmentContext(config, script, fn=downgrade, as_sql=as_sql, starting_rev=starting_revision, destination_rev=destination_revision, tag=None, version_table=version_table): script.run_env() log.info("Database downgrade completed")
5,354,296
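A hedged call sketch for `downgrade_database`; the config path and revision IDs are placeholders, and `as_sql=True` keeps it in offline mode so no database is modified.

downgrade_database(
    alembic_config_filename="alembic.ini",   # placeholder path
    destination_revision="ae1027a6acf",      # placeholder target revision
    starting_revision="27c6a30d7c24",        # typically required in offline (--sql) mode
    as_sql=True,                             # print the migration SQL instead of running it
)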
def sparsify_axis_labels_old(ax, n=2): """ Sparsify tick labels on the given matplotlib axis, keeping only those whose index is divisible by n. Works with factor plots """ for idx, label in enumerate(ax.xaxis.get_ticklabels()): if idx % n != 0: label.set_visible(False)
5,354,297
def cast_env(env): """Encode all the environment values as the appropriate type for each Python version This assumes that all the data is or can be represented as UTF8""" env_type = six.ensure_binary if sys.version_info[0] < 3 else six.ensure_str return {env_type(key): env_type(value) for key, value in six.iteritems(env)}
5,354,298
def query_filter_choices(arg=None, fq=[]): """ Makes solr query and returns facets for tickets. :param arg: solr query, string """ params = { 'short_timeout': True, 'fq': [ 'project_id_s:%s' % c.project._id, 'mount_point_s:%s' % c.app.config.options.mount_point, 'type_s:Ticket', ] + fq, 'rows': 0, } params.update(FACET_PARAMS) result = search(arg, **params) return get_facets(result)
5,354,299