from common_fixtures import *  # NOQA
from copy import deepcopy


def made_log(object, admin_user_client, context, accountId=None):
    t = object.type
    if t == 'stack':
        t = 'stack'
    logs = admin_user_client.list_audit_log(resourceId=object.id,
                                            resourceType=t)
    assert len(logs) == 1
    assert logs[0].resourceType == t
    if str(logs[0].resourceId) != object.id:
        assert str(logs[0].resourceId).replace('1s', '1e') == object.id
    else:
        assert str(logs[0].resourceId) == object.id
    if accountId is None:
        assert logs[0].accountId == context.project.id
    else:
        assert logs[0].accountId == accountId
    assert logs[0].authenticatedAsAccountId == context.account.id


def test_audit_entry_created(new_context, admin_user_client):
    objects = []
    new_headers = deepcopy(new_context.user_client._headers)
    new_headers['X-API-Project-Id'] = new_context.project.id
    made_log(new_context.user_client.create_project(), admin_user_client,
             new_context, accountId=new_context.account.id)
    new_context.user_client._headers = new_headers
    new_context.user_client.reload_schema()
    objects.append(new_context.user_client.create_container(
        imageUuid=new_context.image_uuid))
    objects.append(new_context.user_client.create_container(
        imageUuid=new_context.image_uuid))
    objects.append(new_context.user_client.create_api_key())
    objects.append(new_context.user_client.create_registry(
        serverAddress='test.io', name='test'))
    objects.append(new_context.user_client.create_api_key())
    objects.append(new_context.user_client.create_stack(
        name='env-' + random_str()))
    for object in objects:
        made_log(object, admin_user_client, new_context)
import time
import urllib

import pytest
import requests
from datahub.cli.docker import check_local_docker_containers
from datahub.ingestion.run.pipeline import Pipeline
from tests.utils import ingest_file_via_rest

GMS_ENDPOINT = "http://localhost:8080"
FRONTEND_ENDPOINT = "http://localhost:9002"
KAFKA_BROKER = "localhost:9092"

bootstrap_sample_data = "../metadata-ingestion/examples/mce_files/bootstrap_mce.json"
usage_sample_data = (
    "../metadata-ingestion/tests/integration/bigquery-usage/bigquery_usages_golden.json"
)
bq_sample_data = "./sample_bq_data.json"
restli_default_headers = {
    "X-RestLi-Protocol-Version": "2.0.0",
}
kafka_post_ingestion_wait_sec = 60


@pytest.fixture(scope="session")
def wait_for_healthchecks():
    # Simply assert that everything is healthy, but don't wait.
    assert not check_local_docker_containers()
    yield


@pytest.mark.dependency()
def test_healthchecks(wait_for_healthchecks):
    # Call to wait_for_healthchecks fixture will do the actual functionality.
    pass


@pytest.fixture(scope="session")
def frontend_session(wait_for_healthchecks):
    session = requests.Session()

    headers = {
        "Content-Type": "application/json",
    }
    data = '{"username":"datahub", "password":"<PASSWORD>"}'
    response = session.post(f"{FRONTEND_ENDPOINT}/logIn", headers=headers, data=data)
    response.raise_for_status()

    yield session


@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_via_rest(wait_for_healthchecks):
    ingest_file_via_rest(bootstrap_sample_data)


@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_usage_via_rest(wait_for_healthchecks):
    ingest_file_via_rest(usage_sample_data)


@pytest.mark.dependency(depends=["test_healthchecks"])
def test_ingestion_via_kafka(wait_for_healthchecks):
    pipeline = Pipeline.create(
        {
            "source": {
                "type": "file",
                "config": {"filename": bq_sample_data},
            },
            "sink": {
                "type": "datahub-kafka",
                "config": {"connection": {"bootstrap": KAFKA_BROKER}},
            },
        }
    )
    pipeline.run()
    pipeline.raise_from_status()

    # Since Kafka emission is asynchronous, we must wait a little bit so that
    # the changes are actually processed.
    time.sleep(kafka_post_ingestion_wait_sec)


@pytest.mark.dependency(
    depends=[
        "test_ingestion_via_rest",
        "test_ingestion_via_kafka",
        "test_ingestion_usage_via_rest",
    ]
)
def test_run_ingestion(wait_for_healthchecks):
    # Dummy test so that future ones can just depend on this one.
    pass


@pytest.mark.dependency(depends=["test_healthchecks", "test_run_ingestion"])
def test_gms_get_user():
    username = "jdoe"
    urn = f"urn:li:corpuser:{username}"
    response = requests.get(
        f"{GMS_ENDPOINT}/entities/{urllib.parse.quote(urn)}",
        headers={
            **restli_default_headers,
        },
    )
    response.raise_for_status()
    data = response.json()
    assert data["value"]
    assert data["value"]["com.linkedin.metadata.snapshot.CorpUserSnapshot"]
    assert (
        data["value"]["com.linkedin.metadata.snapshot.CorpUserSnapshot"]["urn"] == urn
    )


@pytest.mark.parametrize(
    "platform,dataset_name,env",
    [
        (
            # This one tests the bootstrap sample data.
"urn:li:dataPlatform:kafka" "SampleKafkaDataset" "PROD" ) (# This one tests BigQuery ingestion.
"urn:li:dataPlatform:bigquery" "bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times" "PROD" ) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_gms_get_dataset platform dataset_name env<block_start>platform="urn:li:dataPlatform:bigquery"<line_sep>dataset_name=("bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times")<line_sep>env="PROD"<line_sep>urn=f"urn:li:dataset:({platform},{dataset_name},{env})"<line_sep>response=requests.get(f"{GMS_ENDPOINT}/entities/{urllib.parse.quote(urn)}" headers={**restli_default_headers "X-RestLi-Method":"get" } )<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data["value"]<assert_stmt>res_data["value"]["com.linkedin.metadata.snapshot.DatasetSnapshot"]<assert_stmt>(res_data["value"]["com.linkedin.metadata.snapshot.DatasetSnapshot"]["urn"]<eq>urn)<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_gms_batch_get_v2 <block_start>platform="urn:li:dataPlatform:bigquery"<line_sep>env="PROD"<line_sep>name_1="bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times"<line_sep>name_2="bigquery-public-data.covid19_geotab_mobility_impact.ca_border_wait_times"<line_sep>urn1=f"urn:li:dataset:({platform},{name_1},{env})"<line_sep>urn2=f"urn:li:dataset:({platform},{name_2},{env})"<line_sep>response=requests.get(f"{GMS_ENDPOINT}/entitiesV2?ids=List({urllib.parse.quote(urn1)},{urllib.parse.quote(urn2)})&aspects=List(datasetProperties,ownership)" headers={**restli_default_headers "X-RestLi-Method":"batch_get" } )<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<line_sep># Verify both urns exist and have correct aspects
<assert_stmt>res_data["results"]<assert_stmt>res_data["results"][urn1]<assert_stmt>res_data["results"][urn1]["aspects"]["datasetProperties"]<assert_stmt>res_data["results"][urn1]["aspects"]["ownership"]<assert_stmt>res_data["results"][urn2]<assert_stmt>res_data["results"][urn2]["aspects"]["datasetProperties"]<assert_stmt>("ownership"<not><in>res_data["results"][urn2]["aspects"])<block_end># Aspect does not exist.
@pytest.mark.parametrize("query,min_expected_results" [("covid" 1) ("sample" 3) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_gms_search_dataset query min_expected_results<block_start>json={"input":f"{query}" "entity":"dataset" "start":0 "count":10}<line_sep>print(json)<line_sep>response=requests.post(f"{GMS_ENDPOINT}/entities?action=search" headers=restli_default_headers json=json )<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data["value"]<assert_stmt>res_data["value"]["numEntities"]<ge>min_expected_results<assert_stmt>len(res_data["value"]["entities"])<ge>min_expected_results<block_end>@pytest.mark.parametrize("query,min_expected_results" [("covid" 1) ("sample" 3) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_gms_search_across_entities query min_expected_results<block_start>json={"input":f"{query}" "entities":[] "start":0 "count":10}<line_sep>print(json)<line_sep>response=requests.post(f"{GMS_ENDPOINT}/entities?action=searchAcrossEntities" headers=restli_default_headers json=json )<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data["value"]<assert_stmt>res_data["value"]["numEntities"]<ge>min_expected_results<assert_stmt>len(res_data["value"]["entities"])<ge>min_expected_results<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_gms_usage_fetch <block_start>response=requests.post(f"{GMS_ENDPOINT}/usageStats?action=queryRange" headers=restli_default_headers json={"resource":"urn:li:dataset:(urn:li:dataPlatform:bigquery,harshal-playground-306419.test_schema.excess_deaths_derived,PROD)" "duration":"DAY" "rangeFromEnd":"ALL" } )<line_sep>response.raise_for_status()<line_sep>data=response.json()["value"]<assert_stmt>len(data["buckets"])<eq>6<assert_stmt>data["buckets"][0]["metrics"]["topSqlQueries"]<line_sep>fields=data["aggregations"].pop("fields")<assert_stmt>len(fields)<eq>12<assert_stmt>fields[0]["count"]<eq>7<line_sep>users=data["aggregations"].pop("users")<assert_stmt>len(users)<eq>1<assert_stmt>users[0]["count"]<eq>7<assert_stmt>data["aggregations"]<eq>{# "fields" and "users" already popped out
"totalSqlQueries":7 "uniqueUserCount":1 }<block_end>@pytest.mark.dependency(depends=["test_healthchecks"])<def_stmt>test_frontend_auth frontend_session<block_start><pass><block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_browse_datasets frontend_session<block_start>json={"query":"""query browse($input: BrowseInput!) {\n
browse(input: $input) {\n
start\n
count\n
total\n
groups {
name
}
entities {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}""" "variables":{"input":{"type":"DATASET" "path":["prod"]}} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["browse"]<assert_stmt>len(res_data["data"]["browse"]["entities"])<eq>0<assert_stmt>len(res_data["data"]["browse"]["groups"])<g>0<block_end>@pytest.mark.parametrize("query,min_expected_results" [("covid" 1) ("sample" 3) ("" 1) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_search_datasets frontend_session query min_expected_results<block_start>json={"query":"""query search($input: SearchInput!) {\n
search(input: $input) {\n
start\n
count\n
total\n
searchResults {\n
entity {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}\n
}""" "variables":{"input":{"type":"DATASET" "query":f"{query}" "start":0 "count":10}} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["search"]<assert_stmt>res_data["data"]["search"]["total"]<ge>min_expected_results<assert_stmt>len(res_data["data"]["search"]["searchResults"])<ge>min_expected_results<block_end>@pytest.mark.parametrize("query,min_expected_results" [("covid" 1) ("sample" 3) ("" 1) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_search_across_entities frontend_session query min_expected_results<block_start>json={"query":"""query searchAcrossEntities($input: SearchAcrossEntitiesInput!) {\n
searchAcrossEntities(input: $input) {\n
start\n
count\n
total\n
searchResults {\n
entity {\n
... on Dataset {\n
urn\n
name\n
}\n
}\n
}\n
}\n
}""" "variables":{"input":{"types":[] "query":f"{query}" "start":0 "count":10}} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["searchAcrossEntities"]<assert_stmt>res_data["data"]["searchAcrossEntities"]["total"]<ge>min_expected_results<assert_stmt>(len(res_data["data"]["searchAcrossEntities"]["searchResults"])<ge>min_expected_results)<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_user_info frontend_session<block_start>urn="urn:li:corpuser:datahub"<line_sep>json={"query":"""query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
username\n
editableInfo {\n
pictureLink\n
}\n
info {\n
firstName\n
fullName\n
title\n
email\n
}\n
}\n
}""" "variables":{"urn":urn} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpUser"]<assert_stmt>res_data["data"]["corpUser"]["urn"]<eq>urn<block_end>@pytest.mark.parametrize("platform,dataset_name,env" [(# This one tests the bootstrap sample data.
"urn:li:dataPlatform:kafka" "SampleKafkaDataset" "PROD" ) (# This one tests BigQuery ingestion.
"urn:li:dataPlatform:bigquery" "bigquery-public-data.covid19_geotab_mobility_impact.us_border_wait_times" "PROD" ) ] )@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_datasets frontend_session platform dataset_name env<block_start>urn=f"urn:li:dataset:({platform},{dataset_name},{env})"<line_sep>json={"query":"""query getDataset($urn: String!) {\n
dataset(urn: $urn) {\n
urn\n
name\n
description\n
platform {\n
urn\n
}\n
schemaMetadata {\n
name\n
version\n
createdAt\n
}\n
}\n
}""" "variables":{"urn":urn} }<line_sep># Basic dataset info.
response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["dataset"]<assert_stmt>res_data["data"]["dataset"]["urn"]<eq>urn<assert_stmt>res_data["data"]["dataset"]["name"]<eq>dataset_name<assert_stmt>res_data["data"]["dataset"]["platform"]["urn"]<eq>platform<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_ingest_with_system_metadata <block_start>response=requests.post(f"{GMS_ENDPOINT}/entities?action=ingest" headers=restli_default_headers json={"entity":{"value":{"com.linkedin.metadata.snapshot.CorpUserSnapshot":{"urn":"urn:li:corpuser:datahub" "aspects":[{"com.linkedin.identity.CorpUserInfo":{"active":<true> "displayName":"Data Hub" "email":"<EMAIL>" "title":"CEO" "fullName":"Data Hub" }}] }}} "systemMetadata":{"lastObserved":1628097379571 "runId":"af0fe6e4-f547-11eb-81b2-acde48001122" } } )<line_sep>response.raise_for_status()<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_ingest_with_blank_system_metadata <block_start>response=requests.post(f"{GMS_ENDPOINT}/entities?action=ingest" headers=restli_default_headers json={"entity":{"value":{"com.linkedin.metadata.snapshot.CorpUserSnapshot":{"urn":"urn:li:corpuser:datahub" "aspects":[{"com.linkedin.identity.CorpUserInfo":{"active":<true> "displayName":"Data Hub" "email":"<EMAIL>" "title":"CEO" "fullName":"Data Hub" }}] }}} "systemMetadata":{} } )<line_sep>response.raise_for_status()<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_ingest_without_system_metadata <block_start>response=requests.post(f"{GMS_ENDPOINT}/entities?action=ingest" headers=restli_default_headers json={"entity":{"value":{"com.linkedin.metadata.snapshot.CorpUserSnapshot":{"urn":"urn:li:corpuser:datahub" "aspects":[{"com.linkedin.identity.CorpUserInfo":{"active":<true> "displayName":"Data Hub" "email":"<EMAIL>" "title":"CEO" "fullName":"Data Hub" }}] }}} } )<line_sep>response.raise_for_status()<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_list_policies frontend_session<block_start>json={"query":"""query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
type\n
name\n
description\n
state\n
resources {\n
type\n
allResources\n
resources\n
}\n
privileges\n
actors {\n
users\n
groups\n
allUsers\n
allGroups\n
resourceOwners\n
}\n
editable\n
}\n
}\n
}""" "variables":{"input":{"start":"0" "count":"20" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listPolicies"]<assert_stmt>res_data["data"]["listPolicies"]["start"]<eq>0<assert_stmt>res_data["data"]["listPolicies"]["count"]<g>0<assert_stmt>len(res_data["data"]["listPolicies"]["policies"])<g>0<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_frontend_list_policies"])<def_stmt>test_frontend_update_policy frontend_session<block_start>json={"query":"""mutation updatePolicy($urn: String!, $input: PolicyUpdateInput!) {\n
updatePolicy(urn: $urn, input: $input) }""" "variables":{"urn":"urn:li:dataHubPolicy:7" "input":{"type":"PLATFORM" "state":"INACTIVE" "name":"Updated Platform Policy" "description":"My Metadaata Policy" "privileges":["MANAGE_POLICIES"] "actors":{"users":["urn:li:corpuser:datahub"] "resourceOwners":<false> "allUsers":<false> "allGroups":<false> } } } }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["updatePolicy"]<assert_stmt>res_data["data"]["updatePolicy"]<eq>"urn:li:dataHubPolicy:7"<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_frontend_list_policies" "test_frontend_update_policy" ])<def_stmt>test_frontend_delete_policy frontend_session<block_start>json={"query":"""mutation deletePolicy($urn: String!) {\n
deletePolicy(urn: $urn) }""" "variables":{"urn":"urn:li:dataHubPolicy:7"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<line_sep># Now verify the policy has been removed.
json={"query":"""query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
}\n
}\n
}""" "variables":{"input":{"start":"0" "count":"20" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listPolicies"]<line_sep># Verify that the URN is no longer in the list
result=filter(<lambda>x:x["urn"]<eq>"urn:li:dataHubPolicy:7" res_data["data"]["listPolicies"]["policies"] )<assert_stmt>len(list(result))<eq>0<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_frontend_list_policies" "test_frontend_delete_policy" ])<def_stmt>test_frontend_create_policy frontend_session# Policy tests are not idempotent. If you rerun this test it will be wrong.
<block_start>json={"query":"""mutation createPolicy($input: PolicyUpdateInput!) {\n
createPolicy(input: $input) }""" "variables":{"input":{"type":"METADATA" "name":"Test Metadata Policy" "description":"My Metadaata Policy" "state":"ACTIVE" "resources":{"type":"dataset" "allResources":<true>} "privileges":["EDIT_ENTITY_TAGS"] "actors":{"users":["urn:li:corpuser:datahub"] "resourceOwners":<false> "allUsers":<false> "allGroups":<false> } }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["createPolicy"]<line_sep>new_urn=res_data["data"]["createPolicy"]<line_sep># Sleep for eventual consistency
time.sleep(3)<line_sep># Now verify the policy has been added.
json={"query":"""query listPolicies($input: ListPoliciesInput!) {\n
listPolicies(input: $input) {\n
start\n
count\n
total\n
policies {\n
urn\n
}\n
}\n
}""" "variables":{"input":{"start":"0" "count":"20" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listPolicies"]<line_sep># Verify that the URN appears in the list
result=filter(<lambda>x:x["urn"]<eq>new_urn res_data["data"]["listPolicies"]["policies"])<assert_stmt>len(list(result))<eq>1<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_app_config frontend_session<block_start>json={"query":"""query appConfig {\n
appConfig {\n
analyticsConfig {\n
enabled\n
}\n
policiesConfig {\n
enabled\n
platformPrivileges {\n
type\n
displayName\n
description\n
}\n
resourcePrivileges {\n
resourceType\n
resourceTypeDisplayName\n
entityType\n
privileges {\n
type\n
displayName\n
description\n
}\n
}\n
}\n
}\n
}"""}<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["appConfig"]<assert_stmt>res_data["data"]["appConfig"]["analyticsConfig"]["enabled"]<is><true><assert_stmt>res_data["data"]["appConfig"]["policiesConfig"]["enabled"]<is><true><block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_frontend_me_query frontend_session<block_start>json={"query":"""query me {\n
me {\n
corpUser {\n
urn\n
username\n
editableInfo {\n
pictureLink\n
}\n
info {\n
firstName\n
fullName\n
title\n
email\n
}\n
}\n
platformPrivileges {\n
viewAnalytics
managePolicies
manageIdentities
generatePersonalAccessTokens
}\n
}\n
}"""}<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["me"]["corpUser"]["urn"]<eq>"urn:li:corpuser:datahub"<assert_stmt>res_data["data"]["me"]["platformPrivileges"]["viewAnalytics"]<is><true><assert_stmt>res_data["data"]["me"]["platformPrivileges"]["managePolicies"]<is><true><assert_stmt>res_data["data"]["me"]["platformPrivileges"]["manageIdentities"]<is><true><assert_stmt>(res_data["data"]["me"]["platformPrivileges"]["generatePersonalAccessTokens"]<is><true>)<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_list_users frontend_session<block_start>json={"query":"""query listUsers($input: ListUsersInput!) {\n
listUsers(input: $input) {\n
start\n
count\n
total\n
users {\n
urn\n
type\n
username\n
properties {\n
firstName
}\n
}\n
}\n
}""" "variables":{"input":{"start":"0" "count":"2" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listUsers"]<assert_stmt>res_data["data"]["listUsers"]["start"]<eq>0<assert_stmt>res_data["data"]["listUsers"]["count"]<eq>2<assert_stmt>(len(res_data["data"]["listUsers"]["users"])<ge>2)<block_end># Length of default user set.
@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_list_groups frontend_session<block_start>json={"query":"""query listGroups($input: ListGroupsInput!) {\n
listGroups(input: $input) {\n
start\n
count\n
total\n
groups {\n
urn\n
type\n
name\n
properties {\n
displayName
}\n
}\n
}\n
}""" "variables":{"input":{"start":"0" "count":"2" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listGroups"]<assert_stmt>res_data["data"]["listGroups"]["start"]<eq>0<assert_stmt>res_data["data"]["listGroups"]["count"]<eq>2<assert_stmt>(len(res_data["data"]["listGroups"]["groups"])<ge>2)<block_end># Length of default group set.
@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_list_groups"])<def_stmt>test_add_remove_members_from_group frontend_session# Assert no group edges for user jdoe
<block_start>json={"query":"""query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpuser:jdoe"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpUser"]<assert_stmt>res_data["data"]["corpUser"]["relationships"]["total"]<eq>0<line_sep># Add jdoe to group
json={"query":"""mutation addGroupMembers($input: AddGroupMembersInput!) {\n
addGroupMembers(input: $input) }""" "variables":{"input":{"groupUrn":"urn:li:corpGroup:bfoo" "userUrns":["urn:li:corpuser:jdoe"] }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep># Sleep for edge store to be updated. Not ideal!
time.sleep(3)<line_sep># Verify the member has been added
json={"query":"""query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpuser:jdoe"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpUser"]<assert_stmt>res_data["data"]["corpUser"]["relationships"]<assert_stmt>res_data["data"]["corpUser"]["relationships"]["total"]<eq>1<line_sep># Now remove jdoe from the group
json={"query":"""mutation removeGroupMembers($input: RemoveGroupMembersInput!) {\n
removeGroupMembers(input: $input) }""" "variables":{"input":{"groupUrn":"urn:li:corpGroup:bfoo" "userUrns":["urn:li:corpuser:jdoe"] }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep># Sleep for edge store to be updated. Not ideal!
time.sleep(3)<line_sep># Verify the member has been removed
json={"query":"""query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
relationships(input: { types: ["IsMemberOfGroup"], direction: OUTGOING, start: 0, count: 1 }) {\n
total\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpuser:jdoe"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpUser"]<assert_stmt>res_data["data"]["corpUser"]["relationships"]["total"]<eq>0<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_update_corp_group_properties frontend_session<block_start>group_urn="urn:li:corpGroup:bfoo"<line_sep># Update Corp Group Description
json={"query":"""mutation updateCorpGroupProperties($urn: String!, $input: CorpGroupUpdateInput!) {\n
updateCorpGroupProperties(urn: $urn, input: $input) { urn } }""" "variables":{"urn":group_urn "input":{"description":"My test description" "slack":"test_group_slack" "email":"<EMAIL>" } } }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<line_sep>print(res_data)<assert_stmt>"error"<not><in>res_data<assert_stmt>res_data["data"]["updateCorpGroupProperties"]<is><not><none><line_sep># Verify the description has been updated
json={"query":"""query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
editableProperties {\n
description\n
slack\n
email\n
}\n
}\n
}""" "variables":{"urn":group_urn} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>"error"<not><in>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpGroup"]<assert_stmt>res_data["data"]["corpGroup"]["editableProperties"]<assert_stmt>res_data["data"]["corpGroup"]["editableProperties"]<eq>{"description":"My test description" "slack":"test_group_slack" "email":"<EMAIL>"}<line_sep># Reset the editable properties
json={"query":"""mutation updateCorpGroupProperties($urn: String!, $input: UpdateCorpGroupPropertiesInput!) {\n
updateCorpGroupProperties(urn: $urn, input: $input) }""" "variables":{"urn":group_urn "input":{"description":"" "slack":"" "email":""} } }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_update_corp_group_properties"])<def_stmt>test_update_corp_group_description frontend_session<block_start>group_urn="urn:li:corpGroup:bfoo"<line_sep># Update Corp Group Description
json={"query":"""mutation updateDescription($input: DescriptionUpdateInput!) {\n
updateDescription(input: $input) }""" "variables":{"input":{"description":"My test description" "resourceUrn":group_urn} } }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<line_sep>print(res_data)<assert_stmt>"error"<not><in>res_data<assert_stmt>res_data["data"]["updateDescription"]<is><true><line_sep># Verify the description has been updated
json={"query":"""query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
editableProperties {\n
description\n
}\n
}\n
}""" "variables":{"urn":group_urn} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>"error"<not><in>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpGroup"]<assert_stmt>res_data["data"]["corpGroup"]["editableProperties"]<assert_stmt>res_data["data"]["corpGroup"]["editableProperties"]["description"]<eq>"My test description"<line_sep># Reset Corp Group Description
json={"query":"""mutation updateDescription($input: DescriptionUpdateInput!) {\n
updateDescription(input: $input) }""" "variables":{"input":{"description":"" "resourceUrn":group_urn} } }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_list_groups" "test_add_remove_members_from_group" ])<def_stmt>test_remove_user frontend_session<block_start>json={"query":"""mutation removeUser($urn: String!) {\n
removeUser(urn: $urn) }""" "variables":{"urn":"urn:li:corpuser:jdoe"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>json={"query":"""query corpUser($urn: String!) {\n
corpUser(urn: $urn) {\n
urn\n
properties {\n
firstName\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpuser:jdoe"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>"error"<not><in>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpUser"]<assert_stmt>res_data["data"]["corpUser"]["properties"]<is><none><block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_list_groups" "test_add_remove_members_from_group" ])<def_stmt>test_remove_group frontend_session<block_start>json={"query":"""mutation removeGroup($urn: String!) {\n
removeGroup(urn: $urn) }""" "variables":{"urn":"urn:li:corpGroup:bfoo"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>json={"query":"""query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
properties {\n
displayName\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpGroup:bfoo"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpGroup"]<assert_stmt>res_data["data"]["corpGroup"]["properties"]<is><none><block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion" "test_list_groups" "test_remove_group" ])<def_stmt>test_create_group frontend_session<block_start>json={"query":"""mutation createGroup($input: CreateGroupInput!) {\n
createGroup(input: $input) }""" "variables":{"input":{"id":"test-id" "name":"Test Group" "description":"My test group" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>json={"query":"""query corpGroup($urn: String!) {\n
corpGroup(urn: $urn) {\n
urn\n
properties {\n
displayName\n
}\n
}\n
}""" "variables":{"urn":"urn:li:corpGroup:test-id"} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["corpGroup"]<assert_stmt>res_data["data"]["corpGroup"]["properties"]["displayName"]<eq>"Test Group"<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_home_page_recommendations frontend_session<block_start>min_expected_recommendation_modules=0<line_sep>json={"query":"""query listRecommendations($input: ListRecommendationsInput!) {\n
listRecommendations(input: $input) { modules { title } } }""" "variables":{"input":{"userUrn":"urn:li:corpuser:datahub" "requestContext":{"scenario":"HOME"} "limit":5 }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<line_sep>print(res_data)<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["listRecommendations"]<assert_stmt>"error"<not><in>res_data<assert_stmt>(len(res_data["data"]["listRecommendations"]["modules"])<g>min_expected_recommendation_modules)<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_search_results_recommendations frontend_session# This test simply ensures that the recommendations endpoint does not return an error.
<block_start>json={"query":"""query listRecommendations($input: ListRecommendationsInput!) {\n
listRecommendations(input: $input) { modules { title } }""" "variables":{"input":{"userUrn":"urn:li:corpuser:datahub" "requestContext":{"scenario":"SEARCH_RESULTS" "searchRequestContext":{"query":"asdsdsdds" "filters":[]} } "limit":5 }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>"error"<not><in>res_data<block_end>@pytest.mark.dependency(depends=["test_healthchecks" "test_run_ingestion"])<def_stmt>test_generate_personal_access_token frontend_session# Test success case
<block_start>json={"query":"""query getAccessToken($input: GetAccessTokenInput!) {\n
getAccessToken(input: $input) {\n
accessToken\n
}\n
}""" "variables":{"input":{"type":"PERSONAL" "actorUrn":"urn:li:corpuser:datahub" "duration":"ONE_MONTH" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>res_data["data"]<assert_stmt>res_data["data"]["getAccessToken"]["accessToken"]<is><not><none><assert_stmt>"error"<not><in>res_data<line_sep># Test unauthenticated case
json={"query":"""query getAccessToken($input: GetAccessTokenInput!) {\n
accessToken\n
}""" "variables":{"input":{"type":"PERSONAL" "actorUrn":"urn:li:corpuser:jsmith" "duration":"ONE_DAY" }} }<line_sep>response=frontend_session.post(f"{FRONTEND_ENDPOINT}/api/v2/graphql" json=json)<line_sep>response.raise_for_status()<line_sep>res_data=response.json()<assert_stmt>res_data<assert_stmt>"errors"<in>res_data<block_end># Assert the request fails
'''
Using the sieve of Eratosthenes, primes(x) returns the list of all primes
less than or equal to x.

Modification:
We don't need to check the even numbers: we build the sieve over odd numbers
only and add 2 to the primes list by default.

The sieve array has x / 2 - 1 entries if x is even, else x / 2
(the -1 for even x excludes x itself), because we only need the odd numbers
from 3 up to x. The value represented at index i is (i*2 + 3).

For example, for x = 10 we start with an array of x / 2 - 1 = 4 entries:
[1, 1, 1, 1]
 3  5  7  9

For x = 11:
[1, 1, 1, 1, 1]
 3  5  7  9  11   # 11 is odd, so it is included in the sieve

This halves the array size, and the amount of work is roughly halved as well.
'''


def primes(x):
    assert x >= 0
    # If x is even, exclude x from the sieve (-1):
    sieve_size = (x // 2 - 1) if x % 2 == 0 else (x // 2)
    sieve = [1 for v in range(sieve_size)]  # sieve over the odd numbers >= 3
    primes = []                             # list of primes found
    if x >= 2:
        primes.append(2)                    # add 2 by default
    for i in range(sieve_size):
        if sieve[i] == 1:
            value_at_i = i * 2 + 3
            primes.append(value_at_i)
            # Mark every odd multiple of value_at_i as composite.
            for j in range(i, sieve_size, value_at_i):
                sieve[j] = 0
    return primes
<import_stmt>numpy<as>np<import_stmt>numpy.random<as>rnd<import_stmt>tensorflow<as>tf<import_from_stmt>..common.utils Timer logger<line_sep>"""
This is a simplified version of TensorFlow word2vec_basic.py
The primary purpose is pedagogical: instead of calling TensorFlow helpers
such as tf.nn.nce_loss, we sample the negative examples ourselves, uniformly at random.
Another reason to use this is the activity modeling example in activity_word2vec.py,
where the 'vocabulary' is limited to the (small) total number of sensors,
so a customized implementation can be more efficient.
"""<class_stmt>CustomWord2vec(object)<block_start><def_stmt>__init__ self sensors=<none> sensor2code=<none> code2sensor=<none> dims=100 window_size=3 neg_samples=3 n_epochs=1 learning_rate=0.001 debug=<false><block_start>self.sensors=sensors<line_sep>self.sensor2code=sensor2code<line_sep>self.code2sensor=code2sensor<line_sep>self.dims=dims<line_sep>self.window_size=window_size<line_sep>self.neg_samples=neg_samples<line_sep>self.n_epochs=n_epochs<line_sep>self.learning_rate=learning_rate<line_sep>self.debug=debug<line_sep>self.X=self.Y=self.Z=self.W=self.embedding=self.weights=<none><line_sep>self.normalized_embeddings=<none><line_sep>self.similarity=<none><line_sep>self.training_op=<none><block_end><def_stmt>fit self seq<block_start>tf.set_random_seed(42)<line_sep>self.X=tf.placeholder(tf.int32 shape=[<none>])# input 'word'
self.Y=tf.placeholder(tf.int32 shape=[<none>])# predicted 'word'
self.Z=tf.placeholder(tf.float32 shape=[<none>])# multiplier {1, -1}
self.W=tf.placeholder(tf.float32 shape=[<none>])# weight [0, 1.0]
vocab_size=len(self.sensors)<line_sep>valid_examples=np.arange(0 vocab_size)<line_sep>valid_size=len(valid_examples)<line_sep>valid_dataset=tf.constant(valid_examples dtype=tf.int32)<line_sep>top_k=4# number of nearest neighbors for validation of similarity
init=tf.random_uniform((vocab_size self.dims) minval=-1.0 maxval=1.0 dtype=tf.float32)<line_sep># the encoding matrix
self.embedding=tf.Variable(init name="embedding")<line_sep>norm=tf.sqrt(tf.reduce_sum(tf.square(self.embedding) 1 keepdims=<true>))<line_sep>self.normalized_embeddings=self.embedding/norm<line_sep>self.valid_embeddings=tf.nn.embedding_lookup(self.normalized_embeddings valid_dataset)<line_sep>self.similarity=tf.matmul(self.valid_embeddings self.normalized_embeddings transpose_b=<true>)<line_sep>w_i=tf.nn.embedding_lookup(self.embedding self.X)<line_sep># the 'output' matrix, or the coefficients of logistic regression
# for each class (words). This will be ignored once the embeddings
# have been computed
self.weights=tf.Variable(init name="weights")# weights
self.b=tf.Variable(tf.zeros(vocab_size) name="b" dtype=tf.float32)# biases
w_o=tf.nn.embedding_lookup(self.weights self.Y)<line_sep>w_b=tf.nn.embedding_lookup(self.b self.Y)<with_stmt>tf.name_scope("loss")<block_start>"""
Refer to Equation 4 in:
Distributed Representations of Words and Phrases and their Compositionality,
by Mikolov et al., 2013
loss = log(sigmoid(W_i.W_pos)) + E[log(sigmoid(-W_i.W_neg))]
Note: The second term above (E[.]) is an 'expectation'.
To approximate the expectation, we weight each sample by self.W.
To distinguish positive from negative examples, we multiply by self.Z (+1 or -1).
"""<line_sep>sim=tf.reduce_sum(tf.multiply(w_i w_o) axis=1)+w_b<line_sep>sim_sigmoids=tf.log(tf.nn.sigmoid(tf.multiply(sim self.Z)))<line_sep>log_lik_loss=-tf.reduce_mean(tf.multiply(sim_sigmoids self.W))<line_sep>optimizer=tf.train.AdamOptimizer(learning_rate=self.learning_rate)<line_sep>self.training_op=optimizer.minimize(log_lik_loss)<block_end>init=tf.global_variables_initializer()<line_sep>self.session=tf.Session()<line_sep>self.session.run(init)<line_sep>timer=Timer()<line_sep>i=0<for_stmt>epoch range(self.n_epochs)<block_start><for_stmt>x,y,z,w self.get_batches_skip_gram(seq window_size=self.window_size neg_samples=self.neg_samples)# logger.debug(np.hstack([y, x, z, w]))
<block_start>sim_v,log_lik_loss_v,_=self.session.run([sim log_lik_loss self.training_op] feed_dict={self.X:x self.Y:y self.Z:z self.W:w})<if_stmt>self.debug<and>(i<eq>0<or>(i+1)%5000<eq>0)# the original word2vec code for logging the most similar
# words for a particular word
<block_start>logger.debug("i: %d, log_lik_loss_v: %f"%(i log_lik_loss_v))<line_sep>sim_valid=self.session.run(self.similarity)<for_stmt>j range(valid_size)<block_start>valid_word=self.code2sensor[valid_examples[j]]<line_sep>nearest=(-sim_valid[j :]).argsort()[1:top_k+1]<line_sep>log_str='Nearest to %s:'%valid_word<for_stmt>k range(top_k)<block_start>close_word=self.code2sensor[nearest[k]]<line_sep>log_str='%s %s,'%(log_str close_word)<block_end>logger.debug(log_str)<block_end><block_end><if_stmt>(i+1)%5000<eq>0# logger.debug("sim_v: %s\n%s" % (str(sim_v.shape), str(sim_v)))
<block_start>logger.debug("processed %d"%(i+1))<line_sep># break # early terminate for DEBUG only
<block_end>i<augadd>1<block_end>logger.debug(timer.message("Completed epoch %d in"%epoch))<block_end><block_end><def_stmt>get_embeddings self normalized=<true><block_start><return>self.session.run(self.normalized_embeddings)<if>normalized<else>self.session.run(self.embedding)<block_end><def_stmt>get_batches_skip_gram self seq window_size=3 skip_size=1 n_contexts=10 neg_samples=3<block_start>""" Skip-gram model for word2vec
The max #samples per batch will be:
n_contexts x ((window_size - 1) + neg_samples)
:param window_size: int
length of each context window. Must be > 1 and must be an odd number.
:param skip_size: int
:param n_contexts: int
Number of context windows per batch.
:param neg_samples: int
Number of negative samples per window
:return:
"""<if_stmt>window_size<le>1<or>window_size%2<eq>0<block_start><raise>ValueError("window_size must be greater than 1 and must be odd")<block_end>n=len(seq)<line_sep>s=window_size<floordiv>2<line_sep>all_sensors=set(self.code2sensor.keys())<line_sep>st=0<line_sep>sz=(window_size-1)+neg_samples# number of samples per context window
batch_size=n_contexts<times>sz<line_sep>x=y=z=w=<none><for_stmt>i range(s n-s skip_size)<block_start><if_stmt>i+skip_size<ge>n<block_start>logger.debug("i: %d, n: %d, s: %d, sz: %d"%(i n s sz))<block_end><if_stmt>st<eq>0<block_start>x=np.zeros(batch_size dtype=np.int32)<line_sep>y=np.zeros(batch_size dtype=np.int32)<line_sep>z=np.zeros(batch_size dtype=np.float32)<line_sep>w=np.zeros(batch_size dtype=np.float32)<block_end>w_in=seq[i]<line_sep># w_in will be same for both positive and negative samples
x[st:(st+sz)]=w_in<line_sep>z[st:(st+2<times>s)]=1<line_sep>z[(st+2<times>s):(st+sz)]=-1<line_sep>w[st:(st+2<times>s)]=1# weights for positive samples
w[(st+2<times>s):(st+sz)]=1./neg_samples# weights for negative samples
# first, populate the positive examples
y[st:(st+s)]=seq[(i-s):i]<line_sep>y[(st+s):(st+2<times>s)]=seq[(i+1):(i+s+1)]<line_sep># Now, sample a few negative examples...
# sample a few sensor ids uniformly at random from those
# which do not occur in the current context
curr=set(seq[(i-s):(i+s)])# sensors in current context window
non_context=list(all_sensors-curr)# sensors *not* in current context window
np.random.shuffle(non_context)# random subsample
y[(st+2<times>s):(st+sz)]=non_context[0:neg_samples]<line_sep>st<augadd>sz<if_stmt>st<ge>batch_size<block_start><yield>x y z w<line_sep>st=0<block_end><block_end><block_end><block_end> |
<import_from_stmt>pyflakes.checker Checker<import_stmt>sys<import_stmt>ast<import_stmt>os<import_from_stmt>pathlib Path<import_from_stmt>filecmp dircmp<import_stmt>subprocess<import_from_stmt>pytest raises<import_stmt>pytest<import_from_stmt>..removestar names_to_replace star_imports get_names get_names_from_dir get_names_dynamically fix_code get_mod_filename replace_imports is_noqa_comment_allowing_star_import ExternalModuleError <line_sep>code_mod1="""\
a = 1
aa = 2
b = 3
"""<line_sep>mod1_names={'a' 'aa' 'b'}<line_sep>code_mod2="""\
b = 1
c = 2
cc = 3
"""<line_sep>mod2_names={'b' 'c' 'cc'}<line_sep>code_mod3="""\
name = 0
"""<line_sep>mod3_names={'name'}<line_sep>code_mod4="""\
from .mod1 import *
from .mod2 import *
from .mod3 import name
def func():
return a + b + c + d + d + name
"""<line_sep>mod4_names={'a' 'aa' 'b' 'c' 'cc' 'name' 'func'}<line_sep>code_mod4_fixed="""\
from .mod1 import a
from .mod2 import b, c
from .mod3 import name
def func():
return a + b + c + d + d + name
"""<line_sep>code_mod5="""\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""<line_sep>mod5_names={'a' 'aa' 'b' 'c' 'cc' 'name' 'func'}<line_sep>code_mod5_fixed="""\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
def func():
return a + b + c + d + d + name
"""<line_sep>code_mod6="""\
from os.path import *
isfile(join('a', 'b'))
"""<line_sep>code_mod6_fixed="""\
from os.path import isfile, join
isfile(join('a', 'b'))
"""<line_sep>code_mod7="""\
from .mod6 import *
"""<line_sep>code_mod7_fixed=""<line_sep>mod7_names={'isfile' 'join'}<line_sep>code_mod8="""\
a = 1
b = 2
c = 3
__all__ = ['a']
__all__ += ['b']
"""<line_sep>mod8_names={'a' 'b'}<line_sep>code_mod9="""\
from .mod8 import *
def func():
return a + b
"""<line_sep>code_mod9_fixed="""\
from .mod8 import a, b
def func():
return a + b
"""<line_sep>mod9_names={'a' 'b' 'func'}<line_sep>code_submod1="""\
from ..mod1 import *
from ..mod2 import *
from ..mod3 import name
from .submod3 import *
def func():
return a + b + c + d + d + e + name
"""<line_sep>submod1_names={'a' 'aa' 'b' 'c' 'cc' 'e' 'name' 'func'}<line_sep>code_submod1_fixed="""\
from ..mod1 import a
from ..mod2 import b, c
from ..mod3 import name
from .submod3 import e
def func():
return a + b + c + d + d + e + name
"""<line_sep>code_submod2="""\
from module.mod1 import *
from module.mod2 import *
from module.mod3 import name
from module.submod.submod3 import *
def func():
return a + b + c + d + d + e + name
"""<line_sep>submod2_names={'a' 'aa' 'b' 'c' 'cc' 'e' 'name' 'func'}<line_sep>code_submod2_fixed="""\
from module.mod1 import a
from module.mod2 import b, c
from module.mod3 import name
from module.submod.submod3 import e
def func():
return a + b + c + d + d + e + name
"""<line_sep>code_submod3="""\
e = 1
"""<line_sep>submod3_names={'e'}<line_sep>code_submod4="""\
from . import *
func()
"""<line_sep>submod4_names={'func'}<line_sep>code_submod4_fixed="""\
from . import func
func()
"""<line_sep>code_submod_init="""\
from .submod1 import func
"""<line_sep>submod_names={'func'}<line_sep># An actual import adds submod1 and submod3 to the submod namespace, since
# they are imported submodule names. The static code does not yet support
# these. If any other imports happen first, like 'import submod.submod2',
# those would be included as well.
submod_dynamic_names={'submod1' 'submod3' 'func'}<line_sep>code_bad_syntax="""\
from mod
"""<line_sep>code_mod_unfixable="""\
from .mod1 import *;
from .mod2 import\t*
def func():
return a + c
"""<line_sep>mod_unfixable_names={'a' 'aa' 'b' 'c' 'cc' 'func'}<line_sep>code_mod_commented_unused_star="""\
from .mod1 import * # comment about mod1
from .mod2 import * # noqa
"""<line_sep>mod_commented_unused_star_names={'a' 'aa' 'b' 'c' 'cc'}<line_sep>code_mod_commented_unused_star_fixed="""\
# comment about mod1
from .mod2 import * # noqa
"""<line_sep>code_mod_commented_star="""\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import * # generic comment
def func():
return a + c + name
"""<line_sep>mod_commented_star_names={'a' 'aa' 'b' 'c' 'cc' 'name' 'func'}<line_sep>code_mod_commented_star_fixed="""\
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
from .mod3 import name # generic comment
def func():
return a + c + name
"""<line_sep>code_submod_recursive_init="""\
from .submod1 import *
"""<line_sep>submod_recursive_names={'a' 'b'}<line_sep>submod_recursive_dynamic_names={'submod1' 'a' 'b'}<line_sep>code_submod_recursive_submod1="""\
a = 1
b = 2
"""<line_sep>submod_recursive_submod1_names={'a' 'b'}<line_sep>code_submod_recursive_submod2="""\
from . import *
def func():
return a + 1
"""<line_sep>submod_recursive_submod2_names={'a' 'b' 'func'}<line_sep>submod_recursive_submod2_dynamic_names={'a' 'b' 'func' 'submod1'}<line_sep>code_submod_recursive_submod2_fixed="""\
from . import a
def func():
return a + 1
"""<def_stmt>create_module module<block_start>os.makedirs(module)<with_stmt>open(module/'mod1.py' 'w')<as>f<block_start>f.write(code_mod1)<block_end><with_stmt>open(module/'mod2.py' 'w')<as>f<block_start>f.write(code_mod2)<block_end><with_stmt>open(module/'mod3.py' 'w')<as>f<block_start>f.write(code_mod3)<block_end><with_stmt>open(module/'mod4.py' 'w')<as>f<block_start>f.write(code_mod4)<block_end><with_stmt>open(module/'mod5.py' 'w')<as>f<block_start>f.write(code_mod5)<block_end><with_stmt>open(module/'mod6.py' 'w')<as>f<block_start>f.write(code_mod6)<block_end><with_stmt>open(module/'mod7.py' 'w')<as>f<block_start>f.write(code_mod7)<block_end><with_stmt>open(module/'mod8.py' 'w')<as>f<block_start>f.write(code_mod8)<block_end><with_stmt>open(module/'mod9.py' 'w')<as>f<block_start>f.write(code_mod9)<block_end><with_stmt>open(module/'__init__.py' 'w')<as>f<block_start><pass><block_end><with_stmt>open(module/'mod_bad.py' 'w')<as>f<block_start>f.write(code_bad_syntax)<block_end><with_stmt>open(module/'mod_unfixable.py' 'w')<as>f<block_start>f.write(code_mod_unfixable)<block_end><with_stmt>open(module/'mod_commented_unused_star.py' 'w')<as>f<block_start>f.write(code_mod_commented_unused_star)<block_end><with_stmt>open(module/'mod_commented_star.py' 'w')<as>f<block_start>f.write(code_mod_commented_star)<block_end>submod=module/'submod'<line_sep>os.makedirs(submod)<with_stmt>open(submod/'__init__.py' 'w')<as>f<block_start>f.write(code_submod_init)<block_end><with_stmt>open(submod/'submod1.py' 'w')<as>f<block_start>f.write(code_submod1)<block_end><with_stmt>open(submod/'submod2.py' 'w')<as>f<block_start>f.write(code_submod2)<block_end><with_stmt>open(submod/'submod3.py' 'w')<as>f<block_start>f.write(code_submod3)<block_end><with_stmt>open(submod/'submod4.py' 'w')<as>f<block_start>f.write(code_submod4)<block_end>submod_recursive=module/'submod_recursive'<line_sep>os.makedirs(submod_recursive)<with_stmt>open(submod_recursive/'__init__.py' 'w')<as>f<block_start>f.write(code_submod_recursive_init)<block_end><with_stmt>open(submod_recursive/'submod1.py' 'w')<as>f<block_start>f.write(code_submod_recursive_submod1)<block_end><with_stmt>open(submod_recursive/'submod2.py' 'w')<as>f<block_start>f.write(code_submod_recursive_submod2)<block_end><block_end><def_stmt>test_names_to_replace <block_start><for_stmt>code [code_mod1 code_mod2 code_mod3 code_mod7 code_mod8 code_submod3 code_submod_init code_submod_recursive_init code_submod_recursive_submod1]<block_start>names=names_to_replace(Checker(ast.parse(code)))<assert_stmt>names<eq>set()<block_end><for_stmt>code [code_mod4 code_mod5]<block_start>names=names_to_replace(Checker(ast.parse(code)))<assert_stmt>names<eq>{'a' 'b' 'c' 'd'}<block_end><for_stmt>code [code_submod1 code_submod2]<block_start>names=names_to_replace(Checker(ast.parse(code)))<assert_stmt>names<eq>{'a' 'b' 'c' 'd' 'e'}<block_end>names=names_to_replace(Checker(ast.parse(code_submod4)))<assert_stmt>names<eq>{'func'}<line_sep>names=names_to_replace(Checker(ast.parse(code_mod6)))<assert_stmt>names<eq>{'isfile' 'join'}<line_sep>names=names_to_replace(Checker(ast.parse(code_submod_recursive_submod2)))<assert_stmt>names<eq>{'a'}<line_sep>names=names_to_replace(Checker(ast.parse(code_mod9)))<assert_stmt>names<eq>{'a' 'b'}<line_sep>names=names_to_replace(Checker(ast.parse(code_mod_unfixable)))<assert_stmt>names<eq>{'a' 
'c'}<line_sep>names=names_to_replace(Checker(ast.parse(code_mod_commented_unused_star)))<assert_stmt>names<eq>set()<line_sep>names=names_to_replace(Checker(ast.parse(code_mod_commented_star)))<assert_stmt>names<eq>{'a' 'c' 'name'}<block_end><def_stmt>test_star_imports <block_start><for_stmt>code [code_mod1 code_mod2 code_mod3 code_mod8 code_submod3 code_submod_init code_submod_recursive_submod1]<block_start>stars=star_imports(Checker(ast.parse(code)))<assert_stmt>stars<eq>[]<block_end>stars=star_imports(Checker(ast.parse(code_mod4)))<assert_stmt>stars<eq>['.mod1' '.mod2']<line_sep>stars=star_imports(Checker(ast.parse(code_mod5)))<assert_stmt>stars<eq>['module.mod1' 'module.mod2']<line_sep>stars=star_imports(Checker(ast.parse(code_mod6)))<assert_stmt>stars<eq>['os.path']<line_sep>stars=star_imports(Checker(ast.parse(code_mod7)))<assert_stmt>stars<eq>['.mod6']<line_sep>stars=star_imports(Checker(ast.parse(code_mod9)))<assert_stmt>stars<eq>['.mod8']<line_sep>stars=star_imports(Checker(ast.parse(code_submod1)))<assert_stmt>stars<eq>['..mod1' '..mod2' '.submod3']<line_sep>stars=star_imports(Checker(ast.parse(code_submod2)))<assert_stmt>stars<eq>['module.mod1' 'module.mod2' 'module.submod.submod3']<for_stmt>code [code_submod4 code_submod_recursive_submod2]<block_start>stars=star_imports(Checker(ast.parse(code)))<assert_stmt>stars<eq>['.']<block_end>stars=star_imports(Checker(ast.parse(code_submod_recursive_init)))<assert_stmt>stars<eq>['.submod1']<line_sep>stars=star_imports(Checker(ast.parse(code_mod_unfixable)))<assert_stmt>stars<eq>['.mod1' '.mod2']<line_sep>stars=star_imports(Checker(ast.parse(code_mod_commented_unused_star)))<assert_stmt>stars<eq>['.mod1' '.mod2']<line_sep>stars=star_imports(Checker(ast.parse(code_mod_commented_star)))<assert_stmt>stars<eq>['.mod1' '.mod2' '.mod3']<block_end><def_stmt>test_get_names <block_start>names=get_names(code_mod1)<assert_stmt>names<eq>{'a' 'aa' 'b'}<line_sep>names=get_names(code_mod2)<assert_stmt>names<eq>{'b' 'c' 'cc'}<line_sep>names=get_names(code_mod3)<assert_stmt>names<eq>{'name'}<line_sep>names=get_names(code_mod4)<line_sep># TODO: Remove the imported name 'name'
<assert_stmt>names<eq>{'.mod1.*' '.mod2.*' 'name' 'func'}<line_sep>names=get_names(code_mod5)<line_sep># TODO: Remove the imported name 'name'
<assert_stmt>names<eq>{'module.mod1.*' 'module.mod2.*' 'name' 'func'}<line_sep>names=get_names(code_mod6)<assert_stmt>names<eq>{'os.path.*'}<line_sep>names=get_names(code_submod_init)<assert_stmt>names<eq>{'func'}<line_sep>names=get_names(code_submod1)<line_sep># TODO: Remove the imported name 'name'
<assert_stmt>names<eq>{'..mod1.*' '..mod2.*' '.submod3.*' 'name' 'func'}<line_sep>names=get_names(code_submod2)<line_sep># TODO: Remove the imported name 'name'
<assert_stmt>names<eq>{'module.mod1.*' 'module.mod2.*' 'module.submod.submod3.*' 'name' 'func'}<line_sep>names=get_names(code_submod3)<assert_stmt>names<eq>{'e'}<line_sep>names=get_names(code_submod4)<assert_stmt>names<eq>{'..*'}<line_sep>raises(SyntaxError <lambda>:get_names(code_bad_syntax))<line_sep>names=get_names(code_mod_unfixable)<assert_stmt>names<eq>{'.mod1.*' '.mod2.*' 'func'}<line_sep>names=get_names(code_mod_commented_unused_star)<assert_stmt>names<eq>{'.mod1.*' '.mod2.*'}<line_sep>names=get_names(code_mod_commented_star)<assert_stmt>names<eq>{'.mod1.*' '.mod2.*' '.mod3.*' 'func'}<line_sep>names=get_names(code_submod_recursive_init)<assert_stmt>names<eq>{'.submod1.*'}<line_sep>names=get_names(code_submod_recursive_submod1)<assert_stmt>names<eq>{'a' 'b'}<line_sep>names=get_names(code_submod_recursive_submod2)<assert_stmt>names<eq>{'..*' 'func'}<block_end>@pytest.mark.parametrize('relative' [<true> <false>])<def_stmt>test_get_names_from_dir tmpdir relative<block_start>directory=tmpdir/'module'<line_sep>create_module(directory)<if_stmt>relative<block_start>chdir=tmpdir<line_sep>directory=Path('module')<block_end><else_stmt><block_start>chdir='.'<block_end>curdir=os.path.abspath('.')<try_stmt><block_start>os.chdir(chdir)<assert_stmt>get_names_from_dir('.mod1' directory)<eq>mod1_names<assert_stmt>get_names_from_dir('.mod2' directory)<eq>mod2_names<assert_stmt>get_names_from_dir('.mod3' directory)<eq>mod3_names<assert_stmt>get_names_from_dir('.mod4' directory)<eq>mod4_names<assert_stmt>get_names_from_dir('.mod5' directory)<eq>mod5_names<assert_stmt>get_names_from_dir('.mod6' directory)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('.mod6' directory allow_dynamic=<false>))<assert_stmt>get_names_from_dir('.mod7' directory)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('.mod7' directory allow_dynamic=<false>))<assert_stmt>get_names_from_dir('.mod8' directory)<eq>mod8_names<assert_stmt>get_names_from_dir('.mod9' directory)<eq>mod9_names<assert_stmt>get_names_from_dir('.mod_unfixable' directory)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('.mod_commented_unused_star' directory)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('.mod_commented_star' directory)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('.submod' directory)<eq>submod_names<assert_stmt>get_names_from_dir('.submod.submod1' directory)<eq>submod1_names<assert_stmt>get_names_from_dir('.submod.submod2' directory)<eq>submod2_names<assert_stmt>get_names_from_dir('.submod.submod3' directory)<eq>submod3_names<assert_stmt>get_names_from_dir('.submod.submod4' directory)<eq>submod4_names<assert_stmt>get_names_from_dir('.submod_recursive' directory)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('.submod_recursive.submod1' directory)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('.submod_recursive.submod2' directory)<eq>submod_recursive_submod2_names<assert_stmt>get_names_from_dir('module.mod1' directory)<eq>mod1_names<assert_stmt>get_names_from_dir('module.mod2' directory)<eq>mod2_names<assert_stmt>get_names_from_dir('module.mod3' directory)<eq>mod3_names<assert_stmt>get_names_from_dir('module.mod4' directory)<eq>mod4_names<assert_stmt>get_names_from_dir('module.mod5' directory)<eq>mod5_names<assert_stmt>get_names_from_dir('module.mod6' directory)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod6' 
directory allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod7' directory)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod7' directory allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod8' directory)<eq>mod8_names<assert_stmt>get_names_from_dir('module.mod9' directory)<eq>mod9_names<assert_stmt>get_names_from_dir('module.mod_unfixable' directory)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('module.mod_commented_unused_star' directory)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('module.mod_commented_star' directory)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('module.submod' directory)<eq>submod_names<assert_stmt>get_names_from_dir('module.submod.submod1' directory)<eq>submod1_names<assert_stmt>get_names_from_dir('module.submod.submod2' directory)<eq>submod2_names<assert_stmt>get_names_from_dir('module.submod.submod3' directory)<eq>submod3_names<assert_stmt>get_names_from_dir('module.submod.submod4' directory)<eq>submod4_names<assert_stmt>get_names_from_dir('module.submod_recursive' directory)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod1' directory)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod2' directory)<eq>submod_recursive_submod2_names<line_sep>submod=directory/'submod'<assert_stmt>get_names_from_dir('..submod' submod)<eq>submod_names<assert_stmt>get_names_from_dir('.' submod)<eq>submod_names<assert_stmt>get_names_from_dir('.submod1' submod)<eq>submod1_names<assert_stmt>get_names_from_dir('.submod2' submod)<eq>submod2_names<assert_stmt>get_names_from_dir('.submod3' submod)<eq>submod3_names<assert_stmt>get_names_from_dir('.submod4' submod)<eq>submod4_names<assert_stmt>get_names_from_dir('..mod1' submod)<eq>mod1_names<assert_stmt>get_names_from_dir('..mod2' submod)<eq>mod2_names<assert_stmt>get_names_from_dir('..mod3' submod)<eq>mod3_names<assert_stmt>get_names_from_dir('..mod4' submod)<eq>mod4_names<assert_stmt>get_names_from_dir('..mod5' submod)<eq>mod5_names<assert_stmt>get_names_from_dir('..mod6' submod)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('..mod6' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('..mod7' submod)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('..mod7' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('..mod8' submod)<eq>mod8_names<assert_stmt>get_names_from_dir('..mod9' submod)<eq>mod9_names<assert_stmt>get_names_from_dir('..mod_unfixable' submod)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('..mod_commented_unused_star' submod)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('..mod_commented_star' submod)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('..submod_recursive' submod)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('..submod_recursive.submod1' submod)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('..submod_recursive.submod2' submod)<eq>submod_recursive_submod2_names<assert_stmt>get_names_from_dir('module.mod1' submod)<eq>mod1_names<assert_stmt>get_names_from_dir('module.mod2' submod)<eq>mod2_names<assert_stmt>get_names_from_dir('module.mod3' submod)<eq>mod3_names<assert_stmt>get_names_from_dir('module.mod4' submod)<eq>mod4_names<assert_stmt>get_names_from_dir('module.mod5' 
submod)<eq>mod5_names<assert_stmt>get_names_from_dir('module.mod6' submod)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod6' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod7' submod)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod7' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod8' submod)<eq>mod8_names<assert_stmt>get_names_from_dir('module.mod9' submod)<eq>mod9_names<assert_stmt>get_names_from_dir('module.mod_unfixable' submod)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('module.mod_commented_unused_star' submod)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('module.mod_commented_star' submod)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('module.submod' submod)<eq>submod_names<assert_stmt>get_names_from_dir('module.submod.submod1' submod)<eq>submod1_names<assert_stmt>get_names_from_dir('module.submod.submod2' submod)<eq>submod2_names<assert_stmt>get_names_from_dir('module.submod.submod3' submod)<eq>submod3_names<assert_stmt>get_names_from_dir('module.submod.submod4' submod)<eq>submod4_names<assert_stmt>get_names_from_dir('module.submod_recursive' submod)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod1' submod)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod2' submod)<eq>submod_recursive_submod2_names<line_sep>submod_recursive=directory/'submod_recursive'<assert_stmt>get_names_from_dir('..submod' submod_recursive)<eq>submod_names<assert_stmt>get_names_from_dir('..submod.submod1' submod_recursive)<eq>submod1_names<assert_stmt>get_names_from_dir('..submod.submod2' submod_recursive)<eq>submod2_names<assert_stmt>get_names_from_dir('..submod.submod3' submod_recursive)<eq>submod3_names<assert_stmt>get_names_from_dir('..submod.submod4' submod_recursive)<eq>submod4_names<assert_stmt>get_names_from_dir('..mod1' submod_recursive)<eq>mod1_names<assert_stmt>get_names_from_dir('..mod2' submod_recursive)<eq>mod2_names<assert_stmt>get_names_from_dir('..mod3' submod_recursive)<eq>mod3_names<assert_stmt>get_names_from_dir('..mod4' submod_recursive)<eq>mod4_names<assert_stmt>get_names_from_dir('..mod5' submod_recursive)<eq>mod5_names<assert_stmt>get_names_from_dir('..mod6' submod_recursive)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('..mod6' submod_recursive allow_dynamic=<false>))<assert_stmt>get_names_from_dir('..mod7' submod_recursive)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('..mod7' submod_recursive allow_dynamic=<false>))<assert_stmt>get_names_from_dir('..mod8' submod_recursive)<eq>mod8_names<assert_stmt>get_names_from_dir('..mod9' submod_recursive)<eq>mod9_names<assert_stmt>get_names_from_dir('..mod_unfixable' submod_recursive)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('..mod_commented_unused_star' submod_recursive)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('..mod_commented_star' submod_recursive)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('.' 
submod_recursive)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('..submod_recursive' submod_recursive)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('.submod1' submod_recursive)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('.submod2' submod_recursive)<eq>submod_recursive_submod2_names<assert_stmt>get_names_from_dir('module.mod1' submod_recursive)<eq>mod1_names<assert_stmt>get_names_from_dir('module.mod2' submod_recursive)<eq>mod2_names<assert_stmt>get_names_from_dir('module.mod3' submod_recursive)<eq>mod3_names<assert_stmt>get_names_from_dir('module.mod4' submod_recursive)<eq>mod4_names<assert_stmt>get_names_from_dir('module.mod5' submod_recursive)<eq>mod5_names<assert_stmt>get_names_from_dir('module.mod6' submod_recursive)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod6' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod7' submod_recursive)<eq>get_names_dynamically('os.path')<line_sep>raises(NotImplementedError <lambda>:get_names_from_dir('module.mod7' submod allow_dynamic=<false>))<assert_stmt>get_names_from_dir('module.mod8' submod_recursive)<eq>mod8_names<assert_stmt>get_names_from_dir('module.mod9' submod_recursive)<eq>mod9_names<assert_stmt>get_names_from_dir('module.mod_unfixable' submod_recursive)<eq>mod_unfixable_names<assert_stmt>get_names_from_dir('module.mod_commented_unused_star' submod)<eq>mod_commented_unused_star_names<assert_stmt>get_names_from_dir('module.mod_commented_star' submod)<eq>mod_commented_star_names<assert_stmt>get_names_from_dir('module.submod' submod_recursive)<eq>submod_names<assert_stmt>get_names_from_dir('module.submod.submod1' submod_recursive)<eq>submod1_names<assert_stmt>get_names_from_dir('module.submod.submod2' submod_recursive)<eq>submod2_names<assert_stmt>get_names_from_dir('module.submod.submod3' submod_recursive)<eq>submod3_names<assert_stmt>get_names_from_dir('module.submod.submod4' submod_recursive)<eq>submod4_names<assert_stmt>get_names_from_dir('module.submod_recursive' submod_recursive)<eq>submod_recursive_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod1' submod_recursive)<eq>submod_recursive_submod1_names<assert_stmt>get_names_from_dir('module.submod_recursive.submod2' submod_recursive)<eq>submod_recursive_submod2_names<line_sep>raises(ExternalModuleError <lambda>:get_names_from_dir('os.path' directory))<line_sep>raises(ExternalModuleError <lambda>:get_names_from_dir('os.path' submod))<line_sep>raises(RuntimeError <lambda>:get_names_from_dir('.mod_bad' directory))<line_sep>raises(RuntimeError <lambda>:get_names_from_dir('module.mod_bad' directory))<line_sep>raises(RuntimeError <lambda>:get_names_from_dir('.mod_doesnt_exist' directory))<line_sep>raises(RuntimeError <lambda>:get_names_from_dir('module.mod_doesnt_exist' directory))<block_end><finally_stmt><block_start>os.chdir(curdir)<block_end><block_end><def_stmt>test_get_names_dynamically tmpdir<block_start>os_path=get_names_dynamically('os.path')<assert_stmt>'isfile'<in>os_path<assert_stmt>'join'<in>os_path<line_sep>directory=tmpdir/'module'<line_sep>create_module(directory)<line_sep>sys_path=sys.path<try_stmt><block_start>sys.path.insert(0 
str(tmpdir))<assert_stmt>get_names_dynamically('module.mod1')<eq>mod1_names<assert_stmt>get_names_dynamically('module.mod2')<eq>mod2_names<assert_stmt>get_names_dynamically('module.mod3')<eq>mod3_names<assert_stmt>get_names_dynamically('module.mod4')<eq>mod4_names<assert_stmt>get_names_dynamically('module.mod5')<eq>mod5_names<assert_stmt>get_names_dynamically('module.mod6')<eq>os_path<assert_stmt>get_names_dynamically('module.mod7')<eq>os_path<assert_stmt>get_names_dynamically('module.mod8')<eq>mod8_names<assert_stmt>get_names_dynamically('module.mod9')<eq>mod9_names<assert_stmt>get_names_dynamically('module.mod_unfixable')<eq>mod_unfixable_names<assert_stmt>get_names_dynamically('module.mod_commented_unused_star')<eq>mod_commented_unused_star_names<assert_stmt>get_names_dynamically('module.mod_commented_star')<eq>mod_commented_star_names<assert_stmt>get_names_dynamically('module.submod')<eq>submod_dynamic_names<assert_stmt>get_names_dynamically('module.submod.submod1')<eq>submod1_names<assert_stmt>get_names_dynamically('module.submod.submod2')<eq>submod2_names<assert_stmt>get_names_dynamically('module.submod.submod3')<eq>submod3_names<line_sep>raises(RuntimeError <lambda>:get_names_dynamically('module.submod.submod4'))<assert_stmt>get_names_dynamically('module.submod_recursive')<eq>submod_recursive_dynamic_names<assert_stmt>get_names_dynamically('module.submod_recursive.submod1')<eq>submod_recursive_submod1_names<assert_stmt>get_names_dynamically('module.submod_recursive.submod2')<eq>submod_recursive_submod2_dynamic_names<line_sep># Doesn't actually import because of the undefined name 'd'
# assert get_names_dynamically('module.submod.submod4') == submod4_names
<block_end><finally_stmt><block_start>sys.path=sys_path<block_end>raises(RuntimeError <lambda>:get_names_dynamically('notarealmodule'))<block_end><def_stmt>test_fix_code tmpdir capsys# TODO: Test the verbose and quiet flags
<block_start>directory=tmpdir/'module'<line_sep>create_module(directory)<assert_stmt>fix_code(code_mod1 file=directory/'mod1.py')<eq>code_mod1<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_mod2 file=directory/'mod2.py')<eq>code_mod2<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_mod3 file=directory/'mod3.py')<eq>code_mod3<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_mod4 file=directory/'mod4.py')<eq>code_mod4_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>str(directory/'mod4.py')<in>err<assert_stmt>"'b'"<in>err<assert_stmt>"'a'"<not><in>err<assert_stmt>"'.mod1'"<in>err<assert_stmt>"'.mod2'"<in>err<assert_stmt>"Using '.mod2'"<in>err<assert_stmt>"could not find import for 'd'"<in>err<assert_stmt>fix_code(code_mod5 file=directory/'mod5.py')<eq>code_mod5_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>str(directory/'mod5.py')<in>err<assert_stmt>"'b'"<in>err<assert_stmt>"'a'"<not><in>err<assert_stmt>"'module.mod1'"<in>err<assert_stmt>"'module.mod2'"<in>err<assert_stmt>"Using 'module.mod2'"<in>err<assert_stmt>"could not find import for 'd'"<in>err<assert_stmt>fix_code(code_mod6 file=directory/'mod6.py')<eq>code_mod6_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>raises(NotImplementedError <lambda>:fix_code(code_mod6 file=directory/'mod6.py' allow_dynamic=<false>))<assert_stmt>fix_code(code_mod7 file=directory/'mod7.py')<eq>code_mod7_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>raises(NotImplementedError <lambda>:fix_code(code_mod7 file=directory/'mod7.py' allow_dynamic=<false>))<assert_stmt>fix_code(code_mod8 file=directory/'mod8.py')<eq>code_mod8<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_mod9 file=directory/'mod9.py')<eq>code_mod9_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_mod_unfixable file=directory/'mod_unfixable.py')<eq>code_mod_unfixable<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>'Could not find the star imports for'<in>err<for_stmt>mod ["'.mod1'" "'.mod2'"]<block_start><assert_stmt>mod<in>err<block_end><assert_stmt>fix_code(code_mod_commented_unused_star file=directory/'mod_commented_unused_star.py')<eq>code_mod_commented_unused_star_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>("The removed star import statement for '.mod1' had an inline "<concat>"comment which may not make sense without the import")<in>err<assert_stmt>fix_code(code_mod_commented_star file=directory/'mod_commented_star.py')<eq>code_mod_commented_star_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<line_sep>submod=directory/'submod'<assert_stmt>fix_code(code_submod_init file=submod/'__init__.py')<eq>code_submod_init<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_submod1 
file=submod/'submod1.py')<eq>code_submod1_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>str(submod/'submod1.py')<in>err<assert_stmt>"'b'"<in>err<assert_stmt>"'a'"<not><in>err<assert_stmt>"'..mod1'"<in>err<assert_stmt>"'..mod2'"<in>err<assert_stmt>"'.mod1'"<not><in>err<assert_stmt>"'.mod2'"<not><in>err<assert_stmt>"Using '..mod2'"<in>err<assert_stmt>"could not find import for 'd'"<in>err<assert_stmt>fix_code(code_submod2 file=submod/'submod2.py')<eq>code_submod2_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt>'Warning'<in>err<assert_stmt>str(submod/'submod2.py')<in>err<assert_stmt>"'b'"<in>err<assert_stmt>"'a'"<not><in>err<assert_stmt>"'module.mod1'"<in>err<assert_stmt>"'module.mod2'"<in>err<assert_stmt>"'module.submod.submod3'"<not><in>err<assert_stmt>"'module.submod.mod1'"<not><in>err<assert_stmt>"'module.submod.mod2'"<not><in>err<assert_stmt>"Using 'module.mod2'"<in>err<assert_stmt>"could not find import for 'd'"<in>err<assert_stmt>fix_code(code_submod3 file=submod/'submod3.py')<eq>code_submod3<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_submod4 file=submod/'submod4.py')<eq>code_submod4_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<line_sep>submod_recursive=directory/'submod_recursive'<line_sep># TODO: It's not actually useful to test this
<assert_stmt>fix_code(code_submod_recursive_init file=submod_recursive/'__init__.py')<eq>""<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_submod_recursive_submod1 file=submod_recursive/'submod1.py')<eq>code_submod_recursive_submod1<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<assert_stmt>fix_code(code_submod_recursive_submod2 file=submod_recursive/'submod2.py')<eq>code_submod_recursive_submod2_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<line_sep>raises(RuntimeError <lambda>:fix_code(code_bad_syntax file=directory/'mod_bad.py'))<line_sep>out,err=capsys.readouterr()<assert_stmt><not>out<assert_stmt><not>err<block_end><def_stmt>touch f<block_start><with_stmt>open(f 'w')<block_start><pass><block_end><block_end>@pytest.mark.parametrize('relative' [<true> <false>])<def_stmt>test_get_mod_filename tmpdir relative<block_start><if_stmt>relative<block_start>chdir=tmpdir<line_sep>tmpdir=Path('.')<block_end><else_stmt><block_start>chdir='.'<block_end>curdir=os.path.abspath('.')<try_stmt><block_start>os.chdir(chdir)<line_sep>module=tmpdir/'module'<line_sep>os.makedirs(module)<line_sep>touch(module/'__init__.py')<line_sep>touch(module/'mod1.py')<line_sep>submod=module/'submod'<line_sep>os.makedirs(submod)<line_sep>touch(submod/'__init__.py')<line_sep>touch(submod/'mod1.py')<line_sep>subsubmod=submod/'submod'<line_sep>os.makedirs(subsubmod)<line_sep>touch(subsubmod/'__init__.py')<line_sep>touch(subsubmod/'mod1.py')<def_stmt>_test mod directory expected<block_start>result=os.path.abspath(get_mod_filename(mod directory))<assert_stmt>result<eq>os.path.abspath(expected)<block_end>_test('.' module module/'__init__.py')<line_sep>_test('.mod1' module module/'mod1.py')<line_sep>_test('.submod' module submod/'__init__.py')<line_sep>_test('.submod.mod1' module submod/'mod1.py')<line_sep>_test('.submod.submod' module subsubmod/'__init__.py')<line_sep>_test('.submod.submod.mod1' module subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('.notreal' module))<line_sep>_test('module' module module/'__init__.py')<line_sep>_test('module.mod1' module module/'mod1.py')<line_sep>_test('module.submod' module submod/'__init__.py')<line_sep>_test('module.submod.mod1' module submod/'mod1.py')<line_sep>_test('module.submod.submod' module subsubmod/'__init__.py')<line_sep>_test('module.submod.submod.mod1' module subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.notreal' module))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.submod.notreal' module))<line_sep>raises(ExternalModuleError <lambda>:get_mod_filename('notreal.notreal' module))<line_sep>_test('..' submod module/'__init__.py')<line_sep>_test('..mod1' submod module/'mod1.py')<line_sep>_test('.' 
submod submod/'__init__.py')<line_sep>_test('.mod1' submod submod/'mod1.py')<line_sep>_test('..submod' submod submod/'__init__.py')<line_sep>_test('..submod.mod1' submod submod/'mod1.py')<line_sep>_test('.submod' submod subsubmod/'__init__.py')<line_sep>_test('.submod.mod1' submod subsubmod/'mod1.py')<line_sep>_test('..submod.submod' submod subsubmod/'__init__.py')<line_sep>_test('..submod.submod.mod1' submod subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('.notreal' submod))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('..notreal' submod))<line_sep>_test('module' submod module/'__init__.py')<line_sep>_test('module.mod1' submod module/'mod1.py')<line_sep>_test('module.submod' submod submod/'__init__.py')<line_sep>_test('module.submod.mod1' submod submod/'mod1.py')<line_sep>_test('module.submod.submod' submod subsubmod/'__init__.py')<line_sep>_test('module.submod.submod.mod1' submod subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.notreal' submod))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.submod.notreal' submod))<line_sep>raises(ExternalModuleError <lambda>:get_mod_filename('notreal.notreal' submod))<line_sep>_test('...' subsubmod module/'__init__.py')<line_sep>_test('...mod1' subsubmod module/'mod1.py')<line_sep>_test('..' subsubmod submod/'__init__.py')<line_sep>_test('..mod1' subsubmod submod/'mod1.py')<line_sep>_test('...submod' subsubmod submod/'__init__.py')<line_sep>_test('...submod.mod1' subsubmod submod/'mod1.py')<line_sep>_test('.' subsubmod subsubmod/'__init__.py')<line_sep>_test('.mod1' subsubmod subsubmod/'mod1.py')<line_sep>_test('...submod.submod' subsubmod subsubmod/'__init__.py')<line_sep>_test('...submod.submod.mod1' subsubmod subsubmod/'mod1.py')<line_sep>_test('..submod' subsubmod subsubmod/'__init__.py')<line_sep>_test('..submod.mod1' subsubmod subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('.notreal' subsubmod))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('..notreal' subsubmod))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('..notreal' subsubmod))<line_sep>_test('module' subsubmod module/'__init__.py')<line_sep>_test('module.mod1' subsubmod module/'mod1.py')<line_sep>_test('module.submod' subsubmod submod/'__init__.py')<line_sep>_test('module.submod.mod1' subsubmod submod/'mod1.py')<line_sep>_test('module.submod.submod' subsubmod subsubmod/'__init__.py')<line_sep>_test('module.submod.submod.mod1' subsubmod subsubmod/'mod1.py')<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.notreal' subsubmod))<line_sep>raises(RuntimeError <lambda>:get_mod_filename('module.submod.notreal' subsubmod))<line_sep>raises(ExternalModuleError <lambda>:get_mod_filename('notreal.notreal' subsubmod))<block_end><finally_stmt><block_start>os.chdir(curdir)<block_end><block_end><def_stmt>test_replace_imports # The verbose and quiet flags are already tested in test_fix_code
<block_start><for_stmt>code [code_mod1 code_mod2 code_mod3 code_mod8 code_submod3 code_submod_init code_submod_recursive_submod1 code_mod_unfixable]<block_start><assert_stmt>replace_imports(code repls={} verbose=<false> quiet=<true>)<eq>code<block_end><assert_stmt>replace_imports(code_mod4 repls={'.mod1':['a'] '.mod2':['b' 'c']} verbose=<false> quiet=<true>)<eq>code_mod4_fixed<assert_stmt>replace_imports(code_mod5 repls={'module.mod1':['a'] 'module.mod2':['b' 'c']} verbose=<false> quiet=<true>)<eq>code_mod5_fixed<assert_stmt>replace_imports(code_mod6 repls={'os.path':['isfile' 'join']} verbose=<false> quiet=<false>)<eq>code_mod6_fixed<assert_stmt>replace_imports(code_mod7 repls={'.mod6':[]} verbose=<false> quiet=<false>)<eq>code_mod7_fixed<assert_stmt>replace_imports(code_mod9 repls={'.mod8':['a' 'b']} verbose=<false> quiet=<false>)<eq>code_mod9_fixed<assert_stmt>replace_imports(code_submod1 repls={'..mod1':['a'] '..mod2':['b' 'c'] '.submod3':['e']} verbose=<false> quiet=<true>)<eq>code_submod1_fixed<assert_stmt>replace_imports(code_submod2 repls={'module.mod1':['a'] 'module.mod2':['b' 'c'] 'module.submod.submod3':['e']} verbose=<false> quiet=<true>)<eq>code_submod2_fixed<assert_stmt>replace_imports(code_submod4 repls={'.':['func']} verbose=<false> quiet=<true>)<eq>code_submod4_fixed<assert_stmt>replace_imports(code_submod_recursive_submod2 repls={'.':['a']})<eq>code_submod_recursive_submod2_fixed<assert_stmt>replace_imports(code_mod_unfixable repls={'.mod1':['a'] '.mod2':['c'] '.mod3':['name']})<eq>code_mod_unfixable<assert_stmt>replace_imports(code_mod_commented_unused_star repls={'.mod1':[] '.mod2':[]})<eq>code_mod_commented_unused_star_fixed<assert_stmt>replace_imports(code_mod_commented_star repls={'.mod1':['a'] '.mod2':['c'] '.mod3':['name']})<eq>code_mod_commented_star_fixed<block_end>@pytest.mark.parametrize('verbose_enabled, verbose_kwarg' [(<false> {}) # Default is False
(<false> {'verbose':<false>}) (<true> {'verbose':<true>}) ] ids=['implicit no verbose' 'explicit no verbose' 'explicit verbose'])@pytest.mark.parametrize('kwargs, fixed_code, verbose_messages' [(dict(code=code_mod4 repls={'.mod1':['a'] '.mod2':['b' 'c']}) code_mod4_fixed ["Replacing 'from .mod1 import *' with 'from .mod1 import a'" "Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"]) (dict(code=code_mod4 repls={'.mod1':['a'] '.mod2':['b' 'c']} file='directory/mod4.py') code_mod4_fixed ["directory/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'" "directory/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'"]) (dict(code=code_mod_commented_star repls={'.mod1':['a'] '.mod2':['c'] '.mod3':['name']}) code_mod_commented_star_fixed ["Replacing 'from .mod3 import *' with 'from .mod3 import name'" "Retaining 'from .mod1 import *' due to noqa comment" "Retaining 'from .mod2 import *' due to noqa comment"]) (dict(code=code_mod_commented_star repls={'.mod1':['a'] '.mod2':['c'] '.mod3':['name']} file='directory/mod_commented_star.py') code_mod_commented_star_fixed ["directory/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'" "directory/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment" "directory/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment"]) ] ids=['mod4 without file' 'mod4 with file' 'mod_commented_star without file' 'mod_commented_star with file'])<def_stmt>test_replace_imports_verbose_messages kwargs fixed_code verbose_messages verbose_enabled verbose_kwarg capsys<block_start><assert_stmt>replace_imports(**kwargs **verbose_kwarg)<eq>fixed_code<line_sep>_,err=capsys.readouterr()<if_stmt>verbose_enabled<block_start><assert_stmt>sorted(err.splitlines())<eq>verbose_messages<block_end><else_stmt><block_start><assert_stmt>err<eq>''<block_end><block_end><def_stmt>test_replace_imports_warnings capsys<block_start><assert_stmt>replace_imports(code_mod_unfixable file='module/mod_unfixable.py' repls={'.mod1':['a'] '.mod2':['c']})<eq>code_mod_unfixable<line_sep>out,err=capsys.readouterr()<assert_stmt>set(err.splitlines())<eq>{"Warning: module/mod_unfixable.py: Could not find the star imports for '.mod1'" "Warning: module/mod_unfixable.py: Could not find the star imports for '.mod2'"}<assert_stmt>replace_imports(code_mod_unfixable file=<none> repls={'.mod1':['a'] '.mod2':['c']})<eq>code_mod_unfixable<line_sep>out,err=capsys.readouterr()<assert_stmt>set(err.splitlines())<eq>{"Warning: Could not find the star imports for '.mod1'" "Warning: Could not find the star imports for '.mod2'"}<assert_stmt>replace_imports(code_mod_unfixable quiet=<true> repls={'.mod1':['a'] '.mod2':['c']})<eq>code_mod_unfixable<line_sep>out,err=capsys.readouterr()<assert_stmt>err<eq>''<assert_stmt>replace_imports(code_mod_commented_unused_star file='module/mod_commented_unused_star.py' repls={'.mod1':[] '.mod2':[]})<eq>code_mod_commented_unused_star_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt>set(err.splitlines())<eq>{"Warning: module/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import" }<assert_stmt>replace_imports(code_mod_commented_unused_star file=<none> repls={'.mod1':[] '.mod2':[]})<eq>code_mod_commented_unused_star_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt>set(err.splitlines())<eq>{"Warning: The removed star import statement for '.mod1' had an inline comment which may not make 
sense without the import" }<assert_stmt>replace_imports(code_mod_commented_unused_star quiet=<true> repls={'.mod1':[] '.mod2':[]})<eq>code_mod_commented_unused_star_fixed<line_sep>out,err=capsys.readouterr()<assert_stmt>err<eq>''<block_end><def_stmt>test_replace_imports_line_wrapping <block_start>code="""\
from reallyreallylongmodulename import *
print(longname1, longname2, longname3, longname4, longname5, longname6,
longname7, longname8, longname9)
"""<line_sep>code_fixed="""\
{imp}
print(longname1, longname2, longname3, longname4, longname5, longname6,
longname7, longname8, longname9)
"""<line_sep>repls={'reallyreallylongmodulename':['longname1' 'longname2' 'longname3' 'longname4' 'longname5' 'longname6' 'longname7' 'longname8' 'longname9']}<assert_stmt>replace_imports(code repls)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5,
longname6, longname7, longname8, longname9)''')<line_sep># Make sure the first line has at least one imported name.
# There's no point in doing
#
# from mod import (
# name,
#
# if we are aligning the names to the opening parenthesis anyway.
<assert_stmt>replace_imports(code repls max_line_length=49)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')<assert_stmt>replace_imports(code repls max_line_length=50)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')<assert_stmt>replace_imports(code repls max_line_length=51)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1,
longname2,
longname3,
longname4,
longname5,
longname6,
longname7,
longname8,
longname9)''')<assert_stmt>replace_imports(code repls max_line_length=120)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7,
longname8, longname9)''')<assert_stmt>len("from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9")<eq>136<assert_stmt>replace_imports(code repls max_line_length=137)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')<assert_stmt>replace_imports(code repls max_line_length=136)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')<assert_stmt>replace_imports(code repls max_line_length=135)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import (longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8,
longname9)''')<assert_stmt>replace_imports(code repls max_line_length=200)<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')<assert_stmt>replace_imports(code repls max_line_length=float('inf'))<eq>code_fixed.format(imp='''\
from reallyreallylongmodulename import longname1, longname2, longname3, longname4, longname5, longname6, longname7, longname8, longname9''')<block_end>@pytest.mark.parametrize('case_permutation' [<lambda>s:s <lambda>s:s.upper() <lambda>s:s.lower()] ids=['same case' 'upper case' 'lower case'])@pytest.mark.parametrize('allows_star, comment' [(<true> '# noqa') (<true> '#noqa') (<true> '# noqa ') (<false> '# noqa foo bar') (<false> '# noqa:') (<false> '# noqa :') (<true> '# noqa: F401') (<true> '#noqa:F401') (<true> '# noqa: F401 ') (<true> '#\tnoqa:\tF401\t') (<true> '# noqa: F403') (<true> '# noqa: A1,F403,A1') (<true> '# noqa: A1 F401 A1') (<true> '# noqa: A1, F401, A1') (<true> '# noqa: A1 , F401 , A1') (<false> '# generic comment') (<false> '#') (<false> '') (<false> '# foo: F401') (<false> '# F401') (<false> '# noqa F401') # missing : after noqa
])<def_stmt>test_is_noqa_comment_allowing_star_import case_permutation allows_star comment<block_start><assert_stmt>is_noqa_comment_allowing_star_import(case_permutation(comment))<is>allows_star<block_end><def_stmt>_dirs_equal cmp<block_start><if_stmt>cmp.diff_files<block_start><return><false><block_end><if_stmt><not>cmp.subdirs<block_start><return><true><block_end><return>all(_dirs_equal(c)<for>c cmp.subdirs.values())<block_end><def_stmt>test_cli tmpdir<block_start><import_from_stmt>..__main__ __file__<line_sep># TODO: Test the verbose and quiet flags
directory_orig=tmpdir/'orig'/'module'<line_sep>directory=tmpdir/'module'<line_sep>create_module(directory)<line_sep>create_module(directory_orig)<line_sep>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep># Make sure we are running the command for the right file
p=subprocess.run([sys.executable '-m' 'removestar' '--_this-file' 'none'] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<assert_stmt>p.stderr<eq>''<assert_stmt>p.stdout<eq>__file__<line_sep>p=subprocess.run([sys.executable '-m' 'removestar' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<line_sep>warnings=set(f"""\
Warning: {directory}/submod/submod1.py: 'b' comes from multiple modules: '..mod1', '..mod2'. Using '..mod2'.
Warning: {directory}/submod/submod1.py: could not find import for 'd'
Warning: {directory}/submod/submod2.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/submod/submod2.py: could not find import for 'd'
Warning: {directory}/mod4.py: 'b' comes from multiple modules: '.mod1', '.mod2'. Using '.mod2'.
Warning: {directory}/mod4.py: could not find import for 'd'
Warning: {directory}/mod5.py: 'b' comes from multiple modules: 'module.mod1', 'module.mod2'. Using 'module.mod2'.
Warning: {directory}/mod5.py: could not find import for 'd'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod1'
Warning: {directory}/mod_unfixable.py: Could not find the star imports for '.mod2'
Warning: {directory}/mod_commented_unused_star.py: The removed star import statement for '.mod1' had an inline comment which may not make sense without the import
""".splitlines())<line_sep>error=f"Error with {directory}/mod_bad.py: SyntaxError: invalid syntax (mod_bad.py, line 1)"<assert_stmt>set(p.stderr.splitlines())<eq>warnings.union({error})<line_sep>diffs=[f"""\
--- original/{directory}/mod4.py
+++ fixed/{directory}/mod4.py
@@ -1,5 +1,5 @@
-from .mod1 import *
-from .mod2 import *
+from .mod1 import a
+from .mod2 import b, c
from .mod3 import name
\n\
def func():\
""" f"""\
--- original/{directory}/mod5.py
+++ fixed/{directory}/mod5.py
@@ -1,5 +1,5 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
\n\
def func():\
""" f"""\
--- original/{directory}/mod6.py
+++ fixed/{directory}/mod6.py
@@ -1,2 +1,2 @@
-from os.path import *
+from os.path import isfile, join
isfile(join('a', 'b'))\
""" f"""\
--- original/{directory}/mod7.py
+++ fixed/{directory}/mod7.py
@@ -1 +0,0 @@
-from .mod6 import *\
""" f"""\
--- original/{directory}/mod9.py
+++ fixed/{directory}/mod9.py
@@ -1,4 +1,4 @@
-from .mod8 import *
+from .mod8 import a, b
\n\
def func():
return a + b\
""" f"""\
--- original/{directory}/mod_commented_unused_star.py
+++ fixed/{directory}/mod_commented_unused_star.py
@@ -1,2 +1,2 @@
-from .mod1 import * # comment about mod1
+# comment about mod1
from .mod2 import * # noqa\
""" f"""\
--- original/{directory}/mod_commented_star.py
+++ fixed/{directory}/mod_commented_star.py
@@ -1,6 +1,6 @@
from .mod1 import * # noqa
from .mod2 import * # noqa: F401
-from .mod3 import * # generic comment
+from .mod3 import name # generic comment
\n\
def func():\
""" f"""\
--- original/{directory}/submod/submod1.py
+++ fixed/{directory}/submod/submod1.py
@@ -1,7 +1,7 @@
-from ..mod1 import *
-from ..mod2 import *
+from ..mod1 import a
+from ..mod2 import b, c
from ..mod3 import name
-from .submod3 import *
+from .submod3 import e
\n\
def func():
return a + b + c + d + d + e + name\
""" f"""\
--- original/{directory}/submod/submod2.py
+++ fixed/{directory}/submod/submod2.py
@@ -1,7 +1,7 @@
-from module.mod1 import *
-from module.mod2 import *
+from module.mod1 import a
+from module.mod2 import b, c
from module.mod3 import name
-from module.submod.submod3 import *
+from module.submod.submod3 import e
\n\
def func():
return a + b + c + d + d + e + name\
""" f"""\
--- original/{directory}/submod/submod4.py
+++ fixed/{directory}/submod/submod4.py
@@ -1,3 +1,3 @@
-from . import *
+from . import func
\n\
func()\
""" f"""\
--- original/{directory}/submod_recursive/submod2.py
+++ fixed/{directory}/submod_recursive/submod2.py
@@ -1,4 +1,4 @@
-from . import *
+from . import a
\n\
def func():
return a + 1\
""" ]<line_sep>unchanged=['__init__.py' 'mod_bad.py' 'mod_unfixable.py']<for_stmt>d diffs<block_start><assert_stmt>d<in>p.stdout p.stdout<block_end><for_stmt>mod_path unchanged<block_start><assert_stmt>'--- original/{directory}/{mod_path}'<not><in>p.stdout<block_end>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep>p=subprocess.run([sys.executable '-m' 'removestar' '--quiet' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<assert_stmt>p.stderr<eq>''<for_stmt>d diffs<block_start><assert_stmt>d<in>p.stdout<block_end>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep>p=subprocess.run([sys.executable '-m' 'removestar' '--verbose' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<line_sep>changes=set(f"""\
{directory}/mod4.py: Replacing 'from .mod1 import *' with 'from .mod1 import a'
{directory}/mod4.py: Replacing 'from .mod2 import *' with 'from .mod2 import b, c'
{directory}/mod5.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/mod5.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/mod6.py: Replacing 'from os.path import *' with 'from os.path import isfile, join'
{directory}/mod7.py: Replacing 'from .mod6 import *' with ''
{directory}/mod9.py: Replacing 'from .mod8 import *' with 'from .mod8 import a, b'
{directory}/mod_commented_unused_star.py: Replacing 'from .mod1 import *' with ''
{directory}/mod_commented_unused_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/mod_commented_star.py: Replacing 'from .mod3 import *' with 'from .mod3 import name'
{directory}/mod_commented_star.py: Retaining 'from .mod1 import *' due to noqa comment
{directory}/mod_commented_star.py: Retaining 'from .mod2 import *' due to noqa comment
{directory}/submod/submod1.py: Replacing 'from ..mod1 import *' with 'from ..mod1 import a'
{directory}/submod/submod1.py: Replacing 'from ..mod2 import *' with 'from ..mod2 import b, c'
{directory}/submod/submod1.py: Replacing 'from .submod3 import *' with 'from .submod3 import e'
{directory}/submod/submod4.py: Replacing 'from . import *' with 'from . import func'
{directory}/submod/submod2.py: Replacing 'from module.mod1 import *' with 'from module.mod1 import a'
{directory}/submod/submod2.py: Replacing 'from module.mod2 import *' with 'from module.mod2 import b, c'
{directory}/submod/submod2.py: Replacing 'from module.submod.submod3 import *' with 'from module.submod.submod3 import e'
{directory}/submod_recursive/submod2.py: Replacing 'from . import *' with 'from . import a'
""".splitlines())<assert_stmt>set(p.stderr.splitlines())<eq>changes.union({error}).union(warnings)<for_stmt>d diffs<block_start><assert_stmt>d<in>p.stdout p.stdout<block_end>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep>p=subprocess.run([sys.executable '-m' 'removestar' '--no-dynamic-importing' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<line_sep>static_error=set(f"""\
Error with {directory}/mod6.py: Static determination of external module imports is not supported.
Error with {directory}/mod7.py: Static determination of external module imports is not supported.
""".splitlines())<assert_stmt>set(p.stderr.splitlines())<eq>{error}.union(static_error).union(warnings)<for_stmt>d diffs<block_start><if_stmt>'mod6'<in>d<block_start><assert_stmt>d<not><in>p.stdout<block_end><else_stmt><block_start><assert_stmt>d<in>p.stdout p.stdout<block_end><block_end>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep># Test --quiet hides both errors
p=subprocess.run([sys.executable '-m' 'removestar' '--quiet' '--no-dynamic-importing' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<assert_stmt>p.stderr<eq>''<for_stmt>d diffs<block_start><if_stmt>'mod6'<in>d<block_start><assert_stmt>d<not><in>p.stdout<block_end><else_stmt><block_start><assert_stmt>d<in>p.stdout p.stdout<block_end><block_end>cmp=dircmp(directory directory_orig)<assert_stmt>_dirs_equal(cmp)<line_sep># XXX: This modifies directory, so keep it at the end of the test
p=subprocess.run([sys.executable '-m' 'removestar' '--quiet' '-i' directory] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<assert_stmt>p.stderr<eq>''<assert_stmt>p.stdout<eq>''<line_sep>cmp=dircmp(directory directory_orig)<assert_stmt><not>_dirs_equal(cmp)<assert_stmt>cmp.diff_files<eq>['mod4.py' 'mod5.py' 'mod6.py' 'mod7.py' 'mod9.py' 'mod_commented_star.py' 'mod_commented_unused_star.py']<assert_stmt>cmp.subdirs['submod'].diff_files<eq>['submod1.py' 'submod2.py' 'submod4.py']<assert_stmt>cmp.subdirs['submod_recursive'].diff_files<eq>['submod2.py']<with_stmt>open(directory/'mod4.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod4_fixed<block_end><with_stmt>open(directory/'mod5.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod5_fixed<block_end><with_stmt>open(directory/'mod6.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod6_fixed<block_end><with_stmt>open(directory/'mod7.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod7_fixed<block_end><with_stmt>open(directory/'mod9.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod9_fixed<block_end><with_stmt>open(directory/'mod_commented_unused_star.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod_commented_unused_star_fixed<block_end><with_stmt>open(directory/'mod_commented_star.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod_commented_star_fixed<block_end><with_stmt>open(directory/'submod'/'submod1.py')<as>f<block_start><assert_stmt>f.read()<eq>code_submod1_fixed<block_end><with_stmt>open(directory/'submod'/'submod2.py')<as>f<block_start><assert_stmt>f.read()<eq>code_submod2_fixed<block_end><with_stmt>open(directory/'submod'/'submod4.py')<as>f<block_start><assert_stmt>f.read()<eq>code_submod4_fixed<block_end><with_stmt>open(directory/'submod_recursive'/'submod2.py')<as>f<block_start><assert_stmt>f.read()<eq>code_submod_recursive_submod2_fixed<block_end><with_stmt>open(directory/'mod_bad.py')<as>f<block_start><assert_stmt>f.read()<eq>code_bad_syntax<block_end><with_stmt>open(directory/'mod_unfixable.py')<as>f<block_start><assert_stmt>f.read()<eq>code_mod_unfixable<block_end># Test error on nonexistent file
p=subprocess.run([sys.executable '-m' 'removestar' directory/'notarealfile.py'] stdout=subprocess.PIPE stderr=subprocess.PIPE encoding='utf-8')<assert_stmt>p.stderr<eq>f'Error: {directory}/notarealfile.py: no such file or directory\n'<assert_stmt>p.stdout<eq>''<block_end> |
<import_from_stmt>..utils TranspileTestCase BuiltinFunctionTestCase SAMPLE_SUBSTITUTIONS<class_stmt>ListTests(TranspileTestCase)<block_start><pass><block_end><class_stmt>BuiltinListFunctionTests(BuiltinFunctionTestCase TranspileTestCase)<block_start>functions=["list"]<line_sep>substitutions=dict(SAMPLE_SUBSTITUTIONS)<line_sep>substitutions.update({"[1, 2.3456, 'another']":["[1, 'another', 2.3456]" "[2.3456, 1, 'another']" "[2.3456, 'another', 1]" "['another', 1, 2.3456]" "['another', 2.3456, 1]" ] "['a', 'c', 'd']":["['a', 'd', 'c']" "['c', 'a', 'd']" "['c', 'd', 'a']" "['d', 'a', 'c']" "['d', 'c', 'a']" ]})<block_end> |
<import_from_stmt>picktrue.sites utils<def_stmt>test_get_name_ext_from_url <block_start><assert_stmt>utils.get_filename_fom_url("https://img9.doubanio.com/view/photo/l/public/p2208623414.jpg")<eq>"p2208623414.jpg"<assert_stmt>utils.get_filename_fom_url("https://img9.doubanio.com/view/photo/l/public/p2208623414.jpg?hello=world")<eq>"p2208623414.jpg"<block_end> |
<import_from_stmt>nipype logging<import_from_stmt>nipype.interfaces ants<line_sep>logger=logging.getLogger('workflow')<import_from_stmt>CPAC.pipeline nipype_pipeline_engine<as>pe<import_stmt>nipype.interfaces.fsl<as>fsl<import_stmt>nipype.interfaces.utility<as>util<import_from_stmt>nipype.interfaces afni<import_from_stmt>nipype.interfaces.afni preprocess<import_from_stmt>nipype.interfaces.afni utils<as>afni_utils<import_from_stmt>CPAC.func_preproc.utils add_afni_prefix nullify chunk_ts split_ts_chunks oned_text_concat notch_filter_motion<import_from_stmt>CPAC.utils.interfaces.function Function<import_from_stmt>CPAC.generate_motion_statistics motion_power_statistics<import_from_stmt>CPAC.utils.utils check_prov_for_motion_tool<line_sep># niworkflows
<import_from_stmt>..utils.interfaces.ants AI<def_stmt>collect_arguments *args<block_start>command_args=[]<if_stmt>args[0]<block_start>command_args<augadd>[args[1]]<block_end>command_args<augadd>args[2:]<line_sep><return>' '.join(command_args)<block_end><def_stmt>anat_refined_mask init_bold_mask=<true> wf_name='init_bold_mask'<block_start>wf=pe.Workflow(name=wf_name)<line_sep>input_node=pe.Node(util.IdentityInterface(fields=['func' 'anatomical_brain_mask' 'anat_brain' 'init_func_brain_mask']) name='inputspec')<line_sep>output_node=pe.Node(util.IdentityInterface(fields=['func_brain_mask']) name='outputspec')<line_sep># 1 Take single volume of func
func_single_volume=pe.Node(interface=afni.Calc() name='func_single_volume')<line_sep># TODO add an option to select volume
func_single_volume.inputs.set(expr='a' single_idx=1 outputtype='NIFTI_GZ')<line_sep>wf.connect(input_node 'func' func_single_volume 'in_file_a')<line_sep># 2 get temporary func brain
func_tmp_brain=pe.Node(interface=afni_utils.Calc() name='func_tmp_brain')<line_sep>func_tmp_brain.inputs.expr='a*b'<line_sep>func_tmp_brain.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_single_volume 'out_file' func_tmp_brain 'in_file_a')<line_sep># 2.1 get a tmp func brain mask
<if_stmt>init_bold_mask<eq><true># 2.1.1 N4BiasFieldCorrection single volume of raw_func
<block_start>func_single_volume_n4_corrected=pe.Node(interface=ants.N4BiasFieldCorrection(dimension=3 copy_header=<true> bspline_fitting_distance=200) shrink_factor=2 name='func_single_volume_n4_corrected')<line_sep>func_single_volume_n4_corrected.inputs.args='-r True'<line_sep>wf.connect(func_single_volume 'out_file' func_single_volume_n4_corrected 'input_image')<line_sep># 2.1.2 bet n4 corrected image - generate tmp func brain mask
func_tmp_brain_mask=pe.Node(interface=fsl.BET() name='func_tmp_brain_mask_pre')<line_sep>func_tmp_brain_mask.inputs.mask=<true><line_sep>wf.connect(func_single_volume_n4_corrected 'output_image' func_tmp_brain_mask 'in_file')<line_sep># 2.1.3 dilate func tmp brain mask
func_tmp_brain_mask_dil=pe.Node(interface=fsl.ImageMaths() name='func_tmp_brain_mask_dil')<line_sep>func_tmp_brain_mask_dil.inputs.op_string='-dilM'<line_sep>wf.connect(func_tmp_brain_mask 'mask_file' func_tmp_brain_mask_dil 'in_file')<line_sep>wf.connect(func_tmp_brain_mask_dil 'out_file' func_tmp_brain 'in_file_b')<block_end><else_stmt># 2.1.1 connect dilated init func brain mask
<block_start>wf.connect(input_node 'init_func_brain_mask' func_tmp_brain 'in_file_b')<block_end># 3. get transformation of anat to func
# 3.1 Register func tmp brain to anat brain to get func2anat matrix
linear_reg_func_to_anat=pe.Node(interface=fsl.FLIRT() name='func_to_anat_linear_reg')<line_sep>linear_reg_func_to_anat.inputs.cost='mutualinfo'<line_sep>linear_reg_func_to_anat.inputs.dof=6<line_sep>wf.connect(func_tmp_brain 'out_file' linear_reg_func_to_anat 'in_file')<line_sep>wf.connect(input_node 'anat_brain' linear_reg_func_to_anat 'reference')<line_sep># 3.2 Inverse func to anat affine
inv_func_to_anat_affine=pe.Node(interface=fsl.ConvertXFM() name='inv_func2anat_affine')<line_sep>inv_func_to_anat_affine.inputs.invert_xfm=<true><line_sep>wf.connect(linear_reg_func_to_anat 'out_matrix_file' inv_func_to_anat_affine 'in_file')<line_sep># 4. anat mask to func space
# Transform anatomical mask to functional space to get BOLD mask
reg_anat_mask_to_func=pe.Node(interface=fsl.FLIRT() name='reg_anat_mask_to_func')<line_sep>reg_anat_mask_to_func.inputs.apply_xfm=<true><line_sep>reg_anat_mask_to_func.inputs.cost='mutualinfo'<line_sep>reg_anat_mask_to_func.inputs.dof=6<line_sep>reg_anat_mask_to_func.inputs.interp='nearestneighbour'<line_sep>wf.connect(input_node 'anatomical_brain_mask' reg_anat_mask_to_func 'in_file')<line_sep>wf.connect(func_tmp_brain 'out_file' reg_anat_mask_to_func 'reference')<line_sep>wf.connect(inv_func_to_anat_affine 'out_file' reg_anat_mask_to_func 'in_matrix_file')<line_sep># 5. get final func mask: refine func tmp mask with anat_mask_in_func mask
func_mask=pe.Node(interface=fsl.MultiImageMaths() name='func_mask')<line_sep>func_mask.inputs.op_string="-mul %s"<line_sep>wf.connect(reg_anat_mask_to_func 'out_file' func_mask 'operand_files')<if_stmt>init_bold_mask<eq><true><block_start>wf.connect(func_tmp_brain_mask_dil 'out_file' func_mask 'in_file')<block_end><else_stmt><block_start>wf.connect(input_node 'init_func_brain_mask' func_mask 'in_file')<block_end>wf.connect(func_mask 'out_file' output_node 'func_brain_mask')<line_sep><return>wf<block_end><def_stmt>anat_based_mask wf_name='bold_mask'# reference DCAN lab BOLD mask
# https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
<block_start>wf=pe.Workflow(name=wf_name)<line_sep>input_node=pe.Node(util.IdentityInterface(fields=['func' 'anat_brain' 'anat_head']) name='inputspec')<line_sep>output_node=pe.Node(util.IdentityInterface(fields=['func_brain_mask']) name='outputspec')<line_sep># 0. Take single volume of func
func_single_volume=pe.Node(interface=afni.Calc() name='func_single_volume')<line_sep>func_single_volume.inputs.set(expr='a' single_idx=1 outputtype='NIFTI_GZ')<line_sep>wf.connect(input_node 'func' func_single_volume 'in_file_a')<line_sep># 1. Register func head to anat head to get func2anat matrix
linear_reg_func_to_anat=pe.Node(interface=fsl.FLIRT() name='func_to_anat_linear_reg')<line_sep>linear_reg_func_to_anat.inputs.dof=6<line_sep>linear_reg_func_to_anat.inputs.interp='spline'<line_sep>linear_reg_func_to_anat.inputs.searchr_x=[30 30]<line_sep>linear_reg_func_to_anat.inputs.searchr_y=[30 30]<line_sep>linear_reg_func_to_anat.inputs.searchr_z=[30 30]<line_sep>wf.connect(func_single_volume 'out_file' linear_reg_func_to_anat 'in_file')<line_sep>wf.connect(input_node 'anat_head' linear_reg_func_to_anat 'reference')<line_sep># 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine=pe.Node(interface=fsl.ConvertXFM() name='inv_func2anat_affine')<line_sep>inv_func_to_anat_affine.inputs.invert_xfm=<true><line_sep>wf.connect(linear_reg_func_to_anat 'out_matrix_file' inv_func_to_anat_affine 'in_file')<line_sep># 3. get BOLD mask
# 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
reg_anat_brain_to_func=pe.Node(interface=fsl.ApplyWarp() name='reg_anat_brain_to_func')<line_sep>reg_anat_brain_to_func.inputs.interp='nn'<line_sep>reg_anat_brain_to_func.inputs.relwarp=<true><line_sep>wf.connect(input_node 'anat_brain' reg_anat_brain_to_func 'in_file')<line_sep>wf.connect(input_node 'func' reg_anat_brain_to_func 'ref_file')<line_sep>wf.connect(inv_func_to_anat_affine 'out_file' reg_anat_brain_to_func 'premat')<line_sep># 3.2 Binarize transferred image and fill holes to get BOLD mask.
# Binarize
func_mask_bin=pe.Node(interface=fsl.ImageMaths() name='func_mask')<line_sep>func_mask_bin.inputs.op_string='-bin'<line_sep>wf.connect(reg_anat_brain_to_func 'out_file' func_mask_bin 'in_file')<line_sep>wf.connect(func_mask_bin 'out_file' output_node 'func_brain_mask')<line_sep><return>wf<block_end><def_stmt>normalize_motion_parameters in_file<block_start>"""
Convert FSL mcflirt motion parameters to AFNI space
"""<import_stmt>os<import_stmt>numpy<as>np<line_sep>motion_params=np.genfromtxt(in_file).T<line_sep>motion_params=np.vstack((motion_params[2 :]<times>180/np.pi motion_params[0 :]<times>180/np.pi -motion_params[1 :]<times>180/np.pi motion_params[5 :] motion_params[3 :] -motion_params[4 :]))<line_sep>motion_params=np.transpose(motion_params)<line_sep>out_file=os.path.join(os.getcwd() 'motion_params.1D')<line_sep>np.savetxt(out_file motion_params)<line_sep><return>out_file<block_end><def_stmt>get_mcflirt_rms_abs rms_files<block_start><for_stmt>path rms_files<block_start><if_stmt>'abs.rms'<in>path<block_start>abs_file=path<block_end><if_stmt>'rel.rms'<in>path<block_start>rels_file=path<block_end><block_end><return>(abs_file rels_file)<block_end><def_stmt>estimate_reference_image in_file# fMRIPrep-style BOLD reference
# Ref: https://github.com/nipreps/niworkflows/blob/maint/1.3.x/niworkflows/interfaces/registration.py#L446-L549
<block_start><import_stmt>os<import_stmt>numpy<as>np<import_stmt>nibabel<as>nb<line_sep>ref_input=[in_file]<line_sep>mc_out_file='bold_mc.nii.gz'<line_sep># Build the nibabel spatial image we will work with
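# Gather at most 50 3D volumes from the input; 4D images are split into single volumes with nb.four_to_three before concatenation.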
ref_im=[]<for_stmt>im_i ref_input<block_start>max_new_volumes=50-len(ref_im)<if_stmt>max_new_volumes<le>0<block_start><break><block_end>nib_i=nb.squeeze_image(nb.load(im_i))<if_stmt>nib_i.dataobj.ndim<eq>3<block_start>ref_im.append(nib_i)<block_end><elif_stmt>nib_i.dataobj.ndim<eq>4<block_start>ref_im<augadd>nb.four_to_three(nib_i.slicer[<ellipsis> :max_new_volumes])<block_end><block_end>ref_im=nb.squeeze_image(nb.concat_images(ref_im))<line_sep>out_file=os.path.join(os.getcwd() "ref_bold.nii.gz")<line_sep># Slicing may induce inconsistencies with shape-dependent values in extensions.
# For now, remove all. If this turns out to be a mistake, we can select extensions
# that don't break pipeline stages.
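# If more than 40 volumes remain, only volumes 20-39 are kept; the selection is motion-corrected with AFNI 3dvolreg and its voxelwise median is written out as the reference.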
ref_im.header.extensions.clear()<if_stmt>ref_im.shape[-1]<g>40<block_start>ref_im=nb.Nifti1Image(ref_im.dataobj[: : : 20:40] ref_im.affine ref_im.header)<block_end>ref_name=os.path.join(os.getcwd() "slice.nii.gz")<line_sep>ref_im.to_filename(ref_name)<line_sep>cmd='3dvolreg -Fourier -twopass -zpad 4 -prefix %s %s'%(mc_out_file ref_name)<line_sep>os.system(cmd)<line_sep>mc_slice_nii=nb.load(mc_out_file)<line_sep>median_image_data=np.median(mc_slice_nii.get_fdata() axis=3)<line_sep>nb.Nifti1Image(median_image_data ref_im.affine ref_im.header).to_filename(out_file)<line_sep><return>out_file<block_end><def_stmt>create_scale_func_wf scaling_factor wf_name='scale_func'<block_start>"""Workflow to scale func data.
Parameters
----------
scaling_factor : float
Scale the size of the dataset voxels by the factor.
wf_name : string
name of the workflow
Workflow Inputs::
inputspec.func : func file or a list of func/rest nifti file
User input functional(T2*) Image
Workflow Outputs::
outputspec.scaled_func : string (nifti file)
Path to Output image with scaled data
Order of commands:
- Scale the size of the dataset voxels by the factor 'fac'. For details see `3drefit <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>`_::
3drefit -xyzscale fac rest.nii.gz
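Example (a minimal sketch; in the pipeline this workflow is built by the ``func_scaling`` node block below, and the path and factor here are only illustrative)::
wf = create_scale_func_wf(scaling_factor=10, wf_name='scale_func_0')
wf.inputs.inputspec.func = 'rest.nii.gz'
wf.run()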
"""<line_sep># allocate a workflow object
preproc=pe.Workflow(name=wf_name)<line_sep># configure the workflow's input spec
inputNode=pe.Node(util.IdentityInterface(fields=['func']) name='inputspec')<line_sep># configure the workflow's output spec
outputNode=pe.Node(util.IdentityInterface(fields=['scaled_func']) name='outputspec')<line_sep># allocate a node to edit the functional file
func_scale=pe.Node(interface=afni_utils.Refit() name='func_scale')<line_sep>func_scale.inputs.xyzscale=scaling_factor<line_sep># wire in the func_get_idx node
preproc.connect(inputNode 'func' func_scale 'in_file')<line_sep># wire the output
preproc.connect(func_scale 'out_file' outputNode 'scaled_func')<line_sep><return>preproc<block_end><def_stmt>create_wf_edit_func wf_name="edit_func"<block_start>"""Workflow to edit the scan to the prescribed TRs.
Workflow Inputs::
inputspec.func : func file or a list of func/rest nifti file
User input functional(T2*) Image
inputspec.start_idx : string
Starting volume/slice of the functional image (optional)
inputspec.stop_idx : string
Last volume/slice of the functional image (optional)
Workflow Outputs::
outputspec.edited_func : string (nifti file)
Path to Output image with the initial few volumes (TRs) dropped
Order of commands:
- Get the start and the end volume index of the functional run. If not defined by the user, return the first and last volume.
get_idx(in_files, stop_idx, start_idx)
- Dropping the initial TRs. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc -a rest.nii.gz[4..299]
-expr 'a'
-prefix rest_3dc.nii.gz
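Example (a minimal sketch mirroring the 3dcalc call above; the input path and indices are only illustrative)::
wf = create_wf_edit_func(wf_name='edit_func_0')
wf.inputs.inputspec.func = 'rest.nii.gz'
wf.inputs.inputspec.start_idx = 4
wf.inputs.inputspec.stop_idx = 299
wf.run()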
"""<line_sep># allocate a workflow object
preproc=pe.Workflow(name=wf_name)<line_sep># configure the workflow's input spec
inputNode=pe.Node(util.IdentityInterface(fields=['func' 'start_idx' 'stop_idx']) name='inputspec')<line_sep># configure the workflow's output spec
outputNode=pe.Node(util.IdentityInterface(fields=['edited_func']) name='outputspec')<line_sep># allocate a node to check that the requested edits are
# reasonable given the data
func_get_idx=pe.Node(util.Function(input_names=['in_files' 'stop_idx' 'start_idx'] output_names=['stopidx' 'startidx'] function=get_idx) name='func_get_idx')<line_sep># wire in the func_get_idx node
preproc.connect(inputNode 'func' func_get_idx 'in_files')<line_sep>preproc.connect(inputNode 'start_idx' func_get_idx 'start_idx')<line_sep>preproc.connect(inputNode 'stop_idx' func_get_idx 'stop_idx')<line_sep># allocate a node to edit the functional file
func_drop_trs=pe.Node(interface=afni_utils.Calc() name='func_drop_trs' mem_gb=0.37 mem_x=(739971956005215/151115727451828646838272 'in_file_a'))<line_sep>func_drop_trs.inputs.expr='a'<line_sep>func_drop_trs.inputs.outputtype='NIFTI_GZ'<line_sep># wire in the inputs
preproc.connect(inputNode 'func' func_drop_trs 'in_file_a')<line_sep>preproc.connect(func_get_idx 'startidx' func_drop_trs 'start_idx')<line_sep>preproc.connect(func_get_idx 'stopidx' func_drop_trs 'stop_idx')<line_sep># wire the output
preproc.connect(func_drop_trs 'out_file' outputNode 'edited_func')<line_sep><return>preproc<block_end><def_stmt>slice_timing_wf name='slice_timing' tpattern=<none> tzero=<none># allocate a workflow object
<block_start>wf=pe.Workflow(name=name)<line_sep># configure the workflow's input spec
inputNode=pe.Node(util.IdentityInterface(fields=['func_ts' 'tr' 'tpattern']) name='inputspec')<line_sep># configure the workflow's output spec
outputNode=pe.Node(util.IdentityInterface(fields=['slice_time_corrected']) name='outputspec')<line_sep># create TShift AFNI node
func_slice_timing_correction=pe.Node(interface=preprocess.TShift() name='slice_timing' mem_gb=0.45 mem_x=(5247073869855161/604462909807314587353088 'in_file'))<line_sep>func_slice_timing_correction.inputs.outputtype='NIFTI_GZ'<if_stmt>tzero<is><not><none><block_start>func_slice_timing_correction.inputs.tzero=tzero<block_end>wf.connect([(inputNode func_slice_timing_correction [('func_ts' 'in_file') # (
# # add the @ prefix to the tpattern file going into
# # AFNI 3dTshift - needed this so the tpattern file
# # output from get_scan_params would be tied downstream
# # via a connection (to avoid poofing)
# ('tpattern', nullify, add_afni_prefix),
# 'tpattern'
# ),
(('tr' nullify) 'tr') ]) ])<if_stmt>tpattern<is><not><none><block_start>func_slice_timing_correction.inputs.tpattern=tpattern<block_end><else_stmt><block_start>wf.connect(inputNode ('tpattern' nullify add_afni_prefix) func_slice_timing_correction 'tpattern')<block_end>wf.connect(func_slice_timing_correction 'out_file' outputNode 'slice_time_corrected')<line_sep><return>wf<block_end><def_stmt>get_idx in_files stop_idx=<none> start_idx=<none><block_start>"""
Method to get the first and the last volume for
the functional run. It verifies the user-specified
first and last volumes. If the values are not valid, it
calculates and returns the very first and the last volume
Parameters
----------
in_files : string (nifti file)
Path to input functional run
stop_idx : int
Last volume to be considered, specified by user
in the configuration file
start_idx : int
First volume to be considered, specified by user
in the configuration file
Returns
-------
stop_idx : int
Value of the last volume to consider for the functional run
start_idx : int
Value of the first volume to consider for the functional run
"""<line_sep># Import packages
<import_from_stmt>nibabel load<line_sep># Init variables
img=load(in_files)<line_sep>hdr=img.get_header()<line_sep>shape=hdr.get_data_shape()<line_sep># Check to make sure the input file is 4-dimensional
<if_stmt>len(shape)<ne>4<block_start><raise>TypeError('Input nifti file: %s is not a 4D file'%in_files)<block_end># Grab the number of volumes
nvols=int(hdr.get_data_shape()[3])<if_stmt>(start_idx<eq><none>)<or>(int(start_idx)<l>0)<or>(int(start_idx)<g>(nvols-1))<block_start>startidx=0<block_end><else_stmt><block_start>startidx=int(start_idx)<block_end><if_stmt>(stop_idx<eq><none>)<or>(int(stop_idx)<g>(nvols-1))<block_start>stopidx=nvols-1<block_end><else_stmt><block_start>stopidx=int(stop_idx)<block_end><return>stopidx startidx<block_end><def_stmt>motion_correct_connections wf cfg strat_pool pipe_num opt<block_start><if_stmt>opt<ne>'3dvolreg'<and>opt<ne>'mcflirt'<block_start><raise>Exception("\n\n[!] Error: The 'tool' parameter of the "<concat>"'motion_correction' workflow must be either "<concat>"'3dvolreg' or 'mcflirt'.\n\nTool input: "<concat>"{0}\n\n".format(opt))<block_end><if_stmt>cfg<block_start><if_stmt>int(cfg.pipeline_setup['system_config']['max_cores_per_participant'])<g>1<block_start>chunk_imports=['import nibabel as nb']<line_sep>chunk=pe.Node(Function(input_names=['func_file' 'n_chunks' 'chunk_size'] output_names=['TR_ranges'] function=chunk_ts imports=chunk_imports) name=f'chunk_{pipe_num}')<line_sep>#chunk.inputs.n_chunks = int(cfg.pipeline_setup['system_config'][
# 'max_cores_per_participant'])
# 10-TR sized chunks
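# Each chunk is motion-corrected independently by 3dvolreg (MapNode over the split funcs) and the corrected chunks are re-concatenated with 3dTcat below.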
chunk.inputs.chunk_size=10<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out chunk 'func_file')<line_sep>split_imports=['import os' 'import subprocess']<line_sep>split=pe.Node(Function(input_names=['func_file' 'tr_ranges'] output_names=['split_funcs'] function=split_ts_chunks imports=split_imports) name=f'split_{pipe_num}')<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out split 'func_file')<line_sep>wf.connect(chunk 'TR_ranges' split 'tr_ranges')<line_sep>out_split_func=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_split_func_{pipe_num}')<line_sep>wf.connect(split 'split_funcs' out_split_func 'out_file')<line_sep>func_motion_correct=pe.MapNode(interface=preprocess.Volreg() name=f'func_generate_ref_{pipe_num}' iterfield=['in_file'])<line_sep>wf.connect(out_split_func 'out_file' func_motion_correct 'in_file')<line_sep>func_concat=pe.Node(interface=afni_utils.TCat() name=f'func_concat_{pipe_num}')<line_sep>func_concat.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_motion_correct 'out_file' func_concat 'in_files')<line_sep>out_motion=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_{pipe_num}')<line_sep>wf.connect(func_concat 'out_file' out_motion 'out_file')<block_end><else_stmt><block_start>out_split_func=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_split_func_{pipe_num}')<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out out_split_func 'out_file')<line_sep>func_motion_correct=pe.Node(interface=preprocess.Volreg() name=f'func_generate_ref_{pipe_num}')<line_sep>wf.connect(out_split_func 'out_file' func_motion_correct 'in_file')<line_sep>out_motion=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_{pipe_num}')<line_sep>wf.connect(func_motion_correct 'out_file' out_motion 'out_file')<block_end><block_end><else_stmt><block_start>out_split_func=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_split_func_{pipe_num}')<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out out_split_func 'out_file')<line_sep>func_motion_correct=pe.Node(interface=preprocess.Volreg() name=f'func_generate_ref_{pipe_num}')<line_sep>wf.connect(out_split_func 'out_file' func_motion_correct 'in_file')<line_sep>out_motion=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_{pipe_num}')<line_sep>wf.connect(func_motion_correct 'out_file' out_motion 'out_file')<block_end>func_motion_correct.inputs.zpad=4<line_sep>func_motion_correct.inputs.outputtype='NIFTI_GZ'<line_sep>args=f'-Fourier'<if_stmt>cfg.functional_preproc['motion_estimates_and_correction']['motion_correction']['AFNI-3dvolreg']['functional_volreg_twopass']<block_start>args=f'-twopass {args}'<block_end>func_motion_correct.inputs.args=args<line_sep># Calculate motion parameters
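# For 3dvolreg, the estimation pass clones the correction node, aligns every volume to the motion base file, and also writes max displacement, the six motion parameters and per-volume affine matrices (concatenated across chunks when chunking is enabled); for MCFLIRT, the saved .par/.rms/.mat outputs are reformatted into the same resources.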
<if_stmt>opt<eq>'3dvolreg'<block_start>func_motion_correct_A=func_motion_correct.clone(f'func_motion_correct_3dvolreg_{pipe_num}')<line_sep>func_motion_correct_A.inputs.md1d_file='max_displacement.1D'<line_sep>func_motion_correct_A.inputs.args=args<line_sep>wf.connect(out_split_func 'out_file' func_motion_correct_A 'in_file')<line_sep>node,out=strat_pool.get_data('motion-basefile')<line_sep>wf.connect(node out func_motion_correct_A 'basefile')<if_stmt>cfg<block_start><if_stmt>int(cfg.pipeline_setup['system_config']['max_cores_per_participant'])<g>1<block_start>motion_concat=pe.Node(interface=afni_utils.TCat() name=f'motion_concat_{pipe_num}')<line_sep>motion_concat.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_motion_correct_A 'out_file' motion_concat 'in_files')<line_sep>out_motion_A=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_A_{pipe_num}')<line_sep>wf.connect(motion_concat 'out_file' out_motion_A 'out_file')<line_sep>concat_imports=['import os']<line_sep>md1d_concat=pe.Node(Function(input_names=['in_files'] output_names=['out_file'] function=oned_text_concat imports=concat_imports) name=f'md1d_concat_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'md1d_file' md1d_concat 'in_files')<line_sep>oned_concat=pe.Node(Function(input_names=['in_files'] output_names=['out_file'] function=oned_text_concat imports=concat_imports) name=f'oned_concat_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_file' oned_concat 'in_files')<line_sep>oned_matrix_concat=pe.Node(Function(input_names=['in_files'] output_names=['out_file'] function=oned_text_concat imports=concat_imports) name=f'oned_matrix_concat_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_matrix_save' oned_matrix_concat 'in_files')<line_sep>out_md1d=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_md1d_{pipe_num}')<line_sep>wf.connect(md1d_concat 'out_file' out_md1d 'out_file')<line_sep>out_oned=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_{pipe_num}')<line_sep>wf.connect(oned_concat 'out_file' out_oned 'out_file')<line_sep>out_oned_matrix=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_matrix_{pipe_num}')<line_sep>wf.connect(oned_matrix_concat 'out_file' out_oned_matrix 'out_file')<block_end><else_stmt><block_start>out_motion_A=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_A_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'out_file' out_motion_A 'out_file')<line_sep>out_md1d=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_md1d_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'md1d_file' out_md1d 'out_file')<line_sep>out_oned=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_file' out_oned 'out_file')<line_sep>out_oned_matrix=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_matrix_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_matrix_save' out_oned_matrix 'out_file')<block_end><block_end><else_stmt><block_start>out_motion_A=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_motion_A_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'out_file' out_motion_A 'out_file')<line_sep>out_md1d=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_md1d_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'md1d_file' out_md1d 
'out_file')<line_sep>out_oned=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_file' out_oned 'out_file')<line_sep>out_oned_matrix=pe.Node(interface=util.IdentityInterface(fields=['out_file']) name=f'out_oned_matrix_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'oned_matrix_save' out_oned_matrix 'out_file')<block_end>outputs={'desc-preproc_bold':(out_motion_A 'out_file') 'desc-motion_bold':(out_motion_A 'out_file') 'max-displacement':(out_md1d 'out_file') 'movement-parameters':(out_oned 'out_file') 'coordinate-transformation':(out_oned_matrix 'out_file')}<block_end><elif_stmt>opt<eq>'mcflirt'<block_start>func_motion_correct_A=pe.Node(interface=fsl.MCFLIRT(save_mats=<true> save_plots=<true>) name=f'func_motion_correct_mcflirt_{pipe_num}' mem_gb=2.5)<line_sep>func_motion_correct_A.inputs.save_mats=<true><line_sep>func_motion_correct_A.inputs.save_plots=<true><line_sep>func_motion_correct_A.inputs.save_rms=<true><line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out func_motion_correct_A 'in_file')<line_sep>node,out=strat_pool.get_data('motion-basefile')<line_sep>wf.connect(node out func_motion_correct_A 'ref_file')<line_sep>normalize_motion_params=pe.Node(Function(input_names=['in_file'] output_names=['out_file'] function=normalize_motion_parameters) name=f'norm_motion_params_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'par_file' normalize_motion_params 'in_file')<line_sep>get_rms_abs=pe.Node(Function(input_names=['rms_files'] output_names=['abs_file' 'rels_file'] function=get_mcflirt_rms_abs) name=f'get_mcflirt_rms_abs_{pipe_num}')<line_sep>wf.connect(func_motion_correct_A 'rms_files' get_rms_abs 'rms_files')<line_sep>outputs={'desc-preproc_bold':(func_motion_correct_A 'out_file') 'desc-motion_bold':(func_motion_correct_A 'out_file') 'max-displacement':(get_rms_abs 'abs_file') 'rels-displacement':(get_rms_abs 'rels_file') 'movement-parameters':(normalize_motion_params 'out_file') 'coordinate-transformation':(func_motion_correct_A 'mat_file')}<block_end><return>(wf outputs)<block_end><def_stmt>func_scaling wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_scaling",
"config": ["functional_preproc", "scaling"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold"]}
'''<line_sep>scale_func_wf=create_scale_func_wf(scaling_factor=cfg.scaling_factor wf_name=f"scale_func_{pipe_num}")<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out scale_func_wf 'inputspec.func')<line_sep>outputs={'desc-preproc_bold':(scale_func_wf 'outputspec.scaled_func')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_truncate wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_truncate",
"config": ["functional_preproc", "truncation"],
"switch": "None",
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"desc-preproc_bold": {
"Description": "Truncated functional time-series BOLD data."
}}
}
'''<line_sep># if cfg.functional_preproc['truncation']['start_tr'] == 0 and \
# cfg.functional_preproc['truncation']['stop_tr'] == None:
# data, key = strat_pool.get_data(["desc-preproc_bold", "bold"],
# True)
# outputs = {key: data}
# return (wf, outputs)
trunc_wf=create_wf_edit_func(wf_name=f"edit_func_{pipe_num}")<line_sep>trunc_wf.inputs.inputspec.start_idx=cfg.functional_preproc['truncation']['start_tr']<line_sep>trunc_wf.inputs.inputspec.stop_idx=cfg.functional_preproc['truncation']['stop_tr']<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out trunc_wf 'inputspec.func')<line_sep>outputs={'desc-preproc_bold':(trunc_wf 'outputspec.edited_func')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_despike wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_despike",
"config": ["functional_preproc", "despiking"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"desc-preproc_bold": {
"Description": "De-spiked BOLD time-series via AFNI 3dDespike."
}}
}
'''<line_sep>despike=pe.Node(interface=preprocess.Despike() name=f'func_despiked_{pipe_num}' mem_gb=0.66 mem_x=(8251808479088459/1208925819614629174706176 'in_file'))<line_sep>despike.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out despike 'in_file')<line_sep>outputs={'desc-preproc_bold':(despike 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_slice_time wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_slice_time",
"config": ["functional_preproc", "slice_timing_correction"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"],
"TR",
"tpattern"],
"outputs": {
"desc-preproc_bold": {
"Description": "Slice-time corrected BOLD time-series via AFNI 3dTShift."
},
"desc-stc_bold": {
"Description": "Slice-time corrected BOLD time-series via AFNI 3dTShift."}}
}
'''<line_sep>slice_time=slice_timing_wf(name='func_slice_timing_correction_'<concat>f'{pipe_num}' tpattern=cfg.functional_preproc['slice_timing_correction']['tpattern'] tzero=cfg.functional_preproc['slice_timing_correction']['tzero'])<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out slice_time 'inputspec.func_ts')<line_sep>node,out=strat_pool.get_data('TR')<line_sep>wf.connect(node out slice_time 'inputspec.tr')<line_sep>node,out=strat_pool.get_data('tpattern')<line_sep>wf.connect(node out slice_time 'inputspec.tpattern')<line_sep>outputs={'desc-preproc_bold':(slice_time 'outputspec.slice_time_corrected') 'desc-stc_bold':(slice_time 'outputspec.slice_time_corrected')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_reorient wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_reorient",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold", "desc-reorient_bold"]}
'''<line_sep>func_deoblique=pe.Node(interface=afni_utils.Refit() name=f'func_deoblique_{pipe_num}' mem_gb=0.68 mem_x=(4664065662093477/1208925819614629174706176 'in_file'))<line_sep>func_deoblique.inputs.deoblique=<true><line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out func_deoblique 'in_file')<line_sep>func_reorient=pe.Node(interface=afni_utils.Resample() name=f'func_reorient_{pipe_num}' mem_gb=0.68 mem_x=(9005234470657405/1208925819614629174706176 'in_file'))<line_sep>func_reorient.inputs.orientation='RPI'<line_sep>func_reorient.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_deoblique 'out_file' func_reorient 'in_file')<line_sep>outputs={'desc-preproc_bold':(func_reorient 'out_file') 'desc-reorient_bold':(func_reorient 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>get_motion_ref wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "get_motion_ref",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "motion_correction_reference",
"option_val": ["mean", "median", "selected_volume", "fmriprep_reference"],
"inputs": [["desc-preproc_bold", "bold"],
"bold"],
"outputs": ["motion-basefile"]}
'''<if_stmt>opt<ne>'mean'<and>opt<ne>'median'<and>opt<ne>'selected_volume'<and>opt<ne>'fmriprep_reference'<block_start><raise>Exception("\n\n[!] Error: The 'tool' parameter of the "<concat>"'motion_correction_reference' workflow must be either "<concat>"'mean' or 'median' or 'selected_volume' or 'fmriprep_reference'.\n\nTool input: "<concat>"{0}\n\n".format(opt))<block_end><if_stmt>opt<eq>'mean'<block_start>func_get_RPI=pe.Node(interface=afni_utils.TStat() name=f'func_get_mean_RPI_{pipe_num}' mem_gb=0.48 mem_x=(1435097126797993/302231454903657293676544 'in_file'))<line_sep>func_get_RPI.inputs.options='-mean'<line_sep>func_get_RPI.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out func_get_RPI 'in_file')<block_end><elif_stmt>opt<eq>'median'<block_start>func_get_RPI=pe.Node(interface=afni_utils.TStat() name=f'func_get_median_RPI_{pipe_num}')<line_sep>func_get_RPI.inputs.options='-median'<line_sep>func_get_RPI.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out func_get_RPI 'in_file')<block_end><elif_stmt>opt<eq>'selected_volume'<block_start>func_get_RPI=pe.Node(interface=afni.Calc() name=f'func_get_selected_RPI_{pipe_num}')<line_sep>func_get_RPI.inputs.set(expr='a' single_idx=cfg.functional_preproc['motion_estimates_and_correction']['motion_correction']['motion_correction_reference_volume'] outputtype='NIFTI_GZ')<line_sep>node,out=strat_pool.get_data(['desc-preproc_bold' 'bold'])<line_sep>wf.connect(node out func_get_RPI 'in_file_a')<block_end><elif_stmt>opt<eq>'fmriprep_reference'<block_start>func_get_RPI=pe.Node(util.Function(input_names=['in_file'] output_names=['out_file'] function=estimate_reference_image) name=f'func_get_fmriprep_ref_{pipe_num}')<line_sep>node,out=strat_pool.get_data('bold')<line_sep>wf.connect(node out func_get_RPI 'in_file')<block_end>outputs={'motion-basefile':(func_get_RPI 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_motion_correct wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "motion_correction",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["desc-preproc_bold",
"desc-motion_bold",
"max-displacement",
"rels-displacement",
"movement-parameters",
"coordinate-transformation"]}
'''<line_sep>wf,outputs=motion_correct_connections(wf cfg strat_pool pipe_num opt)<line_sep><return>(wf outputs)<block_end><def_stmt>func_motion_estimates wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "motion_estimates",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["max-displacement",
"rels-displacement",
"movement-parameters",
"coordinate-transformation"]}
'''<line_sep>wf,wf_outputs=motion_correct_connections(wf cfg strat_pool pipe_num opt)<line_sep>outputs={'max-displacement':wf_outputs['max-displacement'] 'movement-parameters':wf_outputs['movement-parameters']}<if_stmt>'coordinate-transformation'<in>wf_outputs<block_start>outputs['coordinate-transformation']=wf_outputs['coordinate-transformation']<block_end><if_stmt>'rels-displacement'<in>wf_outputs<block_start>outputs['rels-displacement']=wf_outputs['rels-displacement']<block_end><return>(wf outputs)<block_end><def_stmt>func_motion_correct_only wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "motion_correction_only",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_correction"],
"switch": "None",
"option_key": "using",
"option_val": ["3dvolreg", "mcflirt"],
"inputs": [(["desc-preproc_bold", "bold"],
"motion-basefile")],
"outputs": ["desc-preproc_bold",
"desc-motion_bold"]}
'''<line_sep>wf,wf_outputs=motion_correct_connections(wf cfg strat_pool pipe_num opt)<line_sep>outputs={'desc-motion_bold':wf_outputs['desc-motion_bold']}<line_sep><return>(wf outputs)<block_end><def_stmt>motion_estimate_filter wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "motion_estimate_filter",
"config": ["functional_preproc", "motion_estimates_and_correction",
"motion_estimate_filter"],
"switch": ["run"],
"option_key": "filter_type",
"option_val": ["notch", "lowpass"],
"inputs": ["movement-parameters",
"TR"],
"outputs": ["movement-parameters",
"motion-filter-info",
"motion-filter-plot"]}
'''<line_sep>notch_imports=['import os' 'import numpy as np' 'from scipy.signal import iirnotch, lfilter, firwin, freqz' 'from matplotlib import pyplot as plt' 'from CPAC.func_preproc.utils import degrees_to_mm, mm_to_degrees']<line_sep>notch=pe.Node(Function(input_names=['motion_params' 'filter_type' 'TR' 'fc_RR_min' 'fc_RR_max' 'center_freq' 'freq_bw' 'lowpass_cutoff' 'filter_order'] output_names=['filtered_motion_params' 'filter_info' 'filter_plot'] function=notch_filter_motion imports=notch_imports) name=f'filter_motion_params_{pipe_num}')<line_sep>notch.inputs.filter_type=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['filter_type']<line_sep>notch.inputs.fc_RR_min=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['breathing_rate_min']<line_sep>notch.inputs.fc_RR_max=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['breathing_rate_max']<line_sep>notch.inputs.center_freq=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['center_frequency']<line_sep>notch.inputs.freq_bw=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['filter_bandwidth']<line_sep>notch.inputs.lowpass_cutoff=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['lowpass_cutoff']<line_sep>notch.inputs.filter_order=cfg.functional_preproc["motion_estimates_and_correction"]["motion_estimate_filter"]['filter_order']<line_sep>node,out=strat_pool.get_data('movement-parameters')<line_sep>wf.connect(node out notch 'motion_params')<line_sep>node,out=strat_pool.get_data('TR')<line_sep>wf.connect(node out notch 'TR')<line_sep>outputs={'motion-filter-info':(notch 'filter_info') 'motion-filter-plot':(notch 'filter_plot') 'movement-parameters':(notch 'filtered_motion_params')}<line_sep><return>(wf outputs)<block_end><def_stmt>calc_motion_stats wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "calc_motion_stats",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "motion_estimates_and_correction",
"motion_estimates", "calculate_motion_after"]],
"option_key": "None",
"option_val": "None",
"inputs": [("desc-motion_bold",
"space-bold_desc-brain_mask",
"movement-parameters",
"max-displacement",
"rels-displacement",
"coordinate-transformation"),
"subject",
"scan"],
"outputs": ["framewise-displacement-power",
"framewise-displacement-jenkinson",
"dvars",
"power-params",
"motion-params"]}
'''<line_sep>motion_prov=strat_pool.get_cpac_provenance('movement-parameters')<line_sep>motion_correct_tool=check_prov_for_motion_tool(motion_prov)<line_sep>gen_motion_stats=motion_power_statistics(name=f'gen_motion_stats_{pipe_num}' motion_correct_tool=motion_correct_tool)<line_sep># Special case where the workflow is not getting outputs from
# resource pool but is connected to functional datasource
node,out_file=strat_pool.get_data('subject')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.subject_id')<line_sep>node,out_file=strat_pool.get_data('scan')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.scan_id')<line_sep>node,out_file=strat_pool.get_data("desc-motion_bold")<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.motion_correct')<line_sep>node,out_file=strat_pool.get_data('space-bold_desc-brain_mask')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.mask')<line_sep>node,out_file=strat_pool.get_data('movement-parameters')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.movement_parameters')<line_sep>node,out_file=strat_pool.get_data('max-displacement')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.max_displacement')<if_stmt>strat_pool.check_rpool('rels-displacement')<block_start>node,out_file=strat_pool.get_data('rels-displacement')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.rels_displacement')<block_end><if_stmt>strat_pool.check_rpool('coordinate-transformation')<block_start>node,out_file=strat_pool.get_data('coordinate-transformation')<line_sep>wf.connect(node out_file gen_motion_stats 'inputspec.transformations')<block_end>outputs={'framewise-displacement-power':(gen_motion_stats 'outputspec.FDP_1D') 'framewise-displacement-jenkinson':(gen_motion_stats 'outputspec.FDJ_1D') 'dvars':(gen_motion_stats 'outputspec.DVARS_1D') 'power-params':(gen_motion_stats 'outputspec.power_params') 'motion-params':(gen_motion_stats 'outputspec.motion_params')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_afni wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "bold_mask_afni",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "AFNI",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": {
"space-bold_desc-brain_mask": {
"Description": "Binary brain mask of the BOLD functional time-series
created by AFNI 3dAutomask."}}
}
'''<line_sep>func_get_brain_mask=pe.Node(interface=preprocess.Automask() name=f'func_get_brain_mask_AFNI_{pipe_num}')<line_sep>func_get_brain_mask.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_get_brain_mask 'in_file')<line_sep>outputs={'space-bold_desc-brain_mask':(func_get_brain_mask 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_fsl wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "bold_mask_fsl",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "FSL",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["space-bold_desc-brain_mask"]}
'''<line_sep>inputnode_bet=pe.Node(util.IdentityInterface(fields=['frac' 'mesh_boolean' 'outline' 'padding' 'radius' 'reduce_bias' 'remove_eyes' 'robust' 'skull' 'surfaces' 'threshold' 'vertical_gradient']) name=f'BET_options_{pipe_num}')<line_sep>func_get_brain_mask=pe.Node(interface=fsl.BET() name=f'func_get_brain_mask_BET_{pipe_num}')<line_sep>func_get_brain_mask.inputs.output_type='NIFTI_GZ'<line_sep>func_get_brain_mask.inputs.mask=<true><line_sep>inputnode_bet.inputs.set(frac=cfg.functional_preproc['func_masking']['FSL-BET']['frac'] mesh_boolean=cfg.functional_preproc['func_masking']['FSL-BET']['mesh_boolean'] outline=cfg.functional_preproc['func_masking']['FSL-BET']['outline'] padding=cfg.functional_preproc['func_masking']['FSL-BET']['padding'] radius=cfg.functional_preproc['func_masking']['FSL-BET']['radius'] reduce_bias=cfg.functional_preproc['func_masking']['FSL-BET']['reduce_bias'] remove_eyes=cfg.functional_preproc['func_masking']['FSL-BET']['remove_eyes'] robust=cfg.functional_preproc['func_masking']['FSL-BET']['robust'] skull=cfg.functional_preproc['func_masking']['FSL-BET']['skull'] surfaces=cfg.functional_preproc['func_masking']['FSL-BET']['surfaces'] threshold=cfg.functional_preproc['func_masking']['FSL-BET']['threshold'] vertical_gradient=cfg.functional_preproc['func_masking']['FSL-BET']['vertical_gradient'] )<line_sep>wf.connect([(inputnode_bet func_get_brain_mask [('frac' 'frac') ('mesh_boolean' 'mesh') ('outline' 'outline') ('padding' 'padding') ('radius' 'radius') ('reduce_bias' 'reduce_bias') ('remove_eyes' 'remove_eyes') ('robust' 'robust') ('skull' 'skull') ('surfaces' 'surfaces') ('threshold' 'threshold') ('vertical_gradient' 'vertical_gradient') ])])<if_stmt>cfg.functional_preproc['func_masking']['FSL-BET']['functional_mean_boolean']<block_start>func_skull_mean=pe.Node(interface=afni_utils.TStat() name=f'func_mean_skull_{pipe_num}')<line_sep>func_skull_mean.inputs.options='-mean'<line_sep>func_skull_mean.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_skull_mean 'in_file')<line_sep>out_node,out_file=(func_skull_mean 'out_file')<if_stmt>cfg.functional_preproc['func_masking']['FSL-BET']['functional_mean_thr']['run']# T=$(fslstats ${subject}_tmean.nii.gz -p 98)
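# The nodes below reproduce the shell steps quoted in the comments: ImageStats computes the percentile value T, a Function node builds the '-thr T/10' string, and ImageMaths applies it to the temporal mean.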
<block_start>threshold_T=pe.Node(interface=fsl.ImageStats() name=f'func_mean_skull_thr_value_{pipe_num}' iterfield=['in_file'])<line_sep>threshold_T.inputs.op_string="-p %f "%(cfg.functional_preproc['func_masking']['FSL-BET']['functional_mean_thr']['threshold_value'])<line_sep>wf.connect(func_skull_mean 'out_file' threshold_T 'in_file')<line_sep># z=$(echo "$T / 10" | bc -l)
<def_stmt>form_thr_string thr<block_start>threshold_z=str(float(thr/10))<line_sep><return>'-thr %s'%(threshold_z)<block_end>form_thr_string=pe.Node(util.Function(input_names=['thr'] output_names=['out_str'] function=form_thr_string) name=f'form_thr_string_{pipe_num}')<line_sep>wf.connect(threshold_T 'out_stat' form_thr_string 'thr')<line_sep># fslmaths ${subject}_tmean.nii.gz -thr ${z} ${subject}_tmean_thr.nii.gz
func_skull_mean_thr=pe.Node(interface=fsl.ImageMaths() name=f'func_mean_skull_thr_{pipe_num}')<line_sep>wf.connect(func_skull_mean 'out_file' func_skull_mean_thr 'in_file')<line_sep>wf.connect(form_thr_string 'out_str' func_skull_mean_thr 'op_string')<line_sep>out_node,out_file=(func_skull_mean_thr 'out_file')<block_end><if_stmt>cfg.functional_preproc['func_masking']['FSL-BET']['functional_mean_bias_correction']# fast --nopve -B ${subject}_tmean_thr.nii.gz
<block_start>func_mean_skull_fast=pe.Node(interface=fsl.FAST() name=f'func_mean_skull_fast_{pipe_num}')<line_sep>func_mean_skull_fast.inputs.no_pve=<true><line_sep>func_mean_skull_fast.inputs.output_biascorrected=<true><line_sep>wf.connect(out_node out_file func_mean_skull_fast 'in_files')<line_sep>out_node,out_file=(func_mean_skull_fast 'restored_image')<block_end>wf.connect(out_node out_file func_get_brain_mask 'in_file')<block_end><else_stmt><block_start>func_get_brain_mask.inputs.functional=<true><line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_get_brain_mask 'in_file')<block_end># erode one voxel of functional brain mask
erode_one_voxel=pe.Node(interface=fsl.ErodeImage() name=f'erode_one_voxel_{pipe_num}')<line_sep>erode_one_voxel.inputs.kernel_shape='box'<line_sep>erode_one_voxel.inputs.kernel_size=1.0<line_sep>wf.connect(func_get_brain_mask 'mask_file' erode_one_voxel 'in_file')<line_sep>outputs={'space-bold_desc-brain_mask':(erode_one_voxel 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_fsl_afni wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "bold_mask_fsl_afni",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "FSL_AFNI",
"inputs": [["desc-motion_bold", "desc-preproc_bold", "bold"],
"motion-basefile"],
"outputs": ["space-bold_desc-brain_mask",
"desc-ref_bold"]}
'''<line_sep># fMRIPrep-style BOLD mask
# Ref: https://github.com/nipreps/niworkflows/blob/maint/1.3.x/niworkflows/func/util.py#L246-L514
# Initialize transforms with antsAI
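# antsAI does a coarse search for an initial affine between the motion base file (moving) and the packaged BOLD reference template (fixed); its output transform seeds the full ANTs registration below.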
init_aff=pe.Node(AI(metric=("Mattes" 32 "Regular" 0.2) transform=("Affine" 0.1) search_factor=(20 0.12) principal_axes=<false> convergence=(10 1e-6 10) verbose=<true> ) name=f"init_aff_{pipe_num}" n_procs=cfg.pipeline_setup['system_config']['num_OMP_threads'] )<line_sep>init_aff.inputs.fixed_image=cfg.functional_preproc['func_masking']['FSL_AFNI']['bold_ref']<line_sep>init_aff.inputs.fixed_image_mask=cfg.functional_preproc['func_masking']['FSL_AFNI']['brain_mask']<line_sep>init_aff.inputs.search_grid=(40 (0 40 40))<line_sep># Set up spatial normalization
norm=pe.Node(ants.Registration(winsorize_upper_quantile=0.98 winsorize_lower_quantile=0.05 float=<true> metric=['Mattes'] metric_weight=[1] radius_or_number_of_bins=[64] transforms=['Affine'] transform_parameters=[[0.1]] number_of_iterations=[[200]] convergence_window_size=[10] convergence_threshold=[1.e-9] sampling_strategy=['Random' 'Random'] smoothing_sigmas=[[2]] sigma_units=['mm' 'mm' 'mm'] shrink_factors=[[2]] sampling_percentage=[0.2] use_histogram_matching=[<true>] use_estimate_learning_rate_once=[<true>]) name=f"norm_{pipe_num}" n_procs=cfg.pipeline_setup['system_config']['num_OMP_threads'] )<line_sep>norm.inputs.fixed_image=cfg.functional_preproc['func_masking']['FSL_AFNI']['bold_ref']<line_sep>map_brainmask=pe.Node(ants.ApplyTransforms(interpolation="BSpline" float=<true> ) name=f"map_brainmask_{pipe_num}" )<line_sep># Use the higher resolution and probseg for numerical stability in rounding
map_brainmask.inputs.input_image=cfg.functional_preproc['func_masking']['FSL_AFNI']['brain_probseg']<line_sep>binarize_mask=pe.Node(interface=fsl.maths.MathsCommand() name=f'binarize_mask_{pipe_num}')<line_sep>binarize_mask.inputs.args='-thr 0.85 -bin'<line_sep># Dilate pre_mask
pre_dilate=pe.Node(fsl.DilateImage(operation="max" kernel_shape="sphere" kernel_size=3.0 internal_datatype="char" ) name=f"pre_mask_dilate_{pipe_num}" )<line_sep># Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
n4_correct=pe.Node(ants.N4BiasFieldCorrection(dimension=3 copy_header=<true> bspline_fitting_distance=200) shrink_factor=2 rescale_intensities=<true> name=f"n4_correct_{pipe_num}" n_procs=1 )<line_sep>skullstrip_first_pass=pe.Node(fsl.BET(frac=0.2 mask=<true> functional=<false>) name=f'skullstrip_first_pass_{pipe_num}')<line_sep>bet_dilate=pe.Node(fsl.DilateImage(operation='max' kernel_shape='sphere' kernel_size=6.0 internal_datatype='char') name=f'skullstrip_first_dilate_{pipe_num}')<line_sep>bet_mask=pe.Node(fsl.ApplyMask() name=f'skullstrip_first_mask_'<concat>f'{pipe_num}')<line_sep>unifize=pe.Node(afni_utils.Unifize(t2=<true> outputtype='NIFTI_GZ' args='-clfrac 0.2 -rbt 18.3 65.0 90.0' out_file="uni.nii.gz") name=f'unifize_{pipe_num}')<line_sep>skullstrip_second_pass=pe.Node(preprocess.Automask(dilate=1 outputtype='NIFTI_GZ') name=f'skullstrip_second_pass_{pipe_num}')<line_sep>combine_masks=pe.Node(fsl.BinaryMaths(operation='mul') name=f'combine_masks_{pipe_num}')<line_sep>apply_mask=pe.Node(fsl.ApplyMask() name=f'extract_ref_brain_bold_{pipe_num}')<line_sep>node,out=strat_pool.get_data(["motion-basefile"])<line_sep>wf.connect([(node init_aff [(out "moving_image")]) (node map_brainmask [(out "reference_image")]) (node norm [(out "moving_image")]) (init_aff norm [("output_transform" "initial_moving_transform")]) (norm map_brainmask [("reverse_invert_flags" "invert_transform_flags") ("reverse_transforms" "transforms") ]) (map_brainmask binarize_mask [("output_image" "in_file")]) (binarize_mask pre_dilate [("out_file" "in_file")]) (pre_dilate n4_correct [("out_file" "mask_image")]) (node n4_correct [(out "input_image")]) (n4_correct skullstrip_first_pass [('output_image' 'in_file')]) (skullstrip_first_pass bet_dilate [('mask_file' 'in_file')]) (bet_dilate bet_mask [('out_file' 'mask_file')]) (skullstrip_first_pass bet_mask [('out_file' 'in_file')]) (bet_mask unifize [('out_file' 'in_file')]) (unifize skullstrip_second_pass [('out_file' 'in_file')]) (skullstrip_first_pass combine_masks [('mask_file' 'in_file')]) (skullstrip_second_pass combine_masks [('out_file' 'operand_file')]) (unifize apply_mask [('out_file' 'in_file')]) (combine_masks apply_mask [('out_file' 'mask_file')]) ])<line_sep>outputs={'space-bold_desc-brain_mask':(combine_masks 'out_file') 'desc-ref_bold':(apply_mask 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_anatomical_refined wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "bold_mask_anatomical_refined",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Refined",
"inputs": ["bold",
["desc-preproc_bold", "bold"],
"desc-brain_T1w",
"space-T1w_desc-brain_mask"],
"outputs": ["space-bold_desc-brain_mask"]}
'''<line_sep># binarize anat mask, in case it is not a binary mask.
anat_brain_mask_bin=pe.Node(interface=fsl.ImageMaths() name=f'anat_brain_mask_bin_{pipe_num}')<line_sep>anat_brain_mask_bin.inputs.op_string='-bin'<line_sep>node,out=strat_pool.get_data('space-T1w_desc-brain_mask')<line_sep>wf.connect(node out anat_brain_mask_bin 'in_file')<line_sep># fill holes of anat mask
anat_mask_filled=pe.Node(interface=afni.MaskTool() name=f'anat_brain_mask_filled_{pipe_num}')<line_sep>anat_mask_filled.inputs.fill_holes=<true><line_sep>anat_mask_filled.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(anat_brain_mask_bin 'out_file' anat_mask_filled 'in_file')<line_sep># init_bold_mask : input raw func
init_bold_mask=anat_refined_mask(init_bold_mask=<true> wf_name=f'init_bold_mask_{pipe_num}')<line_sep>func_deoblique=pe.Node(interface=afni_utils.Refit() name=f'raw_func_deoblique_{pipe_num}')<line_sep>func_deoblique.inputs.deoblique=<true><line_sep>node,out=strat_pool.get_data('bold')<line_sep>wf.connect(node out func_deoblique 'in_file')<line_sep>func_reorient=pe.Node(interface=afni_utils.Resample() name=f'raw_func_reorient_{pipe_num}')<line_sep>func_reorient.inputs.orientation='RPI'<line_sep>func_reorient.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_deoblique 'out_file' func_reorient 'in_file')<line_sep>wf.connect(func_reorient 'out_file' init_bold_mask 'inputspec.func')<line_sep>wf.connect(anat_mask_filled 'out_file' init_bold_mask 'inputspec.anatomical_brain_mask')<line_sep>node,out=strat_pool.get_data('desc-brain_T1w')<line_sep>wf.connect(node out init_bold_mask 'inputspec.anat_brain')<line_sep># dilate init func brain mask
func_tmp_brain_mask=pe.Node(interface=fsl.ImageMaths() name=f'func_tmp_brain_mask_dil_{pipe_num}')<line_sep>func_tmp_brain_mask.inputs.op_string='-dilM'<line_sep>wf.connect(init_bold_mask 'outputspec.func_brain_mask' func_tmp_brain_mask 'in_file')<line_sep># refined_bold_mask : input motion corrected func
refined_bold_mask=anat_refined_mask(init_bold_mask=<false> wf_name='refined_bold_mask'<concat>f'_{pipe_num}')<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out refined_bold_mask 'inputspec.func')<line_sep>node,out=strat_pool.get_data('desc-brain_T1w')<line_sep>wf.connect(node out refined_bold_mask 'inputspec.anat_brain')<line_sep>wf.connect(func_tmp_brain_mask 'out_file' refined_bold_mask 'inputspec.init_func_brain_mask')<line_sep># dilate anatomical mask
<if_stmt>cfg.functional_preproc['func_masking']['Anatomical_Refined']['anatomical_mask_dilation']<block_start>anat_mask_dilate=pe.Node(interface=afni.MaskTool() name=f'anat_mask_dilate_{pipe_num}')<line_sep>anat_mask_dilate.inputs.dilate_inputs='1'<line_sep>anat_mask_dilate.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(anat_mask_filled 'out_file' anat_mask_dilate 'in_file')<line_sep>wf.connect(anat_mask_dilate 'out_file' refined_bold_mask 'inputspec.anatomical_brain_mask')<block_end><else_stmt><block_start>wf.connect(anat_mask_filled 'out_file' refined_bold_mask 'inputspec.anatomical_brain_mask')<block_end># get final func mask
func_mask_final=pe.Node(interface=fsl.MultiImageMaths() name=f'func_mask_final_{pipe_num}')<line_sep>func_mask_final.inputs.op_string="-mul %s"<line_sep>wf.connect(func_tmp_brain_mask 'out_file' func_mask_final 'in_file')<line_sep>wf.connect(refined_bold_mask 'outputspec.func_brain_mask' func_mask_final 'operand_files')<line_sep>outputs={'space-bold_desc-brain_mask':(func_mask_final 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_anatomical_based wf cfg strat_pool pipe_num opt=<none><block_start>'''Generate the BOLD mask by basing it off of the anatomical brain mask.
Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.
https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
Node Block:
{"name": "bold_mask_anatomical_based",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Based",
"inputs": [["desc-preproc_bold", "bold"],
"desc-brain_T1w",
["desc-preproc_T1w", "desc-reorient_T1w", "T1w"]],
"outputs": ["space-bold_desc-brain_mask"]}
'''<line_sep># 0. Take single volume of func
func_single_volume=pe.Node(interface=afni.Calc() name='func_single_volume')<line_sep>func_single_volume.inputs.set(expr='a' single_idx=1 outputtype='NIFTI_GZ')<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_single_volume 'in_file_a')<line_sep># 1. Register func head to anat head to get func2anat matrix
linear_reg_func_to_anat=pe.Node(interface=fsl.FLIRT() name='func_to_anat_linear_reg')<line_sep>linear_reg_func_to_anat.inputs.dof=6<line_sep>linear_reg_func_to_anat.inputs.interp='spline'<line_sep>linear_reg_func_to_anat.inputs.searchr_x=[30 30]<line_sep>linear_reg_func_to_anat.inputs.searchr_y=[30 30]<line_sep>linear_reg_func_to_anat.inputs.searchr_z=[30 30]<line_sep>wf.connect(func_single_volume 'out_file' linear_reg_func_to_anat 'in_file')<line_sep>node,out=strat_pool.get_data(["desc-preproc_T1w" "desc-reorient_T1w" "T1w"])<line_sep>wf.connect(node out linear_reg_func_to_anat 'reference')<line_sep># 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine=pe.Node(interface=fsl.ConvertXFM() name='inv_func2anat_affine')<line_sep>inv_func_to_anat_affine.inputs.invert_xfm=<true><line_sep>wf.connect(linear_reg_func_to_anat 'out_matrix_file' inv_func_to_anat_affine 'in_file')<line_sep># 3. get BOLD mask
# 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
reg_anat_brain_to_func=pe.Node(interface=fsl.ApplyWarp() name='reg_anat_brain_to_func')<line_sep>reg_anat_brain_to_func.inputs.interp='nn'<line_sep>reg_anat_brain_to_func.inputs.relwarp=<true><line_sep>node,out=strat_pool.get_data("desc-brain_T1w")<line_sep>wf.connect(node out reg_anat_brain_to_func 'in_file')<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out reg_anat_brain_to_func 'ref_file')<line_sep>wf.connect(inv_func_to_anat_affine 'out_file' reg_anat_brain_to_func 'premat')<line_sep># 3.2 Binarize transfered image
func_mask_bin=pe.Node(interface=fsl.ImageMaths() name='func_mask_bin')<line_sep>func_mask_bin.inputs.op_string='-abs -bin'<line_sep>wf.connect(reg_anat_brain_to_func 'out_file' func_mask_bin 'in_file')<line_sep># 3.3 Fill holes to get BOLD mask
func_mask_fill_holes=pe.Node(interface=afni.MaskTool() name='func_mask_fill_holes')<line_sep>func_mask_fill_holes.inputs.fill_holes=<true><line_sep>func_mask_fill_holes.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(func_mask_bin 'out_file' func_mask_fill_holes 'in_file')<line_sep>outputs={'space-bold_desc-brain_mask':(func_mask_fill_holes 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_anatomical_resampled wf cfg strat_pool pipe_num opt=<none><block_start>'''Resample anatomical brain mask in standard space to get BOLD brain mask in standard space
Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.
https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/OneStepResampling.sh#L121-L132
Node Block:
{"name": "bold_mask_anatomical_resampled",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "Anatomical_Resampled",
"inputs": [["desc-preproc_bold", "bold"],
"T1w-template-funcreg",
"space-template_desc-brain_T1w",
"space-template_desc-T1w_mask"],
"outputs": ["space-template_res-bold_desc-brain_T1w",
"space-template_desc-bold_mask",
"space-bold_desc-brain_mask"]}
'''<line_sep># applywarp --rel --interp=spline -i ${T1wImage} -r ${ResampRefIm} --premat=$FSLDIR/etc/flirtsch/ident.mat -o ${WD}/${T1wImageFile}.${FinalfMRIResolution}
anat_brain_to_func_res=pe.Node(interface=fsl.ApplyWarp() name=f'resample_anat_brain_in_standard_{pipe_num}')<line_sep>anat_brain_to_func_res.inputs.interp='spline'<line_sep>anat_brain_to_func_res.inputs.premat=cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']<line_sep>node,out=strat_pool.get_data('space-template_desc-brain_T1w')<line_sep>wf.connect(node out anat_brain_to_func_res 'in_file')<line_sep>node,out=strat_pool.get_data('T1w-template-funcreg')<line_sep>wf.connect(node out anat_brain_to_func_res 'ref_file')<line_sep># Create brain masks in this space from the FreeSurfer output (changing resolution)
# applywarp --rel --interp=nn -i ${FreeSurferBrainMask}.nii.gz -r ${WD}/${T1wImageFile}.${FinalfMRIResolution} --premat=$FSLDIR/etc/flirtsch/ident.mat -o ${WD}/${FreeSurferBrainMaskFile}.${FinalfMRIResolution}.nii.gz
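# Nearest-neighbour ApplyWarp with the FSL identity matrix performs a pure resampling of the template-space brain mask onto the functional-resolution grid created above.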
anat_brain_mask_to_func_res=pe.Node(interface=fsl.ApplyWarp() name=f'resample_anat_brain_mask_in_standard_{pipe_num}')<line_sep>anat_brain_mask_to_func_res.inputs.interp='nn'<line_sep>anat_brain_mask_to_func_res.inputs.premat=cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']<line_sep>node,out=strat_pool.get_data('space-template_desc-T1w_mask')<line_sep>wf.connect(node out anat_brain_mask_to_func_res 'in_file')<line_sep>wf.connect(anat_brain_to_func_res 'out_file' anat_brain_mask_to_func_res 'ref_file')<line_sep># Resample func mask in template space back to native space
func_mask_template_to_native=pe.Node(interface=afni.Resample() name=f'resample_func_mask_to_native_{pipe_num}')<line_sep>func_mask_template_to_native.inputs.resample_mode='NN'<line_sep>func_mask_template_to_native.inputs.outputtype='NIFTI_GZ'<line_sep>wf.connect(anat_brain_mask_to_func_res 'out_file' func_mask_template_to_native 'in_file')<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_mask_template_to_native 'master')<line_sep>outputs={'space-template_res-bold_desc-brain_T1w':(anat_brain_to_func_res 'out_file') 'space-template_desc-bold_mask':(anat_brain_mask_to_func_res 'out_file') "space-bold_desc-brain_mask":(func_mask_template_to_native 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_mask_ccs wf cfg strat_pool pipe_num opt=<none><block_start>'''Generate the BOLD mask by basing it off of the anatomical brain.
Adapted from the BOLD mask method from the CCS pipeline.
https://github.com/TingsterX/CCS/blob/master/ccs_01_funcpreproc.sh#L89-L110
Node Block:
{"name": "bold_mask_ccs",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": ["func_masking", "using"],
"option_val": "CCS_Anatomical_Refined",
"inputs": [["desc-motion_bold", "desc-preproc_bold", "bold"],
"desc-brain_T1w",
["desc-preproc_T1w", "desc-reorient_T1w", "T1w"]],
"outputs": ["space-bold_desc-brain_mask",
"desc-ROIbrain_bold"]}
'''<line_sep># Run 3dAutomask to generate func initial mask
func_tmp_brain_mask=pe.Node(interface=preprocess.Automask() name=f'func_tmp_brain_mask_AFNI_{pipe_num}')<line_sep>func_tmp_brain_mask.inputs.dilate=1<line_sep>func_tmp_brain_mask.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-motion_bold" "desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_tmp_brain_mask 'in_file')<line_sep># Extract 8th volume as func ROI
func_roi=pe.Node(interface=fsl.ExtractROI() name=f'extract_func_roi_{pipe_num}')<line_sep>func_roi.inputs.t_min=7<line_sep>func_roi.inputs.t_size=1<line_sep>node,out=strat_pool.get_data(["desc-motion_bold" "desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_roi 'in_file')<line_sep># Apply func initial mask on func ROI volume
func_tmp_brain=pe.Node(interface=fsl.maths.ApplyMask() name=f'get_func_tmp_brain_{pipe_num}')<line_sep>wf.connect(func_roi 'roi_file' func_tmp_brain 'in_file')<line_sep>wf.connect(func_tmp_brain_mask 'out_file' func_tmp_brain 'mask_file')<line_sep># Register func tmp brain to anat brain to get func2anat matrix
reg_func_to_anat=pe.Node(interface=fsl.FLIRT() name=f'func_to_anat_linear_reg_{pipe_num}')<line_sep>reg_func_to_anat.inputs.interp='trilinear'<line_sep>reg_func_to_anat.inputs.cost='corratio'<line_sep>reg_func_to_anat.inputs.dof=6<line_sep>wf.connect(func_tmp_brain 'out_file' reg_func_to_anat 'in_file')<line_sep>node,out=strat_pool.get_data("desc-brain_T1w")<line_sep>wf.connect(node out reg_func_to_anat 'reference')<line_sep># Inverse func2anat matrix
inv_func_to_anat_affine=pe.Node(interface=fsl.ConvertXFM() name=f'inv_func2anat_affine_{pipe_num}')<line_sep>inv_func_to_anat_affine.inputs.invert_xfm=<true><line_sep>wf.connect(reg_func_to_anat 'out_matrix_file' inv_func_to_anat_affine 'in_file')<line_sep># Transform anat brain to func space
reg_anat_brain_to_func=pe.Node(interface=fsl.FLIRT() name=f'reg_anat_brain_to_func_{pipe_num}')<line_sep>reg_anat_brain_to_func.inputs.apply_xfm=<true><line_sep>reg_anat_brain_to_func.inputs.interp='trilinear'<line_sep>node,out=strat_pool.get_data("desc-brain_T1w")<line_sep>wf.connect(node out reg_anat_brain_to_func 'in_file')<line_sep>wf.connect(func_roi 'roi_file' reg_anat_brain_to_func 'reference')<line_sep>wf.connect(inv_func_to_anat_affine 'out_file' reg_anat_brain_to_func 'in_matrix_file')<line_sep># Binarize and dilate anat brain in func space
bin_anat_brain_in_func=pe.Node(interface=fsl.ImageMaths() name=f'bin_anat_brain_in_func_{pipe_num}')<line_sep>bin_anat_brain_in_func.inputs.op_string='-bin -dilM'<line_sep>wf.connect(reg_anat_brain_to_func 'out_file' bin_anat_brain_in_func 'in_file')<line_sep># Binarize detectable func signals
bin_func=pe.Node(interface=fsl.ImageMaths() name=f'bin_func_{pipe_num}')<line_sep>bin_func.inputs.op_string='-Tstd -bin'<line_sep>node,out=strat_pool.get_data(["desc-motion_bold" "desc-preproc_bold" "bold"])<line_sep>wf.connect(node out bin_func 'in_file')<line_sep># Take intersection of masks
merge_func_mask=pe.Node(util.Merge(2) name=f'merge_func_mask_{pipe_num}')<line_sep>wf.connect(func_tmp_brain_mask 'out_file' merge_func_mask 'in1')<line_sep>wf.connect(bin_anat_brain_in_func 'out_file' merge_func_mask 'in2')<line_sep>intersect_mask=pe.Node(interface=fsl.MultiImageMaths() name=f'intersect_mask_{pipe_num}')<line_sep>intersect_mask.inputs.op_string='-mul %s -mul %s'<line_sep>intersect_mask.inputs.output_datatype='char'<line_sep>wf.connect(bin_func 'out_file' intersect_mask 'in_file')<line_sep>wf.connect(merge_func_mask 'out' intersect_mask 'operand_files')<line_sep># this is the func input for coreg in ccs
# TODO evaluate if it's necessary to use this brain
example_func_brain=pe.Node(interface=fsl.maths.ApplyMask() name=f'get_example_func_brain_{pipe_num}')<line_sep>wf.connect(func_roi 'roi_file' example_func_brain 'in_file')<line_sep>wf.connect(intersect_mask 'out_file' example_func_brain 'mask_file')<line_sep>outputs={'space-bold_desc-brain_mask':(intersect_mask 'out_file') 'desc-ROIbrain_bold':(example_func_brain 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>bold_masking wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "bold_masking",
"config": None,
"switch": [["functional_preproc", "run"],
["functional_preproc", "func_masking", "apply_func_mask_in_native_space"]],
"option_key": "None",
"option_val": "None",
"inputs": [(["desc-preproc_bold", "bold"],
"space-bold_desc-brain_mask")],
"outputs": {
"desc-preproc_bold": {
"Description": "The skull-stripped BOLD time-series.",
"SkullStripped": True},
"desc-brain_bold": {
"Description": "The skull-stripped BOLD time-series.",
"SkullStripped": True}}
}
'''<line_sep>func_edge_detect=pe.Node(interface=afni_utils.Calc() name=f'func_extract_brain_{pipe_num}')<line_sep>func_edge_detect.inputs.expr='a*b'<line_sep>func_edge_detect.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_edge_detect 'in_file_a')<line_sep>node,out=strat_pool.get_data("space-bold_desc-brain_mask")<line_sep>wf.connect(node out func_edge_detect 'in_file_b')<line_sep>outputs={'desc-preproc_bold':(func_edge_detect 'out_file') 'desc-brain_bold':(func_edge_detect 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_mean wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_mean",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "generate_func_mean", "run"]],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-mean_bold"]
}
'''<line_sep>func_mean=pe.Node(interface=afni_utils.TStat() name=f'func_mean_{pipe_num}')<line_sep>func_mean.inputs.options='-mean'<line_sep>func_mean.inputs.outputtype='NIFTI_GZ'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_mean 'in_file')<line_sep>outputs={'desc-mean_bold':(func_mean 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_normalize wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_normalize",
"config": "None",
"switch": [["functional_preproc", "run"],
["functional_preproc", "normalize_func", "run"]],
"option_key": "None",
"option_val": "None",
"inputs": [["desc-preproc_bold", "bold"]],
"outputs": ["desc-preproc_bold"]}
'''<line_sep>func_normalize=pe.Node(interface=fsl.ImageMaths() name=f'func_normalize_{pipe_num}' mem_gb=0.7 mem_x=(4538494663498653/604462909807314587353088 'in_file'))<line_sep>func_normalize.inputs.op_string='-ing 10000'<line_sep>func_normalize.inputs.out_data_type='float'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_normalize 'in_file')<line_sep>outputs={'desc-preproc_bold':(func_normalize 'out_file')}<line_sep><return>(wf outputs)<block_end><def_stmt>func_mask_normalize wf cfg strat_pool pipe_num opt=<none><block_start>'''
{"name": "func_mask_normalize",
"config": ["functional_preproc"],
"switch": ["run"],
"option_key": "None",
"option_val": "None",
"inputs": [(["desc-preproc_bold", "bold"],
"space-bold_desc-brain_mask")],
"outputs": ["space-bold_desc-brain_mask"]}
'''<line_sep>func_mask_normalize=pe.Node(interface=fsl.ImageMaths() name=f'func_mask_normalize_{pipe_num}' mem_gb=0.7 mem_x=(4538494663498653/604462909807314587353088 'in_file'))<line_sep>func_mask_normalize.inputs.op_string='-Tmin -bin'<line_sep>func_mask_normalize.inputs.out_data_type='char'<line_sep>node,out=strat_pool.get_data(["desc-preproc_bold" "bold"])<line_sep>wf.connect(node out func_mask_normalize 'in_file')<line_sep>outputs={'space-bold_desc-brain_mask':(func_mask_normalize 'out_file')}<line_sep><return>(wf outputs)<block_end> |
<import_from_stmt>eng_to_ipa transcribe<import_stmt>sqlite3<import_stmt>re<import_from_stmt>os.path join abspath dirname<line_sep>conn=sqlite3.connect(join(abspath(dirname(__file__)) "../eng_to_ipa/resources/CMU_dict.db"))<line_sep>c=conn.cursor()<def_stmt>create_dictionary_table <block_start><try_stmt><block_start>c.execute("""CREATE TABLE eng_ipa
(id INTEGER PRIMARY KEY,
word text NOT NULL,
phonemes text NOT NULL,
ipa text NOT NULL
)""")<line_sep>conn.commit()<block_end><except_stmt>sqlite3.OperationalError<block_start>c.execute("DROP TABLE eng_ipa;")<line_sep>conn.commit()<line_sep>create_dictionary_table()<block_end><block_end><def_stmt>insert_dictionary_values <block_start>"""takes the prepared data and places it into the database"""<line_sep>dictionary_data=[]<with_stmt>open(join(abspath(dirname(__file__)) '..\\eng_to_ipa\\resources\\CMU_source_files/cmudict-0.7b.txt') encoding="UTF-8")<as>source_file<block_start><for_stmt>line source_file.readlines()<block_start>word=re.sub(r"\(\d\)" "" line.split(" ")[0]).lower()<line_sep>phonemes=line.split(" ")[1].replace("\n" "").lower()<line_sep>ipa=transcribe.cmu_to_ipa([[phonemes]] stress_marking="both")[0][0]<line_sep>dictionary_data.append((str(word) str(phonemes) str(ipa)))<block_end><block_end>c.executemany("INSERT INTO eng_ipa(word, phonemes, ipa) VALUES (?, ?, ?)" dictionary_data)<line_sep>conn.commit()<block_end><if_stmt>__name__<eq>"__main__"# create_dictionary_table()
# insert_dictionary_values()
# test
<block_start>c.execute("SELECT * FROM eng_ipa WHERE "<concat>"REPLACE(REPLACE(ipa, 'ˌ', ''), 'ˈ', '') "<concat>"LIKE \"%nstr%\"")<for_stmt>r c.fetchall()<block_start>print(str(r))<block_end><block_end> |
<def_stmt>libsi_keyword <block_start>print('libsi keyword')<block_end> |
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<import_from_stmt>babyai.rl.utils DictList<line_sep># dictionary that defines what head is required for each extra info used for auxiliary supervision
required_heads={'seen_state':'binary' 'see_door':'binary' 'see_obj':'binary' 'obj_in_instr':'binary' 'in_front_of_what':'multiclass9' # multi class classifier with 9 possible classes
'visit_proportion':'continuous01' # continuous regressor with outputs in [0, 1]
'bot_action':'binary'}<class_stmt>ExtraInfoCollector<block_start>'''
This class, used in rl.algos.base, connects the extra information coming from the environment with the
corresponding predictions made by the model's specific heads. It transforms both so that they are easy to use
when evaluating losses.
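A rough usage sketch (num_frames, num_procs, infos, frame_idx and exps are illustrative
names, not taken verbatim from rl.algos.base):
    collector = ExtraInfoCollector(['seen_state'], (num_frames, num_procs), device)
    env_info = collector.process(infos)  # tuple of dicts -> dict of lists
    collector.fill_dictionaries(frame_idx, env_info, extra_predictions)
    exps = collector.end_collection(exps)  # attaches the DictLists to exps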
'''<def_stmt>__init__ self aux_info shape device<block_start>self.aux_info=aux_info<line_sep>self.shape=shape<line_sep>self.device=device<line_sep>self.collected_info=dict()<line_sep>self.extra_predictions=dict()<for_stmt>info self.aux_info<block_start>self.collected_info[info]=torch.zeros(*shape device=self.device)<if_stmt>required_heads[info]<eq>'binary'<or>required_heads[info].startswith('continuous')# we predict one number only
<block_start>self.extra_predictions[info]=torch.zeros(*shape 1 device=self.device)<block_end><elif_stmt>required_heads[info].startswith('multiclass')# means that this is a multi-class classification and we need to predict the whole proba distr
<block_start>n_classes=int(required_heads[info].replace('multiclass' ''))<line_sep>self.extra_predictions[info]=torch.zeros(*shape n_classes device=self.device)<block_end><else_stmt><block_start><raise>ValueError("{} not supported".format(required_heads[info]))<block_end><block_end><block_end><def_stmt>process self env_info# env_info is now a tuple of dicts
<block_start>env_info=[{k:v<for>k,v dic.items()<if>k<in>self.aux_info}<for>dic env_info]<line_sep>env_info={k:[env_info[_][k]<for>_ range(len(env_info))]<for>k env_info[0].keys()}<line_sep># env_info is now a dict of lists
<return>env_info<block_end><def_stmt>fill_dictionaries self index env_info extra_predictions<block_start><for_stmt>info self.aux_info<block_start>dtype=torch.long<if>required_heads[info].startswith('multiclass')<else>torch.float<line_sep>self.collected_info[info][index]=torch.tensor(env_info[info] dtype=dtype device=self.device)<line_sep>self.extra_predictions[info][index]=extra_predictions[info]<block_end><block_end><def_stmt>end_collection self exps<block_start>collected_info=dict()<line_sep>extra_predictions=dict()<for_stmt>info self.aux_info# T x P -> P x T -> P * T
<block_start>collected_info[info]=self.collected_info[info].transpose(0 1).reshape(-1)<if_stmt>required_heads[info]<eq>'binary'<or>required_heads[info].startswith('continuous')# T x P x 1 -> P x T x 1 -> P * T
<block_start>extra_predictions[info]=self.extra_predictions[info].transpose(0 1).reshape(-1)<block_end><elif_stmt>type(required_heads[info])<eq>int# T x P x k -> P x T x k -> (P * T) x k
<block_start>k=required_heads[info]# number of classes
extra_predictions[info]=self.extra_predictions[info].transpose(0 1).reshape(-1 k)<block_end><block_end># convert the dicts to DictLists, and add them to the exps DictList.
exps.collected_info=DictList(collected_info)<line_sep>exps.extra_predictions=DictList(extra_predictions)<line_sep><return>exps<block_end><block_end><class_stmt>SupervisedLossUpdater<block_start>'''
This class, used by PPO, evaluates the supervised loss when extra information from the
environment is used as auxiliary supervision. It also handles logging of accuracies, L2 distances, etc.
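The expected call order, inferred from the methods below (the batches/sub_batches loop
variables are illustrative, not a verbatim excerpt from the PPO code):
    updater.init_epoch()
    for batch in batches:
        updater.init_batch()
        for sb in sub_batches:
            loss = loss + updater.eval_subbatch(extra_predictions, sb)
        updater.update_batch_values()
        updater.update_epoch_logs()
    logs = updater.end_training(logs)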
'''<def_stmt>__init__ self aux_info supervised_loss_coef recurrence device<block_start>self.aux_info=aux_info<line_sep>self.supervised_loss_coef=supervised_loss_coef<line_sep>self.recurrence=recurrence<line_sep>self.device=device<line_sep>self.log_supervised_losses=[]<line_sep>self.log_supervised_accuracies=[]<line_sep>self.log_supervised_L2_losses=[]<line_sep>self.log_supervised_prevalences=[]<line_sep>self.batch_supervised_loss=0<line_sep>self.batch_supervised_accuracy=0<line_sep>self.batch_supervised_L2_loss=0<line_sep>self.batch_supervised_prevalence=0<block_end><def_stmt>init_epoch self<block_start>self.log_supervised_losses=[]<line_sep>self.log_supervised_accuracies=[]<line_sep>self.log_supervised_L2_losses=[]<line_sep>self.log_supervised_prevalences=[]<block_end><def_stmt>init_batch self<block_start>self.batch_supervised_loss=0<line_sep>self.batch_supervised_accuracy=0<line_sep>self.batch_supervised_L2_loss=0<line_sep>self.batch_supervised_prevalence=0<block_end><def_stmt>eval_subbatch self extra_predictions sb<block_start>supervised_loss=torch.tensor(0. device=self.device)<line_sep>supervised_accuracy=torch.tensor(0. device=self.device)<line_sep>supervised_L2_loss=torch.tensor(0. device=self.device)<line_sep>supervised_prevalence=torch.tensor(0. device=self.device)<line_sep>binary_classification_tasks=0<line_sep>classification_tasks=0<line_sep>regression_tasks=0<for_stmt>pos,info enumerate(self.aux_info)<block_start>coef=self.supervised_loss_coef[pos]<line_sep>pred=extra_predictions[info]<line_sep>target=dict.__getitem__(sb.collected_info info)<if_stmt>required_heads[info]<eq>'binary'<block_start>binary_classification_tasks<augadd>1<line_sep>classification_tasks<augadd>1<line_sep>supervised_loss<augadd>coef<times>F.binary_cross_entropy_with_logits(pred.reshape(-1) target)<line_sep>supervised_accuracy<augadd>((pred.reshape(-1)<g>0).float()<eq>target).float().mean()<line_sep>supervised_prevalence<augadd>target.mean()<block_end><elif_stmt>required_heads[info].startswith('continuous')<block_start>regression_tasks<augadd>1<line_sep>mse=F.mse_loss(pred.reshape(-1) target)<line_sep>supervised_loss<augadd>coef<times>mse<line_sep>supervised_L2_loss<augadd>mse<block_end><elif_stmt>required_heads[info].startswith('multiclass')<block_start>classification_tasks<augadd>1<line_sep>supervised_accuracy<augadd>(pred.argmax(1).float()<eq>target).float().mean()<line_sep>supervised_loss<augadd>coef<times>F.cross_entropy(pred target.long())<block_end><else_stmt><block_start><raise>ValueError("{} not supported".format(required_heads[info]))<block_end><block_end><if_stmt>binary_classification_tasks<g>0<block_start>supervised_prevalence<augdiv>binary_classification_tasks<block_end><else_stmt><block_start>supervised_prevalence=torch.tensor(-1)<block_end><if_stmt>classification_tasks<g>0<block_start>supervised_accuracy<augdiv>classification_tasks<block_end><else_stmt><block_start>supervised_accuracy=torch.tensor(-1)<block_end><if_stmt>regression_tasks<g>0<block_start>supervised_L2_loss<augdiv>regression_tasks<block_end><else_stmt><block_start>supervised_L2_loss=torch.tensor(-1)<block_end>self.batch_supervised_loss<augadd>supervised_loss.item()<line_sep>self.batch_supervised_accuracy<augadd>supervised_accuracy.item()<line_sep>self.batch_supervised_L2_loss<augadd>supervised_L2_loss.item()<line_sep>self.batch_supervised_prevalence<augadd>supervised_prevalence.item()<line_sep><return>supervised_loss<block_end><def_stmt>update_batch_values 
self<block_start>self.batch_supervised_loss<augdiv>self.recurrence<line_sep>self.batch_supervised_accuracy<augdiv>self.recurrence<line_sep>self.batch_supervised_L2_loss<augdiv>self.recurrence<line_sep>self.batch_supervised_prevalence<augdiv>self.recurrence<block_end><def_stmt>update_epoch_logs self<block_start>self.log_supervised_losses.append(self.batch_supervised_loss)<line_sep>self.log_supervised_accuracies.append(self.batch_supervised_accuracy)<line_sep>self.log_supervised_L2_losses.append(self.batch_supervised_L2_loss)<line_sep>self.log_supervised_prevalences.append(self.batch_supervised_prevalence)<block_end><def_stmt>end_training self logs<block_start>logs["supervised_loss"]=numpy.mean(self.log_supervised_losses)<line_sep>logs["supervised_accuracy"]=numpy.mean(self.log_supervised_accuracies)<line_sep>logs["supervised_L2_loss"]=numpy.mean(self.log_supervised_L2_losses)<line_sep>logs["supervised_prevalence"]=numpy.mean(self.log_supervised_prevalences)<line_sep><return>logs<block_end><block_end> |
# HTK Imports
<import_from_stmt>htk.utils htk_setting<import_from_stmt>htk.utils.general resolve_model_dynamically<def_stmt>get_translation_model translation<block_start>translations_map=htk_setting('HTK_BIBLE_TRANSLATIONS_MAP')<line_sep>translation_model_class=translations_map.get(translation.upper())<line_sep>translation_model=(resolve_model_dynamically(translation_model_class)<if>translation_model_class<else><none>)<line_sep><return>translation_model<block_end> |
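# Illustrative usage (the 'ESV' key and the queryset call are assumptions, not taken
# from this module; valid keys come from the HTK_BIBLE_TRANSLATIONS_MAP setting):
#
#     TranslationModel = get_translation_model('ESV')
#     if TranslationModel is not None:
#         first_verse = TranslationModel.objects.first()  # assumes a Django-style model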
<import_from_future_stmt> print_function<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<def_stmt>pyunit_pop <block_start>pros=h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))<line_sep>nc=pros.ncol<line_sep>popped_col=pros.pop(pros.names[0])<line_sep>print(pros.dim)<line_sep>print(popped_col.dim)<assert_stmt>popped_col.ncol<eq>1<assert_stmt>pros.ncol<eq>nc-1<block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(pyunit_pop)<block_end><else_stmt><block_start>pyunit_pop()<block_end> |
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>os<import_from_stmt>distutils.sysconfig get_config_var<if_stmt>__file__<eq>'setup.py'# when setup
<block_start>project_root_dir=os.path.dirname(os.path.abspath(__file__))<block_end><else_stmt><block_start>project_root_dir=os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))<block_end>galileo_src_dir=os.path.join(project_root_dir 'galileo')<line_sep>engine_src_dir=os.path.join(project_root_dir 'engine')<line_sep>engine_build_dir=os.path.join(project_root_dir 'build' 'engine')<line_sep>engine_client_dir=os.path.join(engine_build_dir 'client')<line_sep>engine_proto_dir=os.path.join(engine_build_dir 'proto')<line_sep>engine_python_dir=os.path.join(engine_build_dir 'python')<line_sep>libs_dir=os.path.join(project_root_dir 'galileo' 'framework' 'libs')<line_sep>pywrap_dir=os.path.join(project_root_dir 'galileo' 'framework' 'pywrap')<def_stmt>get_tf_ops <block_start>suffix=get_config_var('EXT_SUFFIX')<line_sep><return>os.path.join(pywrap_dir 'tf_ops'+suffix)<block_end><def_stmt>get_cpp_targets <block_start><return>[os.path.join(engine_client_dir 'libclient.so') os.path.join(engine_proto_dir 'libproto.so') ]<block_end><def_stmt>get_py_targets <block_start>suffix=get_config_var('EXT_SUFFIX')<line_sep><return>[os.path.join(engine_python_dir 'py_client'+suffix) os.path.join(engine_python_dir 'py_service'+suffix) os.path.join(engine_python_dir 'py_convertor'+suffix) ]<block_end><def_stmt>get_all_targets <block_start><return>get_cpp_targets()+get_py_targets()<block_end><def_stmt>is_targets_exists <block_start><return>all([os.path.isfile(f)<for>f get_all_targets()])<block_end> |
names=['David' 'Herry' 'Army']<for_stmt>name names<block_start>message1="hello "+name<line_sep>print(message1)<block_end>
# Copyright 2020 QuantStack, Codethink Ltd
# Distributed under the terms of the Modified BSD License.
<import_stmt>json<import_from_stmt>typing Any List overload<import_from_stmt>urllib.parse quote<import_from_stmt>quetz.config Config ConfigEntry ConfigSection<import_from_stmt>.oauth2 OAuthAuthenticator<class_stmt>JupyterConfigEntry<block_start>config_section="jupyterhubauthenticator"<line_sep>registered_entries:List[ConfigEntry]=[]<line_sep>config=<none><def_stmt>__init__ self dtype default=<none> required=<true><block_start>self.dtype=dtype<line_sep>self.default=default<line_sep>self.required=required<block_end># these type annotations dont work yet, but I leave them for now
# maybe someone will find a solution later
# https://github.com/python/mypy/issues/2566#issuecomment-703998877
@overload<def_stmt>__get__ self instance:<none> owner:Any<arrow>"JupyterConfigEntry"<block_start><ellipsis><block_end>@overload<def_stmt>__get__ self instance:object owner:Any<arrow>str<block_start><ellipsis><block_end><def_stmt>__get__ self obj objtype<arrow>str<block_start><return>getattr(self.config self.config_attr_name)<block_end><def_stmt>__set_name__ self owner name<block_start>self.attr_name=name<line_sep>self.config_attr_name=f"{self.config_section}_{name}"<line_sep>entry=ConfigEntry(name self.dtype default=self.default required=self.required)<line_sep>self.registered_entries.append(entry)<block_end>@classmethod<def_stmt>_make_config cls<block_start>section=ConfigSection(cls.config_section cls.registered_entries required=<false> )<line_sep><return>[section]<block_end>@classmethod<def_stmt>register cls config:Config<block_start>cls.config=config<line_sep>config_options=cls._make_config()<line_sep>config.register(config_options)<line_sep><return>config.configured_section(cls.config_section)<block_end><block_end><class_stmt>JupyterhubAuthenticator(OAuthAuthenticator)<block_start>"""Use the OAuth2 protocol to authenticate with a JupyterHub server, which acts
as the identity provider.
To activate it, add the following section to the ``config.toml`` (see :ref:`configfile`):
.. code::
[jupyterhubauthenticator]
# client credentials; they need to be registered with
# jupyterhub by adding an external service
client_id = "quetz_client"
client_secret = "<PASSWORD>-secret"
# token endpoint of JupyterHub, needs to be accessible from the Quetz server
access_token_url = "http://JUPYTERHUB_HOST:PORT/hub/api/oauth2/token"
# authorize endpoint of JupyterHub, needs to be accessible from users' browser
authorize_url = "http://JUPYTERHUB_HOST:PORT/hub/api/oauth2/authorize"
# API root, needs to be accessible from the Quetz server
api_base_url = "http://JUPYTERHUB_HOST:PORT/hub/api/"
To configure quetz as an OAuth client in JupyterHub, you will need to define
a `JupyterHub service <https://jupyterhub.readthedocs.io/en/stable/reference/services.html#externally-managed-services>`_. You can achieve this by adding the following to the
``jupyterhub_config.py`` file of your JupyterHub instance:
.. code::
c.JupyterHub.services = [
{
# service name; it will be used to set up routers
'name': 'quetz',
# quetz URL to set up redirections, only required if you use
# JupyterHub url scheme
'url': 'http://QUETZ_HOST:PORT',
# any secret >8 characters, you will also need to set
# the client_secret in the authenticator config with this
# string
'api_token': '<PASSWORD>',
# client_id in the authenticator config
'oauth_client_id': 'quetz_client',
# URL of the callback endpoint on the quetz server
'oauth_redirect_uri': 'http://QUETZ_HOST:PORT/auth/jupyterhub/authorize',
}
]
"""<line_sep># noqa
provider='jupyterhub'<line_sep># TODO: need to figure out how to use type annotations with descriptors
# see also: https://github.com/python/mypy/pull/2266
client_id=JupyterConfigEntry(str required=<true>)# type: ignore
client_secret=JupyterConfigEntry(str required=<true>)# type: ignore
access_token_url=JupyterConfigEntry(str required=<true>)# type: ignore
validate_token_url="authorizations/token/{}"<line_sep>authorize_url=JupyterConfigEntry(str required=<true>)# type: ignore
api_base_url=JupyterConfigEntry(str required=<true>)# type: ignore
client_kwargs={"token_endpoint_auth_method":"client_secret_post" "token_placement":"uri" }<async_keyword><def_stmt>userinfo self request token<block_start>response=<await>self._get_user_for_token(token)<line_sep>profile=response.json()<line_sep>github_profile={"id":profile["name"]+'_id' "name":profile["name"] "avatar_url":"" "login":profile["name"] }<line_sep><return>github_profile<block_end><async_keyword><def_stmt>_get_user_for_token self token<block_start>headers={'Authorization':'token {}'.format(self.client_secret)}<line_sep>access_token=quote(token['access_token'] safe='')<line_sep># authlib client will be place token in query params
# which are ignored by jupyterhub
# this workaround is required to implement jupyterhub API
# which puts the token as path parameter
# https://jupyterhub.readthedocs.io/en/stable/_static/rest-api/index.html#path--authorizations-token--token- # noqa
resp=<await>self.client.get(f'authorizations/token/{access_token}' token=token headers=headers)<line_sep><return>resp<block_end><async_keyword><def_stmt>validate_token self token# access_token = json.loads(token)["access_token"]
<block_start>token=json.loads(token)<line_sep>resp=<await>self._get_user_for_token(token)<line_sep><return>resp.status_code<eq>200<block_end><def_stmt>configure self config:Config<block_start>self.is_enabled=JupyterConfigEntry.register(config)<line_sep>super().configure(config)<block_end><block_end> |
"""HACS Startup constrains."""<line_sep># pylint: disable=bad-continuation
<import_stmt>os<import_from_stmt>.const CUSTOM_UPDATER_LOCATIONS CUSTOM_UPDATER_WARNING<import_from_stmt>.helpers.misc version_left_higher_then_right<import_from_stmt>custom_components.hacs.globals get_hacs<line_sep>MINIMUM_HA_VERSION="0.110.0"<def_stmt>check_constrains <block_start>"""Check HACS constrains."""<if_stmt><not>constrain_translations()<block_start><return><false><block_end><if_stmt><not>constrain_custom_updater()<block_start><return><false><block_end><if_stmt><not>constrain_version()<block_start><return><false><block_end><return><true><block_end><def_stmt>constrain_custom_updater <block_start>"""Check if custom_updater exist."""<line_sep>hacs=get_hacs()<for_stmt>location CUSTOM_UPDATER_LOCATIONS<block_start><if_stmt>os.path.exists(location.format(hacs.system.config_path))<block_start>msg=CUSTOM_UPDATER_WARNING.format(location.format(hacs.system.config_path))<line_sep>hacs.logger.critical(msg)<line_sep><return><false><block_end><block_end><return><true><block_end><def_stmt>constrain_version <block_start>"""Check if the version is valid."""<line_sep>hacs=get_hacs()<if_stmt><not>version_left_higher_then_right(hacs.system.ha_version MINIMUM_HA_VERSION)<block_start>hacs.logger.critical(f"You need HA version {MINIMUM_HA_VERSION} or newer to use this integration.")<line_sep><return><false><block_end><return><true><block_end><def_stmt>constrain_translations <block_start>"""Check if traslations exist."""<line_sep>hacs=get_hacs()<if_stmt><not>os.path.exists(f"{hacs.system.config_path}/custom_components/hacs/translations")<block_start>hacs.logger.critical("You are missing the translations directory.")<line_sep><return><false><block_end><return><true><block_end> |
<import_stmt>typing<import_from_stmt>river base<line_sep>__all__=["FuncTransformer"]<class_stmt>FuncTransformer(base.Transformer)<block_start>"""Wraps a function to make it usable in a pipeline.
There is often a need to apply an arbitrary transformation to a set of features. For instance,
this could involve parsing a date and then extracting the hour from said date. If you're
processing a stream of data, then you can do this yourself by calling the necessary code at
your leisure. On the other hand, if you want to do this as part of a pipeline, then you need to
follow a simple convention.
To use a function as part of a pipeline, take as input a `dict` of features and output a `dict`.
Once you have initialized this class with your function, then you can use it like you would use
any other (unsupervised) transformer.
It is up to you if you want your function to be pure or not. By pure we refer to a function
that doesn't modify its input. However, we recommend writing pure functions because this
reduces the chances of inserting bugs into your pipeline.
Parameters
----------
func
A function that takes as input a `dict` and outputs a `dict`.
Examples
--------
>>> from pprint import pprint
>>> import datetime as dt
>>> from river import compose
>>> x = {'date': '2019-02-14'}
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... x['is_weekend'] = date.day in (5, 6)
... x['hour'] = date.hour
... return x
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'date': '2019-02-14', 'hour': 0, 'is_weekend': False}
The above example is not pure because it modifies the input. The following example is pure
and produces the same output:
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... return {'is_weekend': date.day in (5, 6), 'hour': date.hour}
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'hour': 0, 'is_weekend': False}
The previous example doesn't include the `date` feature because it returns a new `dict`.
However, a common use case is to add a feature to an existing set of features. You can do
this in a pure way by unpacking the input `dict` into the output `dict`:
>>> def parse_date(x):
... date = dt.datetime.strptime(x['date'], '%Y-%m-%d')
... return {'is_weekend': date.day in (5, 6), 'hour': date.hour, **x}
>>> t = compose.FuncTransformer(parse_date)
>>> pprint(t.transform_one(x))
{'date': '2019-02-14', 'hour': 0, 'is_weekend': False}
You can add `FuncTransformer` to a pipeline just like you would with any other transformer.
>>> from river import naive_bayes
>>> pipeline = compose.FuncTransformer(parse_date) | naive_bayes.MultinomialNB()
>>> pipeline
Pipeline (
FuncTransformer (
func="parse_date"
),
MultinomialNB (
alpha=1.
)
)
If you provide a function without wrapping it, then the pipeline will do it for you:
>>> pipeline = parse_date | naive_bayes.MultinomialNB()
"""<def_stmt>__init__ self func:typing.Callable[[dict] dict]<block_start>self.func=func<block_end><def_stmt>transform_one self x<block_start><return>self.func(x)<block_end><def_stmt>__str__ self<block_start><return>self.func.__name__<block_end><block_end> |
<import_from_future_stmt> unicode_literals<import_stmt>os<import_from_stmt>os.path dirname<import_stmt>platform<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>textwrap dedent<import_stmt>unittest<try_stmt><block_start><import_from_stmt>unittest.mock MagicMock patch<block_end><except_stmt><block_start><import_from_stmt>mock MagicMock patch<block_end><import_from_stmt>green loader<import_from_stmt>green.loader GreenTestLoader flattenTestSuite<class_stmt>TestToProtoTestList(unittest.TestCase)<block_start><def_stmt>test_moduleImportFailure self<block_start>"""
toProtoTestList() raises import errors normally
"""<line_sep>suite=MagicMock()<line_sep>suite.__class__.__name__=str("ModuleImportFailure")<line_sep>suite.__str__.return_value="exception_method (other_stuff)"<line_sep>suite.exception_method.side_effect=AttributeError<line_sep>self.assertRaises(AttributeError loader.toProtoTestList (suite ))<block_end><def_stmt>test_moduleImportFailureIgnored self<block_start>"""
toProtoTestList() does not raise errors when doing completions
"""<line_sep>suite=MagicMock()<line_sep>suite.__class__.__name__=str("ModuleImportFailure")<line_sep>suite.__str__.return_value="exception_method other_stuff"<line_sep>suite.exception_method.side_effect=AttributeError<line_sep>self.assertEqual(loader.toProtoTestList(suite doing_completions=<true>) [])<block_end><block_end><class_stmt>TestToParallelTargets(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>super(TestToParallelTargets self).setUp()<class_stmt>FakeModule(object)<block_start><pass><block_end><class_stmt>FakeModule2(object)<block_start><pass><block_end>self._fake_module_name="my_test_module"<line_sep>self._fake_module_name2="my_test_module2"<line_sep>sys.modules[self._fake_module_name]=FakeModule<line_sep>sys.modules[self._fake_module_name2]=FakeModule2<block_end><def_stmt>tearDown self<block_start><del_stmt>sys.modules[self._fake_module_name]<del_stmt>sys.modules[self._fake_module_name2]<line_sep>super(TestToParallelTargets self).tearDown()<block_end><def_stmt>test_methods_with_no_constraints self<block_start>"""
toParallelTargets() returns only module names.
"""<class_stmt>NormalTestCase(unittest.TestCase)<block_start><def_stmt>runTest self<block_start><pass><block_end><block_end>NormalTestCase.__module__=self._fake_module_name<line_sep>targets=loader.toParallelTargets(NormalTestCase() [])<line_sep>self.assertEqual(targets [self._fake_module_name])<block_end><def_stmt>test_methods_with_constraints self<block_start>"""
toParallelTargets() returns test names when constrained.
"""<class_stmt>NormalTestCase(unittest.TestCase)<block_start><def_stmt>runTest self<block_start><pass><block_end><block_end>NormalTestCase.__module__=self._fake_module_name<line_sep>full_name="my_test_module.NormalTestCase.runTest"<line_sep>targets=loader.toParallelTargets(NormalTestCase() [full_name])<line_sep>self.assertEqual(targets [full_name])<block_end><def_stmt>test_filter_out_dot self<block_start>"""
toParallelTargets() correctly returns modules when '.' is in target list
"""<class_stmt>NormalTestCase(unittest.TestCase)<block_start><def_stmt>runTest self<block_start><pass><block_end><block_end><class_stmt>NormalTestCase2(unittest.TestCase)<block_start><def_stmt>runTest self<block_start><pass><block_end><block_end>NormalTestCase.__module__=self._fake_module_name<line_sep>NormalTestCase2.__module__=self._fake_module_name2<line_sep>targets=loader.toParallelTargets([NormalTestCase() NormalTestCase2()] ["."])<line_sep>self.assertEqual(targets ["my_test_module" "my_test_module2"])<block_end><def_stmt>test_ignore_doctest self<block_start>"""
toParallelTargets() ignores doctests."""<block_end><block_end><class_stmt>TestCompletions(unittest.TestCase)<block_start><def_stmt>test_completionBad self<block_start>"""
Bad match generates no completions
"""<line_sep>self.assertEqual("" loader.getCompletions("garbage.in"))<block_end><def_stmt>test_completionExact self<block_start>"""
Correct completions are generated for an exact match.
"""<line_sep>c=set(loader.getCompletions("green").split("\n"))<line_sep>self.assertIn("green" c)<line_sep>self.assertIn("green.test" c)<line_sep>self.assertIn("green.test.test_loader" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions.test_completionExact" c)<block_end><def_stmt>test_completionPartialShort self<block_start>"""
Correct completions generated for short partial match.
"""<line_sep>cwd=os.getcwd()<line_sep>green_parent=dirname(dirname(dirname(os.path.abspath(__file__))))<line_sep>os.chdir(green_parent)<line_sep>self.addCleanup(os.chdir cwd)<line_sep>c=set(loader.getCompletions("gre").split("\n"))<line_sep>self.assertIn("green" c)<line_sep>self.assertIn("green.test" c)<line_sep>self.assertIn("green.test.test_loader" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions.test_completionPartialShort" c)<block_end><def_stmt>test_completionPartial self<block_start>"""
Correct completions generated for partial match. 2nd target ignored.
"""<line_sep>c=set(loader.getCompletions(["green.te" "green"]).split("\n"))<line_sep>self.assertIn("green.test" c)<line_sep>self.assertIn("green.test.test_loader" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions" c)<line_sep>self.assertIn("green.test.test_loader.TestCompletions.test_completionPartial" c)<line_sep>self.assertNotIn("green" c)<block_end><def_stmt>test_completionEmpty self<block_start>"""
An empty target generates completions for the whole directory
"""<line_sep>cwd=os.getcwd()<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>os.chdir(tmpdir)<line_sep>self.addCleanup(os.chdir cwd)<line_sep>os.mkdir("the_pkg")<line_sep>fh=open(os.path.join("the_pkg" "__init__.py") "w")<line_sep>fh.write("")<line_sep>fh.close()<line_sep>fh=open(os.path.join("the_pkg" "test_things.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
"""))<line_sep>fh.close()<line_sep>c=set(loader.getCompletions("").split("\n"))<line_sep>self.assertIn("the_pkg" c)<line_sep>self.assertIn("the_pkg.test_things" c)<line_sep>self.assertIn("the_pkg.test_things.A.testOne" c)<line_sep>self.assertIn("the_pkg.test_things.A.testTwo" c)<block_end><def_stmt>test_completionDot self<block_start>"""
A '.' target generates completions for the whole directory
"""<line_sep>cwd=os.getcwd()<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>os.chdir(tmpdir)<line_sep>self.addCleanup(os.chdir cwd)<line_sep>os.mkdir("my_pkg")<line_sep>fh=open(os.path.join("my_pkg" "__init__.py") "w")<line_sep>fh.write("")<line_sep>fh.close()<line_sep>fh=open(os.path.join("my_pkg" "test_things.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
"""))<line_sep>fh.close()<line_sep>c=set(loader.getCompletions(".").split("\n"))<line_sep>self.assertIn("my_pkg" c)<line_sep>self.assertIn("my_pkg.test_things" c)<line_sep>self.assertIn("my_pkg.test_things.A.testOne" c)<line_sep>self.assertIn("my_pkg.test_things.A.testTwo" c)<block_end><def_stmt>test_completionIgnoresErrors self<block_start>"""
Errors in one module don't block the remaining completions
"""<line_sep>cwd=os.getcwd()<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>os.chdir(tmpdir)<line_sep>self.addCleanup(os.chdir cwd)<line_sep>os.mkdir("my_pkg2")<line_sep>fh=open(os.path.join("my_pkg2" "__init__.py") "w")<line_sep>fh.write("")<line_sep>fh.close()<line_sep>fh=open(os.path.join("my_pkg2" "test_crash01.py") "w")<line_sep>contents=dedent("""
import unittest
class A(unittest.TestCase):
def testOne(self):
pass
def testTwo(self):
pass
""")<line_sep>fh.write(contents)<line_sep>fh.close()<line_sep>fh=open(os.path.join("my_pkg2" "test_crash02.py") "w")<line_sep>fh.write("import moocow")<line_sep>fh.close()<line_sep>fh=open(os.path.join("my_pkg2" "test_crash03.py") "w")<line_sep>fh.write(contents)<line_sep>fh.close()<line_sep>c=set(loader.getCompletions(".").split("\n"))<line_sep>self.assertIn("my_pkg2" c)<line_sep>self.assertIn("my_pkg2.test_crash01" c)<line_sep>self.assertIn("my_pkg2.test_crash01.A.testOne" c)<line_sep>self.assertIn("my_pkg2.test_crash01.A.testTwo" c)<line_sep>self.assertIn("my_pkg2.test_crash03" c)<line_sep>self.assertIn("my_pkg2.test_crash03.A.testOne" c)<line_sep>self.assertIn("my_pkg2.test_crash03.A.testTwo" c)<block_end><block_end><class_stmt>TestIsPackage(unittest.TestCase)<block_start><def_stmt>test_yes self<block_start>"""
A package is identified.
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>fh=open(os.path.join(tmpdir "__init__.py") "w")<line_sep>fh.write("pass\n")<line_sep>fh.close()<line_sep>self.assertTrue(loader.isPackage(tmpdir))<block_end><def_stmt>test_no self<block_start>"""
A non-package is identified
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>self.assertFalse(loader.isPackage(tmpdir))<block_end><block_end><class_stmt>TestDottedModule(unittest.TestCase)<block_start><def_stmt>test_bad_path self<block_start>"""
A bad path causes an exception
"""<line_sep>self.assertRaises(ValueError loader.findDottedModuleAndParentDir tempfile.tempdir)<block_end><def_stmt>test_good_path self<block_start>"""
A good path gets (dotted_module, parent) properly returned
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>os.makedirs(os.path.join(tmpdir "a" "b" "c" "d"))<line_sep>package_init=os.path.join(tmpdir "a" "b" "c" "__init__.py")<line_sep>subpkg_init=os.path.join(tmpdir "a" "b" "c" "d" "__init__.py")<line_sep>module_name="stuff.py"<line_sep>module=os.path.join(tmpdir "a" "b" "c" "d" module_name)<for_stmt>filename [package_init subpkg_init module]<block_start>fh=open(filename "w")<line_sep>fh.write("pass\n")<line_sep>fh.close()<block_end>self.assertEqual(loader.findDottedModuleAndParentDir(module) ("c.d.stuff" os.path.join(tmpdir "a" "b")) )<block_end><block_end><class_stmt>TestLoadTestsFromTestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.loader=GreenTestLoader()<block_end><def_stmt>test_runTest self<block_start>"""
When a testcase has no matching method names, but does have a runTest,
use that instead.
"""<class_stmt>MyTestCase(unittest.TestCase)<block_start><def_stmt>helper1 self<block_start><pass><block_end><def_stmt>helper2 self<block_start><pass><block_end><def_stmt>runTest self<block_start><pass><block_end><block_end>suite=self.loader.loadTestsFromTestCase(MyTestCase)<line_sep>self.assertEqual(suite.countTestCases() 1)<line_sep>self.assertEqual(suite._tests[0]._testMethodName "runTest")<block_end><def_stmt>test_normal self<block_start>"""
Normal test methods get loaded
"""<class_stmt>Normal(unittest.TestCase)<block_start><def_stmt>test_method1 self<block_start><pass><block_end><def_stmt>test_method2 self<block_start><pass><block_end><block_end>suite=self.loader.loadTestsFromTestCase(Normal)<line_sep>self.assertEqual(suite.countTestCases() 2)<line_sep>self.assertEqual(set([x._testMethodName<for>x suite._tests]) set(["test_method1" "test_method2"]) )<block_end><def_stmt>test_isTestCaseDisabled self<block_start>"""
TestCases disabled by nose generators don't get loaded
"""<class_stmt>HasDisabled(unittest.TestCase)<block_start><def_stmt>test_method self<block_start><pass><block_end>test_method.__test__=<false><block_end>suite=self.loader.loadTestsFromTestCase(HasDisabled)<line_sep>self.assertEqual(suite.countTestCases() 0)<block_end><block_end><class_stmt>TestLoadFromModuleFilename(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.loader=GreenTestLoader()<block_end><def_stmt>test_skipped_module self<block_start>"""
A module that wants to be skipped gets skipped
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>filename=os.path.join(tmpdir "skipped_module.py")<line_sep>fh=open(filename "w")<line_sep>fh.write(dedent("""
import unittest
raise unittest.case.SkipTest
class NotReached(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
"""))<line_sep>fh.close()<line_sep>suite=self.loader.loadFromModuleFilename(filename)<line_sep>self.assertEqual(suite.countTestCases() 1)<line_sep>self.assertRaises(unittest.case.SkipTest getattr(suite._tests[0] suite._tests[0]._testMethodName) )<block_end><block_end><class_stmt>TestDiscover(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.loader=GreenTestLoader()<block_end>@patch("green.loader.os.path.isdir")@patch("green.loader.debug")@patch("green.loader.os.listdir")<def_stmt>test_oserror self mock_listdir mock_debug mock_isdir<block_start>"""
discover() prints a debug message and moves on when encountering an OSError
"""<line_sep>mock_isdir.return_value=<true><line_sep>mock_listdir.side_effect=OSError()<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>self.loader.discover(os.path.join(tmpdir "garbage_in"))<line_sep>self.assertEqual(len(mock_debug.mock_calls) 1)<block_end><def_stmt>test_bad_input self<block_start>"""
discover() raises ImportError when passed a non-directory
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>self.assertRaises(ImportError self.loader.discover os.path.join(tmpdir "garbage_in"))<line_sep>filename=os.path.join(tmpdir "some_file.py")<line_sep>fh=open(filename "w")<line_sep>fh.write("pass\n")<line_sep>fh.close()<line_sep>self.assertRaises(ImportError self.loader.discover filename)<block_end><def_stmt>test_bad_pkg_name self<block_start>"""
If the directory is an invalid package name, don't bother looking in
it.
"""<line_sep>tmpdir=tempfile.mkdtemp()<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>startdir=os.getcwd()<line_sep>os.chdir(tmpdir)<line_sep>self.addCleanup(os.chdir startdir)<line_sep>bad_pkg_name="1badname"<line_sep>os.mkdir(bad_pkg_name)<line_sep>tmp_subdir=os.path.join(tmpdir bad_pkg_name)<line_sep>fh=open(os.path.join(tmp_subdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>named_module=os.path.join(os.path.basename(tmp_subdir) "named_module.py")<line_sep>fh=open(named_module "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep>self.assertEqual(self.loader.discover(tmpdir) <none>)<block_end><def_stmt>test_symlink self<block_start>"""
If the directory is a symlink, it should be skipped.
"""<if_stmt>platform.system()<eq>"Windows"# pragma: no cover
<block_start>self.skipTest("This test is for posix-specific behavior")<block_end>tmpdir=tempfile.mkdtemp()<line_sep>tmpdir2=tempfile.mkdtemp()<line_sep>os.symlink(tmpdir os.path.join(tmpdir2 "link"))<line_sep>self.addCleanup(shutil.rmtree tmpdir)<line_sep>startdir=os.getcwd()<line_sep>os.chdir(tmpdir)<line_sep>self.addCleanup(os.chdir startdir)<line_sep>pkg_name="realpkg"<line_sep>os.mkdir(pkg_name)<line_sep>tmp_subdir=os.path.join(tmpdir pkg_name)<line_sep>fh=open(os.path.join(tmp_subdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>named_module=os.path.join(os.path.basename(tmp_subdir) "test_module.py")<line_sep>fh=open(named_module "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep>self.assertEqual(self.loader.discover(tmpdir2) <none>)<block_end><block_end><class_stmt>TestLoadTargets(unittest.TestCase)# Setup
<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.startdir=os.getcwd()<line_sep>cls.container_dir=tempfile.mkdtemp()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><if_stmt>os.getcwd()<ne>cls.startdir<block_start>os.chdir(cls.startdir)<block_end>cls.startdir=<none><line_sep>shutil.rmtree(cls.container_dir)<block_end><def_stmt>setUp self<block_start>os.chdir(self.container_dir)<line_sep>self.tmpdir=tempfile.mkdtemp(dir=self.container_dir)<line_sep>self.loader=GreenTestLoader()<block_end><def_stmt>tearDown self<block_start>os.chdir(self.container_dir)<line_sep>shutil.rmtree(self.tmpdir)<block_end># Tests
<def_stmt>test_returnIsLoadable self<block_start>"""
Results returned by toParallelTargets should be loadable by
loadTargets(), even if they aren't directly loadable through a package
relative to the current working directory.
"""<line_sep>tests_dir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep># No __init__.py in the directory!
fh=open(os.path.join(tests_dir "test_not_in_pkg.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Discover stuff
suite=self.loader.loadTargets(".")<line_sep># This should resolve it to the module that's not importable from here
test=loader.toParallelTargets(suite [])[0]<line_sep>self.loader.loadTargets(test)<block_end><def_stmt>test_emptyDirAbsolute self<block_start>"""
Absolute path to empty directory returns None
"""<line_sep>tests=self.loader.loadTargets(self.tmpdir)<line_sep>self.assertTrue(tests<is><none>)<block_end><def_stmt>test_emptyDirRelative self<block_start>"""
Relative path to empty directory returns None
"""<line_sep>os.chdir(self.tmpdir)<line_sep>os.chdir("..")<line_sep>tests=self.loader.loadTargets(os.path.dirname(self.tmpdir))<line_sep>self.assertEqual(tests <none>)<block_end><def_stmt>test_emptyDirDot self<block_start>"""
'.' while in an empty directory returns None
"""<line_sep>os.chdir(self.tmpdir)<line_sep>tests=self.loader.loadTargets(".")<line_sep>self.assertTrue(tests<is><none>)<block_end><def_stmt>test_relativeDotDir self<block_start>"""
Dotted relative path to empty directory returns None
"""<line_sep>os.chdir(self.tmpdir)<line_sep>os.chdir("..")<line_sep>target=os.path.join("." os.path.basename(self.tmpdir))<line_sep>tests=self.loader.loadTargets(target)<line_sep>self.assertTrue(tests<is><none>)<block_end><def_stmt>test_BigDirWithAbsoluteImports self<block_start>"""
Big dir discovers tests and doesn't crash on absolute import
"""<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>pkg_name=os.path.basename(sub_tmpdir)<line_sep># Child setup
# pkg/__init__.py
fh=open(os.path.join(sub_tmpdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep># pkg/target_module.py
fh=open(os.path.join(sub_tmpdir "target_module.py") "w")<line_sep>fh.write("a = 1\n")<line_sep>fh.close()<line_sep># pkg/test/__init__.py
os.mkdir(os.path.join(sub_tmpdir "test"))<line_sep>fh=open(os.path.join(sub_tmpdir "test" "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep># pkg/test/test_target_module.py
fh=open(os.path.join(sub_tmpdir "test" "test_target_module.py") "w")<line_sep>fh.write(dedent("""
import unittest
import {}.target_module
class A(unittest.TestCase):
def testPass(self):
pass
""".format(pkg_name)))<line_sep>fh.close()<line_sep># Load the tests
os.chdir(self.tmpdir)<line_sep>test_suite=self.loader.loadTargets(pkg_name)<line_sep>self.assertEqual(test_suite.countTestCases() 1)<line_sep># Dotted name should start with the package!
self.assertEqual(pkg_name+".test.test_target_module.A.testPass" loader.toProtoTestList(test_suite)[0].dotted_name )<block_end><def_stmt>test_DirWithInit self<block_start>"""
Dir empty other than blank __init__.py returns None
"""<line_sep># Parent directory setup
os.chdir(self.tmpdir)<line_sep>os.chdir("..")<line_sep># Child setup
target=os.path.join(self.tmpdir "__init__.py")<line_sep>fh=open(target "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>fh=open(os.path.join(self.tmpdir "test_module_with_init.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
module_name=os.path.basename(self.tmpdir)<line_sep>tests=self.loader.loadTargets(module_name)<line_sep>self.assertEqual(tests.countTestCases() 1)<block_end><def_stmt>test_DottedName self<block_start>"""
Importing a module via dotted name loads the tests.
"""<line_sep># Parent directory setup
os.chdir(self.tmpdir)<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>basename=os.path.basename(sub_tmpdir)<line_sep># Child setup
fh=open(os.path.join(basename "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>fh=open(os.path.join(basename "test_module_dotted_name.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
module_name=basename+".test_module_dotted_name"<line_sep>tests=self.loader.loadTargets(module_name)<line_sep>self.assertEqual(tests.countTestCases() 1)<block_end><def_stmt>test_DottedNamePackageFromPath self<block_start>"""
Importing a package from path loads the tests.
"""<line_sep># Child setup
tmp_subdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>fh=open(os.path.join(tmp_subdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>fh=open(os.path.join(tmp_subdir "test_module.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Go somewhere else, but setup the path
os.chdir(self.startdir)<line_sep>sys.path.insert(0 self.tmpdir)<line_sep># Load the tests
tests=self.loader.loadTargets(os.path.basename(tmp_subdir))<line_sep>sys.path.remove(self.tmpdir)<line_sep>self.assertTrue(tests.countTestCases() 1)<block_end><def_stmt>test_ModuleByName self<block_start>"""
A module in a package can be loaded by filename.
"""<line_sep>os.chdir(self.tmpdir)<line_sep>tmp_subdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>fh=open(os.path.join(tmp_subdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>named_module=os.path.join(os.path.basename(tmp_subdir) "named_module.py")<line_sep>fh=open(named_module "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
tests=self.loader.loadTargets(named_module)<try_stmt><block_start>self.assertEqual(tests.countTestCases() 1)<block_end><except_stmt><block_start><raise><block_end><finally_stmt><block_start>shutil.rmtree(tmp_subdir)<block_end><block_end><def_stmt>test_MalformedModuleByName self<block_start>"""
Importing a malformed module by name creates a test that raises
ImportError.
"""<line_sep>fh=open(os.path.join(self.tmpdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>malformed_module=os.path.join(os.path.basename(self.tmpdir) "malformed_module.py")<line_sep>fh=open(malformed_module "w")<line_sep>fh.write("This is a malformed module.")<line_sep>fh.close()<line_sep># Load the tests
tests=self.loader.loadTargets(malformed_module)<line_sep>self.assertEqual(tests.countTestCases() 1)<line_sep>test=tests._tests[0]<line_sep>test_method=getattr(test test._testMethodName)<line_sep>self.assertRaises(ImportError test_method)<block_end><def_stmt>test_partiallyGoodName self<block_start>"""
Don't crash when loading module.object if the module exists but the object does not.
"""<line_sep># Parent directory setup
os.chdir(self.tmpdir)<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>basename=os.path.basename(sub_tmpdir)<line_sep># Child setup
fh=open(os.path.join(basename "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>fh=open(os.path.join(basename "existing_module.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPass(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
module_name=basename+".existing_module.nonexistant_object"<line_sep>tests=self.loader.loadTargets(module_name)<line_sep>self.assertEqual(tests <none>)<block_end><def_stmt>test_multiple_targets self<block_start>"""
Specifying multiple targets causes them all to be tested.
"""<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep># pkg/__init__.py
fh=open(os.path.join(sub_tmpdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep># pkg/test/test_target1.py
fh=open(os.path.join(sub_tmpdir "test_target1.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep># pkg/test/test_target2.py
fh=open(os.path.join(sub_tmpdir "test_target2.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
os.chdir(self.tmpdir)<line_sep>pkg=os.path.basename(sub_tmpdir)<line_sep>tests=self.loader.loadTargets([pkg+"."+"test_target1" pkg+"."+"test_target2"])<line_sep>self.assertEqual(tests.countTestCases() 2)<block_end><def_stmt>test_duplicate_targets self<block_start>"""
Specifying duplicate targets does not cause duplicate loading.
"""<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>fh=open(os.path.join(sub_tmpdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep>fh=open(os.path.join(sub_tmpdir "test_dupe_target.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep>os.chdir(self.tmpdir)<line_sep>pkg=os.path.basename(sub_tmpdir)<line_sep>tests=self.loader.loadTargets([pkg+"."+"test_dupe_target" pkg+"."+"test_dupe_target" pkg+"."+"test_dupe_target" ])<line_sep>self.assertEqual(tests.countTestCases() 1)<block_end><def_stmt>test_explicit_filename_error self<block_start>"""
Loading a module by name with a syntax error produces a failure, not a
silent absence of its tests.
"""<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep>fh=open(os.path.join(sub_tmpdir "mod_with_import_error.py") "w")<line_sep>fh.write("this is a syntax error")<line_sep>fh.close()<line_sep>os.chdir(sub_tmpdir)<line_sep>tests=self.loader.loadTargets("mod_with_import_error.py")<line_sep>self.assertEqual(tests.countTestCases() 1)<block_end><def_stmt>test_file_pattern self<block_start>"""
Specifying a file pattern causes only matching files to be loaded
"""<line_sep>sub_tmpdir=tempfile.mkdtemp(dir=self.tmpdir)<line_sep># pkg/__init__.py
fh=open(os.path.join(sub_tmpdir "__init__.py") "w")<line_sep>fh.write("\n")<line_sep>fh.close()<line_sep># pkg/test/target1_tests.py
fh=open(os.path.join(sub_tmpdir "target1_tests.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep># pkg/test/target2_tests.py
fh=open(os.path.join(sub_tmpdir "target2_tests.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep># pkg/test/test_target999.py: NOT a match.
fh=open(os.path.join(sub_tmpdir "test_target999.py") "w")<line_sep>fh.write(dedent("""
import unittest
class A(unittest.TestCase):
def testPasses(self):
pass
"""))<line_sep>fh.close()<line_sep># Load the tests
os.chdir(self.tmpdir)<line_sep>pkg=os.path.basename(sub_tmpdir)<line_sep>tests=self.loader.loadTargets(pkg file_pattern="*_tests.py")<line_sep>self.assertEqual(tests.countTestCases() 2)<block_end><block_end><class_stmt>TestFlattenTestSuite(unittest.TestCase)# Setup
<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.startdir=os.getcwd()<line_sep>cls.container_dir=tempfile.mkdtemp()<block_end>@classmethod<def_stmt>tearDownClass cls<block_start><if_stmt>os.getcwd()<ne>cls.startdir<block_start>os.chdir(cls.startdir)<block_end>cls.startdir=<none><line_sep>shutil.rmtree(cls.container_dir)<block_end><def_stmt>setUp self<block_start>os.chdir(self.container_dir)<line_sep>self.tmpdir=tempfile.mkdtemp(dir=self.container_dir)<line_sep>self.loader=GreenTestLoader()<block_end><def_stmt>tearDown self<block_start>os.chdir(self.container_dir)<line_sep>shutil.rmtree(self.tmpdir)<block_end>@patch("green.loader.GreenTestLoader.suiteClass")@patch("green.loader.DocTestSuite")<def_stmt>test_docTests self mock_doc_test_suite mock_suite_class<block_start>"""
flattenTestSuite injects the test module name into the doctest's .__module__
"""<line_sep>mock_test=MagicMock()<line_sep>mock_iter=MagicMock(return_value=iter([mock_test]))<line_sep>mock_suite=MagicMock()<line_sep>mock_suite.__iter__=mock_iter<line_sep>mock_doc_test_suite.return_value=mock_suite<line_sep>module=MagicMock()<line_sep>test_module_name="test.module"<line_sep>module.__name__=test_module_name<line_sep>module.doctest_modules=["real.module"]<line_sep>flattenTestSuite(() module)<line_sep>self.assertEqual(mock_test.__module__ test_module_name)<block_end><block_end> |
LocalValueDim=18446744073709551613<line_sep>dataTypes={-1:'unknown' 0:'byte' 1:'short' 2:'integer' 4:'long' 50:'unsigned_byte' 51:'unsigned_short' 52:'unsigned_integer' 54:'unsigned_long' 5:'real' 6:'double' 7:'long_double' 9:'string' 10:'complex' 11:'double_complex' 12:'string_array' 55:'char'}<line_sep>dataTypeSize={-1:0 0:1 1:2 2:4 4:8 50:1 51:2 52:4 54:8 5:4 6:8 7:16 9:0 10:8 11:16 12:0 55:1}<def_stmt>GetTypeName typeID<block_start>name=dataTypes.get(typeID)<if_stmt>name<is><none><block_start>name="unknown type"<block_end><return>name<block_end><def_stmt>GetTypeSize typeID<block_start>size=dataTypeSize.get(typeID)<if_stmt>size<is><none><block_start>size=0<block_end><return>size<block_end>CharacteristicNames={0:'value' 1:'min' 2:'max' 3:'offset' 4:'dimensions' 5:'var_id' 6:'payload_offset' 7:'file_index' 8:'time_index' 9:'bitmap' 10:'stat' 11:'transform_type' 12:'minmax'}<def_stmt>GetCharacteristicName cID<block_start>name=CharacteristicNames.get(cID)<if_stmt>name<is><none><block_start>name="unknown characteristic"<block_end><return>name<block_end><def_stmt>GetCharacteristicDataLength cID typeID<block_start>name=CharacteristicNames.get(cID)<if_stmt>(name<eq>'value'<or>name<eq>'min'<or>name<eq>'max'<or>name<eq>'minmax')<block_start><return>dataTypeSize[typeID]<block_end><elif_stmt>(name<eq>'offset'<or>name<eq>'payload_offset')<block_start><return>8<block_end><elif_stmt>(name<eq>'file_index'<or>name<eq>'time_index')<block_start><return>4<block_end><else_stmt><block_start><return>0<block_end><block_end># Read Header info 64 bytes
# fileType: Data, Metadata, Index Table
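# Editor's note (usage sketch, not from the original source): callers typically do
# result = ReadHeader(f, fileSize, "Metadata")
# which prints the decoded header table and returns [status, WriterCount] on
# success, or False when the file is shorter than the 64-byte header.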
<def_stmt>ReadHeader f fileSize fileType<block_start>status=<true><if_stmt>fileSize<l>64<block_start>print("ERROR: Invalid "+fileType+". File is smaller "<concat>"than the header (64 bytes)")<line_sep><return><false><block_end>header=f.read(64)<line_sep>hStr=header.decode('ascii')<line_sep>versionStr=hStr[0:32].replace('\0' ' ')<line_sep>major=hStr[32]<line_sep>minor=hStr[33]<line_sep>micro=hStr[34]<line_sep># unused = hStr[35]
endianValue=header[36]<if_stmt>endianValue<eq>0<block_start>endian='yes'<block_end><elif_stmt>endianValue<eq>1<block_start>endian=' no'<block_end><else_stmt><block_start>print("ERROR: byte 36 must be 0 or 1 to indicate endianness of "<concat>"the data. It is however {0} in this file".format(endianValue))<line_sep>status=<false><block_end>bpversion=int(header[37])<line_sep>active=int(header[38])<if_stmt>active<eq>0<block_start>activeStr=' no'<block_end><else_stmt><block_start>activeStr='yes'<block_end># unused = hStr[39]
WriterCount=int(header[40])<line_sep>aggregatorcount=int(header[44])<line_sep>iscolumnmajor=header[49]<line_sep># 45..63 unused
print("-----------------------------------------------------------"<concat>"-----------------------------------------------------------")<line_sep>print("| Version string | Major | Minor | Patch "<concat>"| unused | Endian | BP version | Active | WriterCount | AggCount"+" | ColumnMajor | unused |")<line_sep>print("| 32 bytes | 1B | 1B | 1B "<concat>"| 1B | 1B | 1B | 1B | 4b | 4b "+"| 1b | 16B |")<line_sep>print("+----------------------------------------------------------"<concat>"----------------------------------------------------------+")<line_sep>print("| {0} | {1} | {2} | {3} | | {4} "<concat>"| {5} | {6} | {7:d} | {8:d} | "+"{9} | |".format(versionStr major minor micro endian bpversion activeStr WriterCount aggregatorcount iscolumnmajor))<line_sep>print("-----------------------------------------------------------"<concat>"-----------------------------------------------------------")<line_sep><return>[status WriterCount]<block_end><if_stmt>__name__<eq>"__main__"<block_start>print("ERROR: Utility main program is bp5dbg.py")<block_end> |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
<import_stmt>torch<import_from_stmt>torch.optim Optimizer<class_stmt>QHM(Optimizer)<block_start>r"""
Stochastic gradient method with Quasi-Hyperbolic Momentum (QHM):
h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
d(k) = (1 - \nu) * g(k) + \nu * h(k)
x(k+1) = x(k) - \alpha * d(k)
"Quasi-hyperbolic momentum and Adam for deep learning"
by <NAME> and <NAME>, ICLR 2019
optimizer = QHM(params, lr=-1, momentum=0, qhm_nu=1, weight_decay=0)
Args:
params (iterable): iterable params to optimize or dict of param groups
lr (float): learning rate, \alpha in QHM update (default: -1, must be set explicitly)
momentum (float, optional): \beta in QHM update, range[0,1) (default:0)
qhm_nu (float, optional): \nu in QHM update, range[0,1] (default: 1)
\nu = 0: SGD without momentum (\beta is ignored)
\nu = 1: SGD with momentum \beta and dampened gradient (1-\beta)
\nu = \beta: SGD with "Nesterov momentum" \beta
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> optimizer = QHM(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
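Worked single step (editor's sketch, not part of the original docstring; the
numbers are chosen only to make the arithmetic visible):
with momentum=0.9, qhm_nu=0.7, g(k)=1.0 and h(k-1)=0.0,
h(k) = 0.1 * 1.0 + 0.9 * 0.0 = 0.1
d(k) = 0.3 * 1.0 + 0.7 * 0.1 = 0.37
x(k+1) = x(k) - lr * 0.37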
"""<def_stmt>__init__ self params lr=-1 momentum=0 qhm_nu=1 weight_decay=0# nu can take values outside of the interval [0,1], but no guarantee of convergence?
<block_start><if_stmt>lr<le>0<block_start><raise>ValueError("Invalid value for learning rate (>0): {}".format(lr))<block_end><if_stmt>momentum<l>0<or>momentum<g>1<block_start><raise>ValueError("Invalid value for momentum [0,1): {}".format(momentum))<block_end><if_stmt>weight_decay<l>0<block_start><raise>ValueError("Invalid value for weight_decay (>=0): {}".format(weight_decay))<block_end>defaults=dict(lr=lr momentum=momentum qhm_nu=qhm_nu weight_decay=weight_decay)<line_sep>super(QHM self).__init__(params defaults)<line_sep># extra_buffer == True only in SSLS with momentum > 0 and nu != 1
self.state['allocate_step_buffer']=<false><block_end><def_stmt>step self closure=<none><block_start>"""
Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates model and returns loss.
"""<line_sep>loss=<none><if_stmt>closure<is><not><none><block_start>loss=closure()<block_end>self.add_weight_decay()<line_sep>self.qhm_direction()<line_sep>self.qhm_update()<line_sep><return>loss<block_end><def_stmt>add_weight_decay self# weight_decay is the same as adding L2 regularization
<block_start><for_stmt>group self.param_groups<block_start>weight_decay=group['weight_decay']<for_stmt>p group['params']<block_start><if_stmt>p.grad<is><none><block_start><continue><block_end><if_stmt>weight_decay<g>0<block_start>p.grad.data.add_(p.data alpha=weight_decay)<block_end><block_end><block_end><block_end><def_stmt>qhm_direction self<block_start><for_stmt>group self.param_groups<block_start>momentum=group['momentum']<line_sep>qhm_nu=group['qhm_nu']<for_stmt>p group['params']<block_start><if_stmt>p.grad<is><none><block_start><continue><block_end>x=p.data# Optimization parameters
g=p.grad.data# Stochastic gradient
# Compute the (negative) step direction d and necessary momentum
state=self.state[p]<if_stmt>abs(momentum)<l>1e-12<or>abs(qhm_nu)<l>1e-12# simply SGD if beta=0 or nu=0
<block_start>d=state['step_buffer']=g<block_end><else_stmt><block_start><if_stmt>'momentum_buffer'<not><in>state<block_start>h=state['momentum_buffer']=torch.zeros_like(x)<block_end><else_stmt><block_start>h=state['momentum_buffer']<block_end># Update momentum buffer: h(k) = (1 - \beta) * g(k) + \beta * h(k-1)
h.mul_(momentum).add_(g alpha=1-momentum)<if_stmt>abs(qhm_nu-1)<l>1e-12# if nu=1, then same as SGD with momentum
<block_start>d=state['step_buffer']=h<block_end><else_stmt><block_start><if_stmt>self.state['allocate_step_buffer']# copy from gradient
<block_start><if_stmt>'step_buffer'<not><in>state<block_start>state['step_buffer']=torch.zeros_like(g)<block_end>d=state['step_buffer'].copy_(g)<block_end><else_stmt># use gradient buffer
<block_start>d=state['step_buffer']=g<block_end># Compute QHM momentum: d(k) = (1 - \nu) * g(k) + \nu * h(k)
d.mul_(1-qhm_nu).add_(h alpha=qhm_nu)<block_end><block_end><block_end><block_end><block_end><def_stmt>qhm_update self<block_start>"""
Perform the QHM update; qhm_direction() needs to be called before this.
"""<for_stmt>group self.param_groups<block_start><for_stmt>p group['params']<block_start><if_stmt>p.grad<is><not><none><block_start>p.data.add_(self.state[p]['step_buffer'] alpha=-group['lr'])<block_end><block_end><block_end><block_end><block_end> |
<import_from_stmt>.collect_env collect_env<import_from_stmt>.logger get_root_logger<import_from_stmt>.optimizer OptimizerHook<line_sep>__all__=['get_root_logger' 'collect_env' 'OptimizerHook']<line_sep> |
<import_from_stmt>tests app<import_from_stmt>flask redirect<line_sep>@app.route('/follow-redirect')<def_stmt>follow_redirect <block_start><return>redirect('http://localhost:8000/following-redirect')<block_end>@app.route('/following-redirect')<def_stmt>following_redirect <block_start><return>redirect('http://localhost:8000/followed-redirect')<block_end>@app.route('/followed-redirect')<def_stmt>followed_redirect <block_start><return>'Followed redirect!'<block_end> |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<def_stmt>check_c_count expected_count<block_start>test.assertEqual(expected_count len(reality.resources_by_logical_name('C')))<block_end>example_template=Template({'A':RsrcDef({'a':'initial'} []) 'B':RsrcDef({} []) 'C':RsrcDef({'!a':GetAtt('A' 'a')} ['B']) 'D':RsrcDef({'c':GetRes('C')} []) 'E':RsrcDef({'ca':GetAtt('C' '!a')} []) })<line_sep>engine.create_stack('foo' example_template)<line_sep>engine.noop(5)<line_sep>engine.call(verify example_template)<line_sep>example_template_shrunk=Template({'A':RsrcDef({'a':'updated'} []) 'B':RsrcDef({} []) 'C':RsrcDef({'!a':GetAtt('A' 'a')} ['B']) 'D':RsrcDef({'c':GetRes('C')} []) 'E':RsrcDef({'ca':GetAtt('C' '!a')} []) })<line_sep>engine.update_stack('foo' example_template_shrunk)<line_sep>engine.noop(7)<line_sep>example_template_long=Template({'A':RsrcDef({'a':'updated'} []) 'B':RsrcDef({} []) 'C':RsrcDef({'!a':GetAtt('A' 'a')} ['B']) 'D':RsrcDef({'c':GetRes('C')} []) 'E':RsrcDef({'ca':GetAtt('C' '!a')} []) 'F':RsrcDef({} ['D' 'E']) })<line_sep>engine.update_stack('foo' example_template_long)<line_sep>engine.call(check_c_count 2)<line_sep>engine.noop(11)<line_sep>engine.call(verify example_template_long)<line_sep>engine.delete_stack('foo')<line_sep>engine.noop(12)<line_sep>engine.call(verify Template({}))<line_sep> |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_from_stmt>blinkpy.common.net.git_cl CLStatus GitCL<import_from_stmt>blinkpy.common.system.executive ScriptError<line_sep># pylint: disable=unused-argument
<class_stmt>MockGitCL(object)<block_start><def_stmt>__init__ self host try_job_results=<none> status='closed' issue_number='1234' time_out=<false> git_error_output=<none><block_start>"""Constructs a fake GitCL with canned return values.
Args:
host: Host object, used for builder names.
try_job_results: A dict of Build to TryJobStatus.
status: CL status string.
issue_number: CL issue number as a string.
time_out: Whether to simulate timing out while waiting.
git_error_output: A dict of git-cl args to exception output.
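Example (editor's sketch; the builder name 'linux-rel' and the values shown
are illustrative only):
git_cl = MockGitCL(host, status='lgtm', issue_number='42')
git_cl.trigger_try_jobs({'linux-rel'})
assert git_cl.get_issue_number() == '42'
assert ['git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'linux-rel'] in git_cl.calls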
"""<line_sep>self._builders=host.builders.all_try_builder_names()<line_sep>self._status=status<line_sep>self._try_job_results=try_job_results<line_sep>self._issue_number=issue_number<line_sep>self._time_out=time_out<line_sep>self._git_error_output=git_error_output<line_sep>self.calls=[]<block_end><def_stmt>run self args<block_start>self.calls.append(['git' 'cl']+args)<line_sep>arg_key="".join(args)<if_stmt>self._git_error_output<and>arg_key<in>self._git_error_output.keys()<block_start><raise>ScriptError(output=self._git_error_output[arg_key])<block_end><return>'mock output'<block_end><def_stmt>trigger_try_jobs self builders bucket=<none><block_start>bucket=bucket<or>'luci.chromium.try'<line_sep>command=['try' '-B' bucket]<for_stmt>builder sorted(builders)<block_start>command.extend(['-b' builder])<block_end>self.run(command)<block_end><def_stmt>get_issue_number self<block_start><return>self._issue_number<block_end><def_stmt>try_job_results self **_<block_start><return>self._try_job_results<block_end><def_stmt>wait_for_try_jobs self **_<block_start><if_stmt>self._time_out<block_start><return><none><block_end><return>CLStatus(self._status self.filter_latest(self._try_job_results))<block_end><def_stmt>wait_for_closed_status self **_<block_start><if_stmt>self._time_out<block_start><return><none><block_end><return>'closed'<block_end><def_stmt>latest_try_jobs self builder_names=<none> **_<block_start><return>self.filter_latest(self._try_job_results)<block_end>@staticmethod<def_stmt>filter_latest try_results<block_start><return>GitCL.filter_latest(try_results)<block_end>@staticmethod<def_stmt>all_finished try_results<block_start><return>GitCL.all_finished(try_results)<block_end>@staticmethod<def_stmt>all_success try_results<block_start><return>GitCL.all_success(try_results)<block_end>@staticmethod<def_stmt>some_failed try_results<block_start><return>GitCL.some_failed(try_results)<block_end><block_end> |
"""
IpaupgradeLog - file ``/var/log/ipaupgrade.log``
================================================
This file records the information of the IPA server upgrade process while
executing the command ``ipa-server-upgrade``.
"""<import_from_stmt>.. LogFileOutput parser<import_from_stmt>insights.specs Specs<line_sep>@parser(Specs.ipaupgrade_log)<class_stmt>IpaupgradeLog(LogFileOutput)<block_start>"""
This parser is used to parse the content of file `/var/log/ipaupgrade.log`.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
Typical content of ``ipaupgrade.log`` file is::
2017-08-07T07:36:50Z DEBUG Starting external process
2017-08-07T07:36:50Z DEBUG args=/bin/systemctl is-active [email protected]
2017-08-07T07:36:50Z DEBUG Process finished, return code=0
2017-08-07T07:36:50Z DEBUG stdout=active
2017-08-07T07:41:50Z ERROR IPA server upgrade failed: Inspect /var/log/ipaupgrade.log and run command ipa-server-upgrade manually.
Example:
>>> log = shared[IpaupgradeLog]
>>> len(list(log.get('DEBUG')))
4
>>> from datetime import datetime
>>> len(log.get_after(datetime(2017, 8, 7, 7, 37, 30)))
1
"""<line_sep>time_format='%Y-%m-%dT%H:%M:%SZ'<block_end> |
# analog of list.py for regex tasks. Responsible for actually running the task.
<import_from_stmt>dreamcoder.domains.regex.makeRegexTasks makeOldTasks makeLongTasks makeShortTasks makeWordTasks makeNumberTasks makeHandPickedTasks makeNewTasks makeNewNumberTasks<import_from_stmt>dreamcoder.domains.regex.regexPrimitives basePrimitives altPrimitives easyWordsPrimitives alt2Primitives concatPrimitives reducedConcatPrimitives strConstConcatPrimitives PRC<import_from_stmt>dreamcoder.dreamcoder explorationCompression Task<import_from_stmt>dreamcoder.grammar Grammar<import_from_stmt>dreamcoder.likelihoodModel add_cutoff_values add_string_constants<import_from_stmt>dreamcoder.program Abstraction Application<import_from_stmt>dreamcoder.type tpregex<import_from_stmt>dreamcoder.utilities eprint flatten testTrainSplit POSITIVEINFINITY<import_stmt>random<import_stmt>math<import_stmt>pregex<as>pre<import_stmt>os<try_stmt><block_start><import_from_stmt>dreamcoder.recognition RecurrentFeatureExtractor JSONFeatureExtractor<class_stmt>LearnedFeatureExtractor(RecurrentFeatureExtractor)<block_start>H=64<line_sep>special='regex'<def_stmt>tokenize self examples<block_start><def_stmt>sanitize l<block_start><return>[z<if>z<in>self.lexicon<else>"?"<for>z_ l<for>z (z_<if>isinstance(z_ list)<else>[z_])]<block_end>tokenized=[]<for_stmt>xs,y examples<block_start><if_stmt>isinstance(y list)<block_start>y=["LIST_START"]+y+["LIST_END"]<block_end><else_stmt><block_start>y=[y]<block_end>y=sanitize(y)<if_stmt>len(y)<g>self.maximumLength<block_start><return><none><block_end>serializedInputs=[]<for_stmt>xi,x enumerate(xs)<block_start><if_stmt>isinstance(x list)<block_start>x=["LIST_START"]+x+["LIST_END"]<block_end><else_stmt><block_start>x=[x]<block_end>x=sanitize(x)<if_stmt>len(x)<g>self.maximumLength<block_start><return><none><block_end>serializedInputs.append(x)<block_end>tokenized.append((tuple(serializedInputs) y))<block_end><return>tokenized<block_end><def_stmt>__init__ self tasks testingTasks=[] cuda=<false><block_start>self.lexicon=set(flatten((t.examples<for>t tasks+testingTasks) abort=<lambda>x:isinstance(x str))).union({"LIST_START" "LIST_END" "?"})<line_sep>self.num_examples_list=[len(t.examples)<for>t tasks]<line_sep># Calculate the maximum length
self.maximumLength=POSITIVEINFINITY<line_sep>self.maximumLength=max(len(l)<for>t tasks+testingTasks<for>xs,y self.tokenize(t.examples)<for>l [y]+[x<for>x xs])<line_sep>super(LearnedFeatureExtractor self).__init__(lexicon=list(self.lexicon) tasks=tasks cuda=cuda H=self.H bidirectional=<true>)<line_sep>self.parallelTaskOfProgram=<false><block_end><def_stmt>taskOfProgram self p t#raise NotImplementedError
<block_start>num_examples=random.choice(self.num_examples_list)<line_sep>p=p.visit(ConstantInstantiateVisitor.SINGLE)<line_sep>preg=p.evaluate([])(pre.String(""))<line_sep>t=Task("Helm" t [(() list(preg.sample()))<for>_ range(num_examples)])<line_sep><return>t<block_end><block_end><block_end><except_stmt><block_start><pass><block_end>#in init: loop over tasks, save lengths,
<class_stmt>ConstantInstantiateVisitor(object)<block_start><def_stmt>__init__ self<block_start>self.regexes=[pre.create(".+") pre.create("\d+") pre.create("\w+") pre.create("\s+") pre.create("\\u+") pre.create("\l+")]<block_end><def_stmt>primitive self e<block_start><if_stmt>e.name<eq>"r_const"#return Primitive("STRING", e.tp, random.choice(self.words))
<block_start>s=random.choice(self.regexes).sample()#random string const
s=pre.String(s)<line_sep>e.value=PRC(s arity=0)<block_end><return>e<block_end><def_stmt>invented self e<block_start><return>e.body.visit(self)<block_end><def_stmt>index self e<block_start><return>e<block_end><def_stmt>application self e<block_start><return>Application(e.f.visit(self) e.x.visit(self))<block_end><def_stmt>abstraction self e<block_start><return>Abstraction(e.body.visit(self))<block_end><block_end>#TODO fix
<class_stmt>MyJSONFeatureExtractor(JSONFeatureExtractor)<block_start>N_EXAMPLES=5<def_stmt>_featuresOfProgram self program tp<block_start><try_stmt><block_start>preg=program.evaluate([])<line_sep># if 'left_paren' in program.show(False):
#eprint("string_pregex:", string_pregex)
#eprint("string_pregex:", string_pregex)
<block_end><except_stmt>IndexError# free variable
<block_start><return><none><block_end><except_stmt>Exception<as>e<block_start>eprint("Exception during evaluation:" e)<if_stmt>"Attempt to evaluate fragment variable"<in>e<block_start>eprint("program (bc fragment error)" program)<block_end><return><none><block_end>examples=[]<for_stmt>_ range(self.N_EXAMPLES<times>5)# oh this is arbitrary ig
<block_start><try_stmt><block_start>y=preg.sample()# TODO
#this line should keep inputs short, so that helmholtzbatch can be large
#allows it to try other samples
#(Could also return None off the bat... idk which is better)
#if len(y) > 20:
# continue
#eprint(tp, program, x, y)
examples.append(y)<block_end><except_stmt>BaseException<block_start><continue><block_end><if_stmt>len(examples)<ge>self.N_EXAMPLES<block_start><break><block_end><block_end><else_stmt><block_start><return><none><block_end><return>examples<block_end><block_end># changed to list_features(examples) from examples
<def_stmt>regex_options parser<block_start>parser.add_argument("--maxTasks" type=int default=500 help="truncate tasks to fit within this boundary")<line_sep>parser.add_argument("--maxExamples" type=int default=10 help="truncate number of examples per task to fit within this boundary")<line_sep>parser.add_argument("--tasks" default="long" help="which tasks to use" choices=["old" "short" "long" "words" "number" "handpicked" "new" "newNumber"])<line_sep>parser.add_argument("--primitives" default="concat" help="Which primitive set to use" choices=["base" "alt1" "easyWords" "alt2" "concat" "reduced" "strConst"])<line_sep>parser.add_argument("--extractor" type=str choices=["hand" "deep" "learned" "json"] default="learned")<line_sep># if i switch to json it breaks
parser.add_argument("--split" metavar="TRAIN_RATIO" type=float default=0.8 help="split test/train")<line_sep>parser.add_argument("-H" "--hidden" type=int default=256 help="number of hidden units")<line_sep>parser.add_argument("--likelihoodModel" default="probabilistic" help="likelihood Model" choices=["probabilistic" "all-or-nothing"])<line_sep>parser.add_argument("--topk_use_map" dest="topk_use_only_likelihood" action="store_false")<line_sep>parser.add_argument("--debug" dest="debug" action="store_true")<line_sep>parser.add_argument("--ll_cutoff" dest="use_ll_cutoff" nargs='*' default=<false> help="use ll cutoff for training tasks (for probabilistic likelihood model only). default is False,")<line_sep>parser.add_argument("--use_str_const" action="store_true" help="use string constants")<line_sep>"""parser.add_argument("--stardecay",
type=float,
dest="stardecay",
default=0.5,
help="p value for kleenestar and plus")"""<block_end># Lucas recommends putting a struct with the definitions of the primitives here.
# TODO:
# Build likelihood function
# modify NN
# make primitives
# make tasks
<def_stmt>main args<block_start>"""
Takes the return value of the `commandlineArguments()` function as input and
trains/tests the model on regular expressions.
"""<line_sep>#for dreaming
#parse use_ll_cutoff
use_ll_cutoff=args.pop('use_ll_cutoff')<if_stmt><not>use_ll_cutoff<is><false>#if use_ll_cutoff is a list of strings, then train_ll_cutoff and test_ll_cutoff
#will be tuples of that string followed by the actual model
<block_start><if_stmt>len(use_ll_cutoff)<eq>1<block_start>train_ll_cutoff=use_ll_cutoff[0]# make_cutoff_model(use_ll_cutoff[0], tasks))
test_ll_cutoff=use_ll_cutoff[0]# make_cutoff_model(use_ll_cutoff[0], tasks))
<block_end><else_stmt><block_start><assert_stmt>len(use_ll_cutoff)<eq>2<line_sep>train_ll_cutoff=use_ll_cutoff[0]#make_cutoff_model(use_ll_cutoff[0], tasks))
test_ll_cutoff=use_ll_cutoff[1]#make_cutoff_model(use_ll_cutoff[1], tasks))
<block_end><block_end><else_stmt><block_start>train_ll_cutoff=<none><line_sep>test_ll_cutoff=<none><block_end>regexTasks={"old":makeOldTasks "short":makeShortTasks "long":makeLongTasks "words":makeWordTasks "number":makeNumberTasks "handpicked":makeHandPickedTasks "new":makeNewTasks "newNumber":makeNewNumberTasks}[args.pop("tasks")]<line_sep>tasks=regexTasks()# TODO
eprint("Generated" len(tasks) "tasks")<line_sep>maxTasks=args.pop("maxTasks")<if_stmt>len(tasks)<g>maxTasks<block_start>eprint("Unwilling to handle {} tasks, truncating..".format(len(tasks)))<line_sep>seed=42# previously this was hardcoded and never changed
random.seed(seed)<line_sep>random.shuffle(tasks)<del_stmt>tasks[maxTasks:]<block_end>maxExamples=args.pop("maxExamples")<line_sep>split=args.pop("split")<line_sep>test,train=testTrainSplit(tasks split)<line_sep>eprint("Split tasks into %d/%d test/train"%(len(test) len(train)))<line_sep>test=add_cutoff_values(test test_ll_cutoff)<line_sep>train=add_cutoff_values(train train_ll_cutoff)<line_sep>eprint("added cutoff values to tasks, train: " train_ll_cutoff ", test:" test_ll_cutoff)<if_stmt>args.pop("use_str_const")<block_start><assert_stmt>args["primitives"]<eq>"strConst"<or>args["primitives"]<eq>"reduced"<line_sep>ConstantInstantiateVisitor.SINGLE=ConstantInstantiateVisitor()<line_sep>test=add_string_constants(test)<line_sep>train=add_string_constants(train)<line_sep>eprint("added string constants to test and train")<block_end><for_stmt>task test+train<block_start><if_stmt>len(task.examples)<g>maxExamples<block_start>task.examples=task.examples[:maxExamples]<block_end>task.specialTask=("regex" {"cutoff":task.ll_cutoff "str_const":task.str_const})<line_sep>task.examples=[(xs [y<for>y ys])<for>xs,ys task.examples]<line_sep>task.maxParameters=1<block_end># from list stuff
primtype=args.pop("primitives")<line_sep>prims={"base":basePrimitives "alt1":altPrimitives "alt2":alt2Primitives "easyWords":easyWordsPrimitives "concat":concatPrimitives "reduced":reducedConcatPrimitives "strConst":strConstConcatPrimitives}[primtype]<line_sep>extractor={"learned":LearnedFeatureExtractor "json":MyJSONFeatureExtractor}[args.pop("extractor")]<line_sep>extractor.H=args.pop("hidden")<line_sep>#stardecay = args.stardecay
#stardecay = args.pop('stardecay')
#decaystr = 'd' + str(stardecay)
<import_stmt>datetime<line_sep>timestamp=datetime.datetime.now().isoformat()<line_sep>outputDirectory="experimentOutputs/regex/%s"%timestamp<line_sep>os.system("mkdir -p %s"%outputDirectory)<line_sep>args.update({"featureExtractor":extractor "outputPrefix":"%s/regex"%(outputDirectory) "evaluationTimeout":0.005 "topk_use_only_likelihood":<true> "maximumFrontier":10 "compressor":args.get("compressor" "ocaml")})<line_sep>####
# use the
#prim_list = prims(stardecay)
prim_list=prims()<line_sep>specials=["r_kleene" "r_plus" "r_maybe" "r_alt" "r_concat"]<line_sep>n_base_prim=len(prim_list)-len(specials)<line_sep>productions=[(math.log(0.5/float(n_base_prim)) prim)<if>prim.name<not><in>specials<else>(math.log(0.10) prim)<for>prim prim_list]<line_sep>baseGrammar=Grammar.fromProductions(productions continuationType=tpregex)<line_sep>#baseGrammar = Grammar.uniform(prims())
#for i in range(100):
# eprint(baseGrammar.sample(tpregex))
#eprint(baseGrammar)
#explore
test_stuff=args.pop("debug")<if_stmt>test_stuff<block_start>eprint(baseGrammar)<line_sep>eprint("sampled programs from prior:")<for_stmt>i range(100)#100
<block_start>eprint(baseGrammar.sample(test[0].request maximumDepth=1000))<block_end>eprint("""half the probability mass is on higher-order primitives.
Therefore half of enumerated programs should have more than one node.
However, we do not observe this.
Instead we see a very small fraction of programs have more than one node.
So something seems to be wrong with grammar.sample.
Furthermore: observe the large print statement above.
This prints the candidates for sampleDistribution in grammar.sample.
the first element of each tuple is the probability passed into sampleDistribution.
Half of the probability mass should be on the functions, but instead they are equally
weighted with the constants. If you look at the grammar above, this is an error!!!!
""")<assert_stmt><false><block_end><del_stmt>args["likelihoodModel"]<line_sep>explorationCompression(baseGrammar train testingTasks=test **args)<block_end> |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>logging<import_from_stmt>ufora.networking.ChannelDemuxer Message ChannelDemuxer<import_stmt>logging<class_stmt>DemuxedTransport(object)<block_start><def_stmt>__init__ self<block_start>self.clients={}<block_end><def_stmt>onMessageReceived_ self content channelId<block_start><try_stmt><block_start>channel=self.clients[channelId]<line_sep>message=Message(channel.channelGroup channelId channel.hostId channel.outgoingSequenceNumber content)<line_sep>channel.outgoingSequenceNumber<augadd>1<line_sep>self.onMessageReceived(ChannelDemuxer.encodeMessage(message))<block_end><except_stmt>Exception<block_start><import_stmt>traceback<line_sep>logging.error('ERROR: failed to dispatch received message\n%s'%traceback.format_exc())<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_future_stmt> division<import_from_stmt>. text<import_from_stmt>. letter<import_from_stmt>. checker<import_from_stmt>. josa<line_sep> |
# -*- coding: utf-8 -*-
# Time : 2021/7/25 13:59
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
<import_stmt>json<import_stmt>os<import_from_stmt>datetime datetime<import_from_stmt>bs4 BeautifulSoup<import_from_stmt>selenium.common.exceptions StaleElementReferenceException WebDriverException <import_from_stmt>selenium.webdriver Chrome<import_from_stmt>src.BusinessCentralLayer.setting logger SERVER_DIR_DATABASE TIME_ZONE_CN<import_from_stmt>src.BusinessLogicLayer.cluster.master ActionMasterGeneral<class_stmt>SSPanelParser(ActionMasterGeneral)<block_start><def_stmt>__init__ self url silence=<false> assault=<true> anti_slider=<true><block_start>super(SSPanelParser self).__init__(url silence assault anti_slider=anti_slider )<line_sep>self.obj_parser={}<line_sep>self.cache_db_name="parser_cache"<line_sep>self.cache_db_path=self.create_cache_db(database_dir=SERVER_DIR_DATABASE)<block_end><def_stmt>create_cache_db self database_dir=<none><block_start>database_dir="database"<if>database_dir<is><none><else>database_dir<if_stmt><not>os.path.exists(database_dir)<block_start>os.mkdir(database_dir)<block_end>cache_db=os.path.join(database_dir self.cache_db_name)<if_stmt><not>os.path.exists(cache_db)<block_start>os.mkdir(cache_db)<block_end><return>cache_db<block_end><def_stmt>capture_cache self signs flow<block_start>output_path=os.path.join(self.cache_db_path signs)<with_stmt>open(output_path "w" encoding="utf8")<as>f<block_start>f.write(flow)<block_end><block_end><def_stmt>parse self **kwargs<block_start>"""
:return:
"""<line_sep>api:Chrome=kwargs.get("api")<line_sep>self.obj_parser.update({"parse_url":self.register_url})<line_sep># ----------------------------------------
# Parse the available data quota and remaining time
# Call this first and wait for the fluid animation to finish loading [time-consuming task]
# so that the parsing steps below do not have to wait
# ----------------------------------------
fluid=set()<line_sep>fluid_density=[]<line_sep>i=0<while_stmt><true><block_start><try_stmt><block_start>i<augadd>1<line_sep>card_body=api.find_elements_by_xpath("//div[@class='card-body']")[:2]<line_sep>card_body=[tag.text.strip()<for>tag card_body]<line_sep>fluid.update(card_body)<line_sep>fluid_density.append(len(fluid))<line_sep># Fluid release (keep sampling)
<if_stmt>len(fluid_density)<l>10<or>len(fluid)<l>3<block_start><continue><block_end># Fluid relatively balanced (readings have stabilized)
<if_stmt>max(fluid_density[:10])<eq>min(fluid_density[:10])<block_start>self.obj_parser.update({"time":card_body[0] "flow":card_body[-1]})<line_sep><break><block_end><block_end><except_stmt>StaleElementReferenceException<block_start><pass><block_end><block_end># Save cookies
<with_stmt>open("123.json" "w" encoding="utf8")<as>f<block_start>f.write(json.dumps(api.get_cookies()))<block_end># 读取cookie
# cookie_json = " ".join([f"{i['name']}={i['value']};" for i in json.loads(f.read())])
# ----------------------------------------
# Parse the site name
# ----------------------------------------
<try_stmt><block_start>parse_name=api.find_element_by_xpath("//aside//div[@class='sidebar-brand']").text.strip()<line_sep>self.obj_parser.update({"parse_name":parse_name})<block_end><except_stmt>WebDriverException<block_start>logger.error(f"<SSPanelParserError> Site name resolution failed -- {self.register_url}")<block_end># ----------------------------------------
# Parse the site announcement
# ----------------------------------------
reference_links={}<try_stmt><block_start>card_body=api.find_elements_by_xpath("//div[@class='card-body']")[4]<line_sep>self.obj_parser.update({"desc":card_body.text.strip()})<line_sep>related_href=card_body.find_elements_by_tag_name("a")<for_stmt>tag related_href<block_start>href=tag.get_attribute("href")<if_stmt>href<block_start>href=href.strip()<if_stmt>"https"<not><in>href<block_start>href=f"{self.register_url}{href}"<block_end>href_desc=tag.text.strip()<if>tag.text<else>href<line_sep>reference_links.update({href:href_desc})<block_end><block_end>self.obj_parser.update({"reference_links":reference_links})<block_end><except_stmt>WebDriverException<block_start>logger.error(f"<SSPanelParserError> Site announcement parsing error -- {self.register_url}")<block_end># ----------------------------------------
# Parse [link import]
# ----------------------------------------
subscribes={}<line_sep>support=[]<try_stmt># Clean up the subscription links
<block_start>soup=BeautifulSoup(api.page_source "html.parser")<for_stmt>i soup.find_all("a")<block_start><if_stmt>i.get("data-clipboard-text")<block_start>subscribes.update({i.get("data-clipboard-text"):i.text.strip()})<block_end><block_end># Identify the supported subscription types
buttons=api.find_elements_by_xpath("//div[@class='card'][2]//a")<for_stmt>tag buttons<block_start>support_=tag.get_attribute("class")<if_stmt>support_<block_start>support_=[i<for>i [i<for>i support_.split()<if>i.startswith("btn-")]<if>i<not><in>["btn-icon" "btn-primary" "btn-lg" "btn-round" "btn-progress" ]]<if_stmt>len(support_)<eq>1<block_start>class_name=support_[0].replace("btn-" "")<line_sep>support.append(class_name)<block_end><block_end><block_end># Fill in types the buttons missed (residual completion)
<for_stmt>tag subscribes.values()<block_start><if_stmt>"surge"<in>tag.lower()<block_start>support.append("surge")<block_end><if_stmt>"ssr"<in>tag.lower()<block_start>support.append("ssr")<block_end><block_end>self.obj_parser.update({"subscribes":subscribes "support":list(set(support))})<block_end><except_stmt>WebDriverException<block_start>logger.error(f"<SSPanelParserError> Site subscription resolution failed -- {self.register_url}")<block_end>self.obj_parser.update({"email":self.email "password":self.password "recently_login":datetime.now(tz=TIME_ZONE_CN) })<line_sep><return>self.obj_parser<block_end><def_stmt>parse_by_login self **kwargs<arrow>dict<block_start><return>self.seep("login" self.parse **kwargs)<block_end><def_stmt>parse_by_register self **kwargs<block_start><return>self.seep("register" self.parse **kwargs)<block_end><def_stmt>refresh_cookie self **kwargs<block_start><def_stmt>get_cookie <block_start>cookies=kwargs.get("api")<line_sep><return>json.dumps(cookies.get_cookies())<if>cookies<else>{}<block_end><return>self.seep("login" get_cookie **kwargs)<block_end><def_stmt>seep self method business **kwargs# Get the task settings
<block_start>api=self.set_spider_option()<line_sep># Execute the core business logic
<try_stmt><block_start>self.get_html_handle(api=api url=self.register_url wait_seconds=45)<if_stmt>method<eq>"login"<block_start>self.sign_in(api **kwargs)<block_end><elif_stmt>method<eq>"register"<block_start>self.sign_up(api)<block_end>self.wait(api 40 "//div[@class='card-body']")<line_sep>kwargs.setdefault("api" api)<line_sep><return>business(**kwargs)<block_end><finally_stmt><block_start>api.quit()<block_end><block_end><block_end> |
<import_stmt>click<import_from_stmt>virl.api ViewerPlugin NoPluginError<import_from_stmt>virl.api.github get_repos<import_from_stmt>virl.cli.views.search repo_table<line_sep>@click.command()@click.argument("query" required=<false>)@click.option("--org" default="virlfiles" required=<false> help="GitHub organization to search (default: virlfiles)")<def_stmt>search query=<none> **kwargs<block_start>"""
list topologies available via github
"""<line_sep>repos=get_repos(org=kwargs["org"] query=query)<if_stmt>query<is><not><none><block_start>click.secho("Displaying {} Results For {}".format(len(repos) query))<block_end><else_stmt><block_start>click.secho("Displaying {} Results".format(len(repos)))<block_end><try_stmt><block_start>pl=ViewerPlugin(viewer="search")<line_sep>pl.visualize(repos=repos)<block_end><except_stmt>NoPluginError<block_start>repo_table(repos)<block_end><block_end> |
"""
Customize the behavior of a fixture by allowing special code to be
executed before or after each test, and before or after each suite.
"""<import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>sys<import_stmt>bson<import_stmt>pymongo<import_from_stmt>. fixtures<import_from_stmt>. testcases<import_from_stmt>.. errors<import_from_stmt>.. logging<import_from_stmt>.. utils<def_stmt>make_custom_behavior class_name *args **kwargs<block_start>"""
Factory function for creating CustomBehavior instances.
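For illustration (editor's note, not in the original docstring):
behavior = make_custom_behavior('CleanEveryN', logger, fixture, n=50)
would return a CleanEveryN hook, assuming that name is registered in the
module's _CUSTOM_BEHAVIORS mapping.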
"""<if_stmt>class_name<not><in>_CUSTOM_BEHAVIORS<block_start><raise>ValueError("Unknown custom behavior class '%s'"%(class_name))<block_end><return>_CUSTOM_BEHAVIORS[class_name](*args **kwargs)<block_end><class_stmt>CustomBehavior(object)<block_start>"""
The common interface all CustomBehaviors will inherit from.
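A minimal subclass sketch (editor's illustration, not an existing hook):
    class LogEachTest(CustomBehavior):
        def before_test(self, test_report):
            self.logger.info("Running a test against %s", self.fixture)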
"""<line_sep>@staticmethod<def_stmt>start_dynamic_test test_case test_report<block_start>"""
If a CustomBehavior wants to add a test case that will show up
in the test report, it should use this method to add it to the
report, since we will need to count it as a dynamic test to get
the stats in the summary information right.
"""<line_sep>test_report.startTest(test_case dynamic=<true>)<block_end><def_stmt>__init__ self logger fixture<block_start>"""
Initializes the CustomBehavior with the specified fixture.
"""<if_stmt><not>isinstance(logger logging.Logger)<block_start><raise>TypeError("logger must be a Logger instance")<block_end>self.logger=logger<line_sep>self.fixture=fixture<block_end><def_stmt>before_suite self test_report<block_start>"""
The test runner calls this exactly once before it starts
running the suite.
"""<line_sep><pass><block_end><def_stmt>after_suite self test_report<block_start>"""
The test runner calls this exactly once after all tests have
finished executing. Be sure to reset the behavior back to its
original state so that it can be run again.
"""<line_sep><pass><block_end><def_stmt>before_test self test_report<block_start>"""
Each test will call this before it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""<line_sep><pass><block_end><def_stmt>after_test self test_report<block_start>"""
Each test will call this after it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""<line_sep><pass><block_end><block_end><class_stmt>CleanEveryN(CustomBehavior)<block_start>"""
Restarts the fixture after it has run 'n' tests.
On mongod-related fixtures, this will clear the dbpath.
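For illustration (editor's note): CleanEveryN(logger, fixture, n=50) tears the
fixture down and brings it back up after every 50th test, and the interval is
forced down to 1 when ASAN_OPTIONS contains detect_leaks=1.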
"""<line_sep>DEFAULT_N=20<def_stmt>__init__ self logger fixture n=DEFAULT_N<block_start>CustomBehavior.__init__(self logger fixture)<line_sep># Try to isolate what test triggers the leak by restarting the fixture each time.
<if_stmt>"detect_leaks=1"<in>os.getenv("ASAN_OPTIONS" "")<block_start>self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"<concat>" the fixture after each test instead of after every %d." n)<line_sep>n=1<block_end>self.n=n<line_sep>self.tests_run=0<block_end><def_stmt>after_test self test_report<block_start>self.tests_run<augadd>1<if_stmt>self.tests_run<ge>self.n<block_start>self.logger.info("%d tests have been run against the fixture, stopping it..." self.tests_run)<line_sep>self.tests_run=0<line_sep>teardown_success=self.fixture.teardown()<line_sep>self.logger.info("Starting the fixture back up again...")<line_sep>self.fixture.setup()<line_sep>self.fixture.await_ready()<line_sep># Raise this after calling setup in case --continueOnFailure was specified.
<if_stmt><not>teardown_success<block_start><raise>errors.TestFailure("%s did not exit cleanly"%(self.fixture))<block_end><block_end><block_end><block_end><class_stmt>CheckReplDBHash(CustomBehavior)<block_start>"""
Waits for replication after each test, then checks that the dbhashes
of all databases other than "local" match on the primary and all of
the secondaries. If any dbhashes do not match, logs information
about what was different (e.g. Different numbers of collections,
missing documents in a collection, mismatching documents, etc).
Compatible only with ReplFixture subclasses.
"""<def_stmt>__init__ self logger fixture<block_start><if_stmt><not>isinstance(fixture fixtures.ReplFixture)<block_start><raise>TypeError("%s does not support replication"%(fixture.__class__.__name__))<block_end>CustomBehavior.__init__(self logger fixture)<line_sep>self.test_case=testcases.TestCase(self.logger "Hook" "#dbhash#")<line_sep>self.started=<false><block_end><def_stmt>after_test self test_report<block_start>"""
After each test, check that the dbhash of the test database is
the same on all nodes in the replica set or master/slave
fixture.
"""<try_stmt><block_start><if_stmt><not>self.started<block_start>CustomBehavior.start_dynamic_test(self.test_case test_report)<line_sep>self.started=<true><block_end># Wait until all operations have replicated.
self.fixture.await_repl()<line_sep>success=<true><line_sep>sb=[]# String builder.
primary=self.fixture.get_primary()<line_sep>primary_conn=utils.new_mongo_client(port=primary.port)<for_stmt>secondary self.fixture.get_secondaries()<block_start>read_preference=pymongo.ReadPreference.SECONDARY<line_sep>secondary_conn=utils.new_mongo_client(port=secondary.port read_preference=read_preference)<line_sep># Skip arbiters.
<if_stmt>secondary_conn.admin.command("isMaster").get("arbiterOnly" <false>)<block_start><continue><block_end>all_matched=CheckReplDBHash._check_all_db_hashes(primary_conn secondary_conn sb)<if_stmt><not>all_matched<block_start>sb.insert(0 "One or more databases were different between the primary on port %d"<concat>" and the secondary on port %d:"%(primary.port secondary.port))<block_end>success=all_matched<and>success<block_end><if_stmt><not>success# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
<block_start>self.test_case.logger.info("\n ".join(sb))<line_sep><raise>self.test_case.failureException("The dbhashes did not match")<block_end><block_end><except_stmt>self.test_case.failureException<as>err<block_start>self.test_case.logger.exception("The dbhashes did not match.")<line_sep>self.test_case.return_code=1<line_sep>test_report.addFailure(self.test_case sys.exc_info())<line_sep>test_report.stopTest(self.test_case)<line_sep><raise>errors.ServerFailure(err.args[0])<block_end><except_stmt>pymongo.errors.WTimeoutError<block_start>self.test_case.logger.exception("Awaiting replication timed out.")<line_sep>self.test_case.return_code=2<line_sep>test_report.addError(self.test_case sys.exc_info())<line_sep>test_report.stopTest(self.test_case)<line_sep><raise>errors.StopExecution("Awaiting replication timed out")<block_end><block_end><def_stmt>after_suite self test_report<block_start>"""
If we get to this point, the #dbhash# test must have been
successful, so add it to the test report.
"""<if_stmt>self.started<block_start>self.test_case.logger.info("The dbhashes matched for all tests.")<line_sep>self.test_case.return_code=0<line_sep>test_report.addSuccess(self.test_case)<line_sep># TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)<block_end>self.started=<false><block_end>@staticmethod<def_stmt>_check_all_db_hashes primary_conn secondary_conn sb<block_start>"""
Returns true if for each non-local database, the dbhash command
returns the same MD5 hash on the primary as it does on the
secondary. Returns false otherwise.
Logs a message describing the differences if any database's
dbhash did not match.
"""<line_sep># Overview of how we'll check that everything replicated correctly between these two nodes:
#
# - Check whether they have the same databases.
# - If not, log which databases are missing where, and dump the contents of any that are
# missing.
#
# - Check whether each database besides "local" gives the same md5 field as the result of
# running the dbhash command.
# - If not, check whether they have the same collections.
# - If not, log which collections are missing where, and dump the contents of any
# that are missing.
# - If so, check that the hash of each non-capped collection matches.
# - If any do not match, log the diff of the collection between the two nodes.
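# Illustration (editor's sketch): if the primary lists databases {local, test, foo}
# and the secondary lists only {local, test}, "foo" is the one reported, and then
# only if it holds non-system collections; "local" is never hashed because the loop
# below skips it.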
success=<true><if_stmt><not>CheckReplDBHash._check_dbs_present(primary_conn secondary_conn sb)<block_start><return><false><block_end><for_stmt>db_name primary_conn.database_names()<block_start><if_stmt>db_name<eq>"local"<block_start><continue><block_end># We don't expect this to match across different nodes.
matched=CheckReplDBHash._check_db_hash(primary_conn secondary_conn db_name sb)<line_sep>success=matched<and>success<block_end><return>success<block_end>@staticmethod<def_stmt>_check_dbs_present primary_conn secondary_conn sb<block_start>"""
Returns true if the list of databases on the primary is
identical to the list of databases on the secondary, and false
otherwise.
"""<line_sep>success=<true><line_sep>primary_dbs=primary_conn.database_names()<line_sep># Can't run database_names() on secondary, so instead use the listDatabases command.
# TODO: Use database_names() once PYTHON-921 is resolved.
list_db_output=secondary_conn.admin.command("listDatabases")<line_sep>secondary_dbs=[db["name"]<for>db list_db_output["databases"]]<line_sep># There may be a difference in databases which is not considered an error, when
# the database only contains system collections. This difference is only logged
# when others are encountered, i.e., success = False.
missing_on_primary,missing_on_secondary=CheckReplDBHash._check_difference(set(primary_dbs) set(secondary_dbs) "database")<for_stmt>missing_db missing_on_secondary<block_start>db=primary_conn[missing_db]<line_sep>coll_names=db.collection_names()<line_sep>non_system_colls=[name<for>name coll_names<if><not>name.startswith("system.")]<line_sep># It is only an error if there are any non-system collections in the database,
# otherwise it's not well defined whether they should exist or not.
<if_stmt>non_system_colls<block_start>sb.append("Database %s present on primary but not on secondary."%(missing_db))<line_sep>CheckReplDBHash._dump_all_collections(db non_system_colls sb)<line_sep>success=<false><block_end><block_end><for_stmt>missing_db missing_on_primary<block_start>db=secondary_conn[missing_db]<line_sep># Can't run collection_names() on secondary, so instead use the listCollections command.
# TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
# logic that is duplicated here can be consolidated.
list_coll_output=db.command("listCollections")["cursor"]["firstBatch"]<line_sep>coll_names=[coll["name"]<for>coll list_coll_output]<line_sep>non_system_colls=[name<for>name coll_names<if><not>name.startswith("system.")]<line_sep># It is only an error if there are any non-system collections in the database,
# otherwise it's not well defined if it should exist or not.
<if_stmt>non_system_colls<block_start>sb.append("Database %s present on secondary but not on primary."%(missing_db))<line_sep>CheckReplDBHash._dump_all_collections(db non_system_colls sb)<line_sep>success=<false><block_end><block_end><return>success<block_end>@staticmethod<def_stmt>_check_db_hash primary_conn secondary_conn db_name sb<block_start>"""
Returns true if the dbhash for 'db_name' matches on the primary
and the secondary, and false otherwise.
Appends a message to 'sb' describing the differences if the
dbhashes do not match.
"""<line_sep>primary_hash=primary_conn[db_name].command("dbhash")<line_sep>secondary_hash=secondary_conn[db_name].command("dbhash")<if_stmt>primary_hash["md5"]<eq>secondary_hash["md5"]<block_start><return><true><block_end>success=CheckReplDBHash._check_dbs_eq(primary_conn secondary_conn primary_hash secondary_hash db_name sb)<if_stmt><not>success<block_start>sb.append("Database %s has a different hash on the primary and the secondary"<concat>" ([ %s ] != [ %s ]):"%(db_name primary_hash["md5"] secondary_hash["md5"]))<block_end><return>success<block_end>@staticmethod<def_stmt>_check_dbs_eq primary_conn secondary_conn primary_hash secondary_hash db_name sb<block_start>"""
Returns true if all non-capped collections had the same hash in
the dbhash response, and false otherwise.
Appends information to 'sb' about the differences between the
'db_name' database on the primary and the 'db_name' database on
the secondary, if any.
"""<line_sep>success=<true><line_sep>primary_db=primary_conn[db_name]<line_sep>secondary_db=secondary_conn[db_name]<line_sep>primary_coll_hashes=primary_hash["collections"]<line_sep>secondary_coll_hashes=secondary_hash["collections"]<line_sep>primary_coll_names=set(primary_coll_hashes.keys())<line_sep>secondary_coll_names=set(secondary_coll_hashes.keys())<line_sep>missing_on_primary,missing_on_secondary=CheckReplDBHash._check_difference(primary_coll_names secondary_coll_names "collection" sb=sb)<if_stmt>missing_on_primary<or>missing_on_secondary# 'sb' already describes which collections are missing where.
<block_start><for_stmt>coll_name missing_on_primary<block_start>CheckReplDBHash._dump_all_documents(secondary_db coll_name sb)<block_end><for_stmt>coll_name missing_on_secondary<block_start>CheckReplDBHash._dump_all_documents(primary_db coll_name sb)<block_end><return><block_end><for_stmt>coll_name primary_coll_names&secondary_coll_names<block_start>primary_coll_hash=primary_coll_hashes[coll_name]<line_sep>secondary_coll_hash=secondary_coll_hashes[coll_name]<if_stmt>primary_coll_hash<eq>secondary_coll_hash<block_start><continue><block_end># Ignore capped collections because they are not expected to match on all nodes.
<if_stmt>primary_db.command({"collStats":coll_name})["capped"]# Still fail if the collection is not capped on the secondary.
<block_start><if_stmt><not>secondary_db.command({"collStats":coll_name})["capped"]<block_start>success=<false><line_sep>sb.append("%s.%s collection is capped on primary but not on secondary."%(primary_db.name coll_name))<block_end>sb.append("%s.%s collection is capped, ignoring."%(primary_db.name coll_name))<line_sep><continue><block_end># Still fail if the collection is capped on the secondary, but not on the primary.
<elif_stmt>secondary_db.command({"collStats":coll_name})["capped"]<block_start>success=<false><line_sep>sb.append("%s.%s collection is capped on secondary but not on primary."%(primary_db.name coll_name))<line_sep><continue><block_end>success=<false><line_sep>sb.append("Collection %s.%s has a different hash on the primary and the secondary"<concat>" ([ %s ] != [ %s ]):"%(db_name coll_name primary_coll_hash secondary_coll_hash))<line_sep>CheckReplDBHash._check_colls_eq(primary_db secondary_db coll_name sb)<block_end><if_stmt>success<block_start>sb.append("All collections that were expected to match did.")<block_end><return>success<block_end>@staticmethod<def_stmt>_check_colls_eq primary_db secondary_db coll_name sb<block_start>"""
Appends information to 'sb' about the differences between
the 'coll_name' collection on the primary and the 'coll_name'
collection on the secondary, if any.
"""<line_sep>codec_options=bson.CodecOptions(document_class=TypeSensitiveSON)<line_sep>primary_coll=primary_db.get_collection(coll_name codec_options=codec_options)<line_sep>secondary_coll=secondary_db.get_collection(coll_name codec_options=codec_options)<line_sep>primary_docs=CheckReplDBHash._extract_documents(primary_coll)<line_sep>secondary_docs=CheckReplDBHash._extract_documents(secondary_coll)<line_sep>CheckReplDBHash._get_collection_diff(primary_docs secondary_docs sb)<block_end>@staticmethod<def_stmt>_extract_documents collection<block_start>"""
Returns a list of all documents in the collection, sorted by
their _id.
"""<line_sep><return>[doc<for>doc collection.find().sort("_id" pymongo.ASCENDING)]<block_end>@staticmethod<def_stmt>_get_collection_diff primary_docs secondary_docs sb<block_start>"""
Returns true if the documents in 'primary_docs' exactly match
the documents in 'secondary_docs', and false otherwise.
Appends information to 'sb' about what matched or did not match.
"""<line_sep>matched=<true><line_sep># These need to be lists instead of sets because documents aren't hashable.
missing_on_primary=[]<line_sep>missing_on_secondary=[]<line_sep>p_idx=0# Keep track of our position in 'primary_docs'.
s_idx=0# Keep track of our position in 'secondary_docs'.
<while_stmt>p_idx<l>len(primary_docs)<and>s_idx<l>len(secondary_docs)<block_start>primary_doc=primary_docs[p_idx]<line_sep>secondary_doc=secondary_docs[s_idx]<if_stmt>primary_doc<eq>secondary_doc<block_start>p_idx<augadd>1<line_sep>s_idx<augadd>1<line_sep><continue><block_end># We have mismatching documents.
matched=<false><if_stmt>primary_doc["_id"]<eq>secondary_doc["_id"]<block_start>sb.append("Mismatching document:")<line_sep>sb.append(" primary: %s"%(primary_doc))<line_sep>sb.append(" secondary: %s"%(secondary_doc))<line_sep>p_idx<augadd>1<line_sep>s_idx<augadd>1<block_end># One node was missing a document. Since the documents are sorted by _id, the doc with
# the smaller _id was the one that was skipped.
<elif_stmt>primary_doc["_id"]<l>secondary_doc["_id"]<block_start>missing_on_secondary.append(primary_doc)<line_sep># Only move past the doc that we know was skipped.
p_idx<augadd>1<block_end><else_stmt># primary_doc["_id"] > secondary_doc["_id"]
<block_start>missing_on_primary.append(secondary_doc)<line_sep># Only move past the doc that we know was skipped.
s_idx<augadd>1<block_end><block_end># Check if there are any unmatched documents left.
<while_stmt>p_idx<l>len(primary_docs)<block_start>matched=<false><line_sep>missing_on_secondary.append(primary_docs[p_idx])<line_sep>p_idx<augadd>1<block_end><while_stmt>s_idx<l>len(secondary_docs)<block_start>matched=<false><line_sep>missing_on_primary.append(secondary_docs[s_idx])<line_sep>s_idx<augadd>1<block_end><if_stmt><not>matched<block_start>CheckReplDBHash._append_differences(missing_on_primary missing_on_secondary "document" sb)<block_end><else_stmt><block_start>sb.append("All documents matched.")<block_end><block_end>@staticmethod<def_stmt>_check_difference primary_set secondary_set item_type_name sb=<none><block_start>"""
Returns true if the contents of 'primary_set' and
'secondary_set' are identical, and false otherwise. The sets
contain information about the primary and secondary,
respectively, e.g. the database names that exist on each node.
Appends information about anything that differed to 'sb'.
"""<line_sep>missing_on_primary=set()<line_sep>missing_on_secondary=set()<for_stmt>item primary_set-secondary_set<block_start>missing_on_secondary.add(item)<block_end><for_stmt>item secondary_set-primary_set<block_start>missing_on_primary.add(item)<block_end><if_stmt>sb<is><not><none><block_start>CheckReplDBHash._append_differences(missing_on_primary missing_on_secondary item_type_name sb)<block_end><return>(missing_on_primary missing_on_secondary)<block_end>@staticmethod<def_stmt>_append_differences missing_on_primary missing_on_secondary item_type_name sb<block_start>"""
Given two iterables representing items that were missing on the
primary or the secondary respectively, append the information
about which items were missing to 'sb', if any.
"""<if_stmt>missing_on_primary<block_start>sb.append("The following %ss were present on the secondary, but not on the"<concat>" primary:"%(item_type_name))<for_stmt>item missing_on_primary<block_start>sb.append(str(item))<block_end><block_end><if_stmt>missing_on_secondary<block_start>sb.append("The following %ss were present on the primary, but not on the"<concat>" secondary:"%(item_type_name))<for_stmt>item missing_on_secondary<block_start>sb.append(str(item))<block_end><block_end><block_end>@staticmethod<def_stmt>_dump_all_collections database coll_names sb<block_start>"""
Appends the contents of each of the collections in 'coll_names'
to 'sb'.
"""<if_stmt>coll_names<block_start>sb.append("Database %s contains the following collections: %s"%(database.name coll_names))<for_stmt>coll_name coll_names<block_start>CheckReplDBHash._dump_all_documents(database coll_name sb)<block_end><block_end><else_stmt><block_start>sb.append("No collections in database %s."%(database.name))<block_end><block_end>@staticmethod<def_stmt>_dump_all_documents database coll_name sb<block_start>"""
Appends the contents of 'coll_name' to 'sb'.
"""<line_sep>docs=CheckReplDBHash._extract_documents(database[coll_name])<if_stmt>docs<block_start>sb.append("Documents in %s.%s:"%(database.name coll_name))<for_stmt>doc docs<block_start>sb.append(" %s"%(doc))<block_end><block_end><else_stmt><block_start>sb.append("No documents in %s.%s."%(database.name coll_name))<block_end><block_end><block_end><class_stmt>TypeSensitiveSON(bson.SON)<block_start>"""
Extends bson.SON to perform additional type-checking of document values
to differentiate BSON types.
"""<def_stmt>items_with_types self<block_start>"""
Returns a list of triples. Each triple consists of a field name, a
field value, and a field type for each field in the document.
"""<line_sep><return>[(key self[key] type(self[key]))<for>key self]<block_end><def_stmt>__eq__ self other<block_start>"""
Comparison to another TypeSensitiveSON is order-sensitive and
type-sensitive; comparing an instance to any other type (including a
regular dictionary) raises a TypeError.
"""<if_stmt>isinstance(other TypeSensitiveSON)<block_start><return>(len(self)<eq>len(other)<and>self.items_with_types()<eq>other.items_with_types())<block_end><raise>TypeError("TypeSensitiveSON objects cannot be compared to other types")<block_end><block_end><class_stmt>ValidateCollections(CustomBehavior)<block_start>"""
Runs full validation (db.collection.validate(true)) on all collections
in all databases on every standalone or primary mongod. If validation
fails (validate.valid), then the validate return object is logged.
Compatible with all subclasses.
"""<line_sep>DEFAULT_FULL=<true><line_sep>DEFAULT_SCANDATA=<true><def_stmt>__init__ self logger fixture full=DEFAULT_FULL scandata=DEFAULT_SCANDATA<block_start>CustomBehavior.__init__(self logger fixture)<if_stmt><not>isinstance(full bool)<block_start><raise>TypeError("Fixture option full is not specified as type bool")<block_end><if_stmt><not>isinstance(scandata bool)<block_start><raise>TypeError("Fixture option scandata is not specified as type bool")<block_end>self.test_case=testcases.TestCase(self.logger "Hook" "#validate#")<line_sep>self.started=<false><line_sep>self.full=full<line_sep>self.scandata=scandata<block_end><def_stmt>after_test self test_report<block_start>"""
After each test, run a full validation on all collections.
"""<try_stmt><block_start><if_stmt><not>self.started<block_start>CustomBehavior.start_dynamic_test(self.test_case test_report)<line_sep>self.started=<true><block_end>sb=[]# String builder.
# The self.fixture.port can be used for client connection to a
# standalone mongod, a replica-set primary, or mongos.
# TODO: Run collection validation on all nodes in a replica-set.
port=self.fixture.port<line_sep>conn=utils.new_mongo_client(port=port)<line_sep>success=ValidateCollections._check_all_collections(conn sb self.full self.scandata)<if_stmt><not>success# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
<block_start>self.test_case.logger.info("\n ".join(sb))<line_sep><raise>self.test_case.failureException("Collection validation failed")<block_end><block_end><except_stmt>self.test_case.failureException<as>err<block_start>self.test_case.logger.exception("Collection validation failed")<line_sep>self.test_case.return_code=1<line_sep>test_report.addFailure(self.test_case sys.exc_info())<line_sep>test_report.stopTest(self.test_case)<line_sep><raise>errors.ServerFailure(err.args[0])<block_end><block_end><def_stmt>after_suite self test_report<block_start>"""
If we get to this point, the #validate# test must have been
successful, so add it to the test report.
"""<if_stmt>self.started<block_start>self.test_case.logger.info("Collection validation passed for all tests.")<line_sep>self.test_case.return_code=0<line_sep>test_report.addSuccess(self.test_case)<line_sep># TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)<block_end>self.started=<false><block_end>@staticmethod<def_stmt>_check_all_collections conn sb full scandata<block_start>"""
Returns true if for all databases and collections validate_collection
succeeds. Returns false otherwise.
Appends a message to 'sb' if any collection fails validate_collection.
"""<line_sep>success=<true><for_stmt>db_name conn.database_names()<block_start><for_stmt>coll_name conn[db_name].collection_names()<block_start><try_stmt><block_start>conn[db_name].validate_collection(coll_name full=full scandata=scandata)<block_end><except_stmt>pymongo.errors.CollectionInvalid<as>err<block_start>sb.append("Database %s, collection %s failed to validate:\n%s"%(db_name coll_name err.args[0]))<line_sep>success=<false><block_end><block_end><block_end><return>success<block_end><block_end>_CUSTOM_BEHAVIORS={"CleanEveryN":CleanEveryN "CheckReplDBHash":CheckReplDBHash "ValidateCollections":ValidateCollections }<line_sep> |
_base_=['../_base_/base_openvino_dynamic-800x1344.py']<line_sep> |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
<import_from_future_stmt> unicode_literals<import_from_stmt>elasticsearch Elasticsearch<import_from_stmt>..test_cases DummyTransportTestCase<class_stmt>TestClient(DummyTransportTestCase)<block_start><def_stmt>test_request_timeout_is_passed_through_unescaped self<block_start>self.client.ping(request_timeout=0.1)<line_sep>calls=self.assert_url_called("HEAD" "/")<assert_stmt>[({"request_timeout":0.1} {} <none>)]<eq>calls<block_end><def_stmt>test_params_is_copied_when self<block_start>rt=object()<line_sep>params=dict(request_timeout=rt)<line_sep>self.client.ping(params=params)<line_sep>self.client.ping(params=params)<line_sep>calls=self.assert_url_called("HEAD" "/" 2)<assert_stmt>[({"request_timeout":rt} {} <none>) ({"request_timeout":rt} {} <none>) ]<eq>calls<assert_stmt><not>(calls[0][0]<is>calls[1][0])<block_end><def_stmt>test_headers_is_copied_when self<block_start>hv="value"<line_sep>headers=dict(Authentication=hv)<line_sep>self.client.ping(headers=headers)<line_sep>self.client.ping(headers=headers)<line_sep>calls=self.assert_url_called("HEAD" "/" 2)<assert_stmt>[({} {"authentication":hv} <none>) ({} {"authentication":hv} <none>) ]<eq>calls<assert_stmt><not>(calls[0][0]<is>calls[1][0])<block_end><def_stmt>test_from_in_search self<block_start>self.client.search(index="i" from_=10)<line_sep>calls=self.assert_url_called("POST" "/i/_search")<assert_stmt>[({"from":"10"} {} <none>)]<eq>calls<block_end><def_stmt>test_repr_contains_hosts self<block_start><assert_stmt>"<Elasticsearch([{}])>"<eq>repr(self.client)<block_end><def_stmt>test_repr_subclass self<block_start><class_stmt>OtherElasticsearch(Elasticsearch)<block_start><pass><block_end><assert_stmt>"<OtherElasticsearch([{}])>"<eq>repr(OtherElasticsearch())<block_end><def_stmt>test_repr_contains_hosts_passed_in self<block_start><assert_stmt>"es.org"<in>repr(Elasticsearch(["es.org:123"]))<block_end><def_stmt>test_repr_truncates_host_to_5 self<block_start>hosts=[{"host":"es"+str(i)}<for>i range(10)]<line_sep>es=Elasticsearch(hosts)<assert_stmt>"es5"<not><in>repr(es)<assert_stmt>"..."<in>repr(es)<block_end><def_stmt>test_index_uses_post_if_id_is_empty self<block_start>self.client.index(index="my-index" id="" body={})<line_sep>self.assert_url_called("POST" "/my-index/_doc")<block_end><def_stmt>test_index_uses_put_if_id_is_not_empty self<block_start>self.client.index(index="my-index" id=0 body={})<line_sep>self.assert_url_called("PUT" "/my-index/_doc/0")<block_end><block_end> |
<import_from_stmt>fastapi HTTPException<import_from_stmt>fastapi.responses StreamingResponse<import_from_stmt>sh docker_compose<import_stmt>os<import_stmt>yaml<import_stmt>pathlib<import_stmt>shutil<import_stmt>docker<import_stmt>io<import_stmt>zipfile<import_from_stmt>api.settings Settings<import_from_stmt>api.utils.compose find_yml_files<line_sep>settings=Settings()<line_sep>"""
Runs an action on the specified compose project.
"""<def_stmt>compose_action name action<block_start>files=find_yml_files(settings.COMPOSE_DIR)<line_sep>compose=get_compose(name)<line_sep>env=os.environ.copy()<if_stmt>action<eq>"up"<block_start><try_stmt><block_start>_action=docker_compose(action "-d" _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><elif_stmt>action<eq>"create"<block_start><try_stmt><block_start>_action=docker_compose("up" "--no-start" _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><else_stmt><block_start><try_stmt><block_start>_action=docker_compose(action _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><if_stmt>_action.stdout.decode("UTF-8").rstrip()<block_start>_output=_action.stdout.decode("UTF-8").rstrip()<block_end><elif_stmt>_action.stderr.decode("UTF-8").rstrip()<block_start>_output=_action.stderr.decode("UTF-8").rstrip()<block_end><else_stmt><block_start>_output="No Output"<block_end>print(f"""Project {compose['name']} {action} successful.""")<line_sep>print(f"""Output: """)<line_sep>print(_output)<line_sep><return>get_compose_projects()<block_end>"""
Includes DOCKER_HOST in the shell env when a compose
project is brought up; otherwise returns a placeholder
var that simply clears the shell env.
"""<def_stmt>check_dockerhost environment<block_start><if_stmt>environment.get("DOCKER_HOST")<block_start><return>{"DOCKER_HOST":environment["DOCKER_HOST"]}<block_end><else_stmt><block_start><return>{"clear_env":"true"}<block_end><block_end>"""
Used to run docker-compose commands on specific
apps in compose projects.
"""<def_stmt>compose_app_action name action app <block_start>files=find_yml_files(settings.COMPOSE_DIR)<line_sep>compose=get_compose(name)<line_sep>env=os.environ.copy()<line_sep>print("RUNNING: "+compose["path"]+" docker-compose "+" "+action+" "+app)<if_stmt>action<eq>"up"<block_start><try_stmt><block_start>_action=docker_compose("up" "-d" app _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><elif_stmt>action<eq>"create"<block_start><try_stmt><block_start>_action=docker_compose("up" "--no-start" app _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><elif_stmt>action<eq>"rm"<block_start><try_stmt><block_start>_action=docker_compose("rm" "--force" "--stop" app _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><else_stmt><block_start><try_stmt><block_start>_action=docker_compose(action app _cwd=os.path.dirname(compose["path"]) _env=check_dockerhost(env) )<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>hasattr(exc "stderr")<block_start><raise>HTTPException(400 exc.stderr.decode("UTF-8").rstrip())<block_end><else_stmt><block_start><raise>HTTPException(400 exc)<block_end><block_end><block_end><if_stmt>_action.stdout.decode("UTF-8").rstrip()<block_start>output=_action.stdout.decode("UTF-8").rstrip()<block_end><elif_stmt>_action.stderr.decode("UTF-8").rstrip()<block_start>output=_action.stderr.decode("UTF-8").rstrip()<block_end><else_stmt><block_start>output="No Output"<block_end>print(f"""Project {compose['name']} App {name} {action} successful.""")<line_sep>print(f"""Output: """)<line_sep>print(output)<line_sep><return>get_compose_projects()<block_end>"""
Checks for compose projects in the COMPOSE_DIR and
returns the name, path, version, services, volumes, and networks defined in each.
"""<def_stmt>get_compose_projects <block_start>files=find_yml_files(settings.COMPOSE_DIR)<line_sep>projects=[]<for_stmt>project,file files.items()<block_start>volumes=[]<line_sep>networks=[]<line_sep>services={}<line_sep>compose=open(file)<line_sep>loaded_compose=yaml.load(compose Loader=yaml.SafeLoader)<if_stmt>loaded_compose<block_start><if_stmt>loaded_compose.get("volumes")<block_start><for_stmt>volume loaded_compose.get("volumes")<block_start>volumes.append(volume)<block_end><block_end><if_stmt>loaded_compose.get("networks")<block_start><for_stmt>network loaded_compose.get("networks")<block_start>networks.append(network)<block_end><block_end><if_stmt>loaded_compose.get("services")<block_start><for_stmt>service loaded_compose.get("services")<block_start>services[service]=loaded_compose["services"][service]<block_end><block_end>_project={"name":project "path":file "version":loaded_compose.get("version" "3.9") "services":services "volumes":volumes "networks":networks }<line_sep>projects.append(_project)<block_end><else_stmt><block_start>print("ERROR: "+file+" is invalid or empty!")<block_end><block_end><return>projects<block_end>"""
Returns detailed information on a specific compose
project.
"""<def_stmt>get_compose name<block_start><try_stmt><block_start>files=find_yml_files(settings.COMPOSE_DIR+name)<block_end><except_stmt>Exception<as>exc<block_start><raise>HTTPException(exc.status_code exc.detail)<block_end><for_stmt>project,file files.items()<block_start><if_stmt>name<eq>project<block_start>networks=[]<line_sep>volumes=[]<line_sep>services={}<line_sep>compose=open(file)<try_stmt><block_start>loaded_compose=yaml.load(compose Loader=yaml.SafeLoader)<block_end><except_stmt>yaml.scanner.ScannerError<as>exc<block_start><raise>HTTPException(422 f"{exc.problem_mark.line}:{exc.problem_mark.column} - {exc.problem}")<block_end><if_stmt>loaded_compose.get("volumes")<block_start><for_stmt>volume loaded_compose.get("volumes")<block_start>volumes.append(volume)<block_end><block_end><if_stmt>loaded_compose.get("networks")<block_start><for_stmt>network loaded_compose.get("networks")<block_start>networks.append(network)<block_end><block_end><if_stmt>loaded_compose.get("services")<block_start><for_stmt>service loaded_compose.get("services")<block_start>services[service]=loaded_compose["services"][service]<block_end><block_end>_content=open(file)<line_sep>content=_content.read()<line_sep>compose_object={"name":project "path":file "version":loaded_compose.get("version" "-") "services":services "volumes":volumes "networks":networks "content":content }<line_sep><return>compose_object<block_end><block_end><else_stmt><block_start><raise>HTTPException(404 "Project "+name+" not found")<block_end><block_end>"""
Creates a compose directory (if one isn't there
already) with the name of the project. Then writes
the content of compose.content to it.
"""<def_stmt>write_compose compose<block_start><if_stmt><not>os.path.exists(settings.COMPOSE_DIR+compose.name)<block_start><try_stmt><block_start>pathlib.Path(settings.COMPOSE_DIR+compose.name).mkdir(parents=<true>)<block_end><except_stmt>Exception<as>exc<block_start><raise>HTTPException(exc.status_code exc.detail)<block_end><block_end><with_stmt>open(settings.COMPOSE_DIR+compose.name+"/docker-compose.yml" "w")<as>f<block_start><try_stmt><block_start>f.write(compose.content)<line_sep>f.close()<block_end><except_stmt>TypeError<as>exc<block_start><if_stmt>exc.args[0]<eq>"write() argument must be str, not None"<block_start><raise>HTTPException(status_code=422 detail="Compose file cannot be empty.")<block_end><block_end><except_stmt>Exception<as>exc<block_start><raise>HTTPException(exc.status_code exc.detail)<block_end><block_end><return>get_compose(name=compose.name)<block_end>"""
Deletes a compose project after checking to see if
it exists. This also deletes all files in the folder.
"""<def_stmt>delete_compose project_name<block_start><if_stmt><not>os.path.exists("/"+settings.COMPOSE_DIR+project_name)<block_start><raise>HTTPException(404 "Project directory not found.")<block_end><elif_stmt><not>os.path.exists("/"+settings.COMPOSE_DIR+project_name+"/docker-compose.yml")<block_start><raise>HTTPException(404 "Project docker-compose.yml not found.")<block_end><else_stmt><block_start><try_stmt><block_start><with_stmt>open("/"+settings.COMPOSE_DIR+project_name+"/docker-compose.yml")<block_start><pass><block_end><block_end><except_stmt>OSError<as>exc<block_start><raise>HTTPException(400 exc.strerror)<block_end><block_end><try_stmt><block_start>shutil.rmtree("/"+settings.COMPOSE_DIR+project_name)<block_end><except_stmt>Exception<as>exc<block_start><raise>HTTPException(exc.status_code exc.strerror)<block_end><return>get_compose_projects()<block_end><def_stmt>generate_support_bundle project_name<block_start>files=find_yml_files(settings.COMPOSE_DIR+project_name)<if_stmt>project_name<in>files<block_start>dclient=docker.from_env()<line_sep>stream=io.BytesIO()<with_stmt>zipfile.ZipFile(stream "w")<as>zf open(files[project_name] "r")<as>fp<block_start>compose=yaml.load(fp Loader=yaml.SafeLoader)<line_sep># print(compose)
# print(compose.get("services"))
<for_stmt>_service compose.get("services")<block_start>print()<if_stmt>len(compose.get("services").keys())<l>2<block_start><try_stmt><block_start><if_stmt>compose.get("services")[_service].get("container_name")<block_start>service=dclient.containers.get(compose.get("services")[_service].get("container_name"))<block_end><else_stmt><block_start>service=dclient.containers.get(_service)<block_end><block_end><except_stmt>docker.errors.NotFound<as>exc<block_start><raise>HTTPException(exc.status_code detail="container "+_service+" not found" )<block_end><block_end><else_stmt><block_start><try_stmt><block_start><if_stmt>compose.get("services")[_service].get("container_name")<block_start>service=dclient.containers.get(compose.get("services")[_service].get("container_name"))<block_end><else_stmt><block_start>service=dclient.containers.get(project_name.lower()+"_"+_service+"_1")<block_end><block_end><except_stmt>docker.errors.NotFound<as>exc<block_start><raise>HTTPException(exc.status_code detail="container "+_service+" not found" )<block_end><block_end>service_log=service.logs()<line_sep>zf.writestr(f"{_service}.log" service_log)<block_end>fp.seek(0)<line_sep># It is possible that ".write(...)" has better memory management here.
zf.writestr("docker-compose.yml" fp.read())<block_end>stream.seek(0)<line_sep><return>StreamingResponse(stream media_type="application/x-zip-compressed" headers={"Content-Disposition":f"attachment;filename={project_name}_bundle.zip"} )<block_end><else_stmt><block_start><raise>HTTPException(404 f"Project {project_name} not found.")<block_end><block_end> |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>astropy.coordinates Angle<import_from_stmt>gammapy.data observatory_locations<def_stmt>test_observatory_locations <block_start>location=observatory_locations["hess"]<line_sep>assert_allclose(location.lon.deg Angle("16d30m00.8s").deg)<line_sep>assert_allclose(location.lat.deg Angle("-23d16m18.4s").deg)<line_sep>assert_allclose(location.height.value 1835)<assert_stmt>str(location.height.unit)<eq>"m"<block_end> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains training and sampling functions an autoregressive model."""<import_stmt>functools<import_from_stmt>typing Any Callable<import_from_stmt>absl logging<import_from_stmt>flax linen<as>nn<import_from_stmt>flax struct<import_from_stmt>flax.training common_utils<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>ml_collections<import_stmt>numpy<as>np<import_from_stmt>autoregressive_diffusion.model distributions<import_from_stmt>autoregressive_diffusion.model.autoregressive_diffusion ardm_utils<import_from_stmt>autoregressive_diffusion.utils util_fns<def_stmt>cross_entropy logits targets<block_start>"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Per-example cross entropy, in bits per dimension (a [batch] float array).
"""<if_stmt>logits.ndim<ne>targets.ndim+1<block_start><raise>ValueError('Incorrect shapes. Got shape %s logits and %s targets'%(str(logits.shape) str(targets.shape)))<block_end>vocab_size=logits.shape[-1]<line_sep>onehot_targets=common_utils.onehot(targets vocab_size)<line_sep>loss=-jnp.sum(onehot_targets<times>nn.log_softmax(logits) axis=-1)<line_sep>d=np.prod(targets.shape[1:])<line_sep>loss=util_fns.sum_except_batch(loss)/d/np.log(2)<line_sep><return>loss<block_end><def_stmt>compute_accuracy logits targets<block_start>"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Per-example accuracy, averaged over the non-batch dimensions (a [batch] float array).
"""<if_stmt>logits.shape[:-1]<ne>targets.shape[:-1]<block_start><raise>ValueError('Incorrect shapes. Got shape %s logits and %s targets'%(str(logits.shape) str(targets.shape)))<block_end>logits=logits[: : <none> :]# Insert empty channel axis.
d=float(np.prod(logits.shape[1:-1]))<line_sep>acc=jnp.equal(jnp.argmax(logits axis=-1) targets)/d<line_sep>acc=util_fns.sum_except_batch(acc)<line_sep><return>acc<block_end><class_stmt>ARM(struct.PyTreeNode)<block_start>"""Static model object that wraps important model functions."""<line_sep>config:ml_collections.config_dict.config_dict.ConfigDict<line_sep>apply_fn:Callable[Ellipsis Any]<line_sep>logprob_fn:Callable[Ellipsis Any]<line_sep>sample_fn:Callable[Ellipsis Any]<line_sep>neural_net:Any<line_sep>num_steps:int<line_sep>policy_support:bool=<false><line_sep>num_stages:int=1<line_sep>absorbing_state:int=0<line_sep>random_order:bool=<false><def_stmt>log_px self rng params x train context=<none><block_start>batch_size=x.shape[0]<if_stmt>self.random_order<block_start>logging.info('Log-likelihood for a random-order ARM XLNet style.')<line_sep>rng,rng_perm=jax.random.split(rng)<line_sep>permutations=ardm_utils.get_batch_permutations(rng_perm batch_size self.num_steps)<block_end><else_stmt><block_start>logging.info('Log-likelihood for a standard ARM.')<line_sep>permutations=<none><block_end>net_out=self.apply_fn({'params':params} x t=<none> mask=<none> train=train context=context permutations=permutations rngs={'dropout':rng}<if>train<else><none>)<line_sep>d=float(np.prod(net_out.shape[1:-1]))<line_sep>log_px_elementwise=util_fns.sum_except_batch(self.logprob_fn(x net_out))<line_sep>log_px=log_px_elementwise/d/np.log(2)<line_sep>neg_acc=-compute_accuracy(logits=net_out targets=x)<line_sep>t_batch_dummy=jnp.zeros((batch_size ) dtype=jnp.int32)<line_sep>loss_components_dummy=jnp.zeros((batch_size ))<line_sep><return>log_px loss_components_dummy neg_acc t_batch_dummy<block_end><def_stmt>elbo self rng params x train context=<none><block_start><return>self.log_px(rng params x train context)<block_end><def_stmt>sample self rng params batch_size context=<none><block_start>chain_sharded=self.p_sample(rng params batch_size context)<line_sep>chain=chain_sharded.reshape(chain_sharded.shape[0] batch_size *chain_sharded.shape[3:])<line_sep><return>chain<block_end>@functools.partial(jax.pmap in_axes=(<none> <none> 0 <none> 0) out_axes=1 static_broadcasted_argnums=(0 3) axis_name='batch')<def_stmt>p_sample self rng params batch_size context<block_start>"""Samples from the model, calls sample_step for every timestep."""<line_sep>rng=jax.random.fold_in(rng jax.lax.axis_index('batch'))<assert_stmt>batch_size%jax.local_device_count()<eq>0<line_sep>per_device_batch_size=batch_size<floordiv>jax.local_device_count()<line_sep>logging.info('Sampling from model, hope you are patient...')<if_stmt>self.random_order<block_start>rng,rng_perm=jax.random.split(rng)<line_sep>orders=ardm_utils.get_batch_permutations(rng_perm per_device_batch_size self.num_steps)<block_end><else_stmt><block_start>orders=jnp.arange(0 self.num_steps)[<none> :]<line_sep>orders=jnp.repeat(orders repeats=per_device_batch_size axis=0)<block_end>chain=[]<line_sep>x=jnp.full((per_device_batch_size *self.config.data_shape) fill_value=self.absorbing_state dtype=jnp.int32)<line_sep>chain.append(x)<def_stmt>next_sample_step x t<block_start>x=self.sample_step(jax.random.fold_in(rng t) x t orders params context)<line_sep><return>x x<block_end>ts=jnp.arange(self.num_steps)<line_sep>_,chain=jax.lax.scan(next_sample_step init=x xs=ts)<line_sep><return>chain<block_end><def_stmt>get_naive_policy self budget=250<block_start><assert_stmt>budget<le>self.num_steps<line_sep># We use budget+1 because a linspace contains the last step.
naive_policy=ardm_utils.integer_linspace(0 self.num_steps budget+1)<line_sep># Last index does not need to be in policy.
naive_policy=naive_policy[:-1]<line_sep><return>naive_policy<block_end><def_stmt>sample_with_naive_policy self rng params batch_size budget=250<block_start>logging.info('Sampling with naive policy.')<line_sep>naive_policy=self.get_naive_policy(budget)<line_sep><return>self.sample_with_policy(rng params batch_size naive_policy)<block_end><def_stmt>sample_with_policy self rng params batch_size policy<block_start>"""Wrapper for p_sample_with_policy that takes care of unsharding."""<line_sep>logging.info('Sampling from model (quickly)...')<line_sep>chain_sharded=self.p_sample_with_policy(rng params batch_size policy)<line_sep>chain=chain_sharded.reshape(chain_sharded.shape[0] batch_size *chain_sharded.shape[3:])<line_sep><return>chain<block_end>@functools.partial(jax.pmap in_axes=(<none> <none> 0 <none> <none>) out_axes=1 static_broadcasted_argnums=(0 3) axis_name='batch')<def_stmt>p_sample_with_policy self rng params batch_size policy<block_start>"""Samples from the model, calls sample_step for every policy step."""<line_sep>rng=jax.random.fold_in(rng jax.lax.axis_index('batch'))<assert_stmt>batch_size%jax.local_device_count()<eq>0<line_sep>per_device_batch_size=batch_size<floordiv>jax.local_device_count()<line_sep>rng,rng_perm=jax.random.split(rng)<line_sep>sigmas=ardm_utils.get_batch_permutations(rng_perm per_device_batch_size self.num_steps)<line_sep>policy_extended=jnp.concatenate([policy jnp.array([self.num_steps] dtype=jnp.int32)] axis=0)<line_sep>x=jnp.full((per_device_batch_size *self.config.data_shape) fill_value=self.absorbing_state dtype=jnp.int32)<def_stmt>next_sample_step x idx<block_start>left_t=policy_extended[idx]<line_sep>right_t=policy_extended[idx+1]<line_sep>x=self.sample_step_with_policy(jax.random.fold_in(rng idx) x left_t right_t sigmas params)<line_sep><return>x x<block_end>x,chain=jax.lax.scan(next_sample_step x jnp.arange(len(policy)))<line_sep><return>chain<block_end><def_stmt>sample_step_with_policy self rng x left_t right_t sigmas params<block_start>"""Sampling code for a single step starting at left_t until right_t."""<line_sep>batch_size=x.shape[0]<line_sep>left_t=jnp.full(batch_size fill_value=left_t)<line_sep>right_t=jnp.full(batch_size fill_value=right_t)<line_sep>prev_selection,current_selection=ardm_utils.get_selections_for_sigma_and_range(sigmas left_t right_t self.config.data_shape)<line_sep>params_px=self.apply_fn({'params':params} x left_t prev_selection train=<false>)<line_sep>new_x=self.sample_fn(rng params_px)<line_sep>x=(1-current_selection)<times>x+current_selection<times>new_x<line_sep>x=jnp.asarray(x jnp.int32)<line_sep><return>x<block_end><def_stmt>sample_step self rng x t sigmas params context<block_start>"""Sampling code for a single step t."""<line_sep>batch_size=x.shape[0]<line_sep>t_batch=jnp.full(batch_size fill_value=t)<line_sep>prev_selection,current_selection=ardm_utils.get_selection_for_sigma_and_t(sigmas t_batch self.config.data_shape)<if_stmt>self.random_order<block_start>permutations=sigmas<block_end><else_stmt><block_start>permutations=<none><block_end>params_px=self.apply_fn({'params':params} x t_batch prev_selection train=<false> context=context permutations=permutations)<line_sep>new_x=self.sample_fn(rng params_px)<line_sep>x=(1-current_selection)<times>x+current_selection<times>new_x<line_sep>x=jnp.asarray(x jnp.int32)<line_sep><return>x<block_end><def_stmt>init_architecture self init_rng tmp_x tmp_t context=<none><block_start>tmp_mask=<none><if_stmt>context<is><none><block_start><return>self.neural_net.init(init_rng tmp_x 
tmp_t tmp_mask train=<false>)<block_end><else_stmt><block_start><return>self.neural_net.init(init_rng tmp_x tmp_t tmp_mask train=<false> context=context)<block_end><block_end>@classmethod<def_stmt>create cls config get_architecture random_order<block_start>"""Creates a new instance with `step=0` and initialized `opt_state`."""<line_sep>required_num_outputs=config.num_classes<line_sep>num_steps=int(np.prod(config.data_shape))<line_sep># We set num_steps=0 since this disables time conditioning, which is not
# necessary for ARMs.
neural_net=get_architecture(config.num_classes required_num_outputs num_steps=0 is_causal=<true>)<line_sep>out_dist=distributions.SoftmaxCategorical(config.data_shape[-1] config.num_classes)<line_sep><return>cls(config apply_fn=neural_net.apply logprob_fn=out_dist.log_prob sample_fn=out_dist.sample neural_net=neural_net num_steps=num_steps random_order=random_order)<block_end><block_end> |
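A small side note on the bits-per-dimension convention used by `cross_entropy` above: summing the negative log-likelihood over the non-batch dimensions and dividing by `d` and by ln 2 converts nats per example into bits per dimension. The sketch below is only an illustration in plain numpy (it is not part of the original file and does not use the jax/flax stack the file relies on); it checks the convention on uniform predictions over 256 classes, which should give exactly 8 bits/dim.

import numpy as np

batch, length, num_classes = 2, 4, 256

# Uniform predictions: every class gets probability 1/256.
log_probs = np.full((batch, length, num_classes), -np.log(num_classes))
targets = np.zeros((batch, length), dtype=np.int64)

# Same reduction as the model code: pick the target log-prob, sum over
# the non-batch dims, then divide by d and by ln 2.
nll = -np.take_along_axis(log_probs, targets[..., None], axis=-1).squeeze(-1)
d = np.prod(targets.shape[1:])
bits_per_dim = nll.sum(axis=1) / d / np.log(2)

print(bits_per_dim)  # [8. 8.] == log2(256) bits per dimension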
<import_from_stmt>maigret.errors CheckError<import_from_stmt>maigret.notify QueryNotifyPrint<import_from_stmt>maigret.result QueryStatus QueryResult<def_stmt>test_notify_illegal <block_start>n=QueryNotifyPrint(color=<false>)<assert_stmt>(n.update(QueryResult(username="test" status=QueryStatus.ILLEGAL site_name="TEST_SITE" site_url_user="http://example.com/test" ))<eq>"[-] TEST_SITE: Illegal Username Format For This Site!")<block_end><def_stmt>test_notify_claimed <block_start>n=QueryNotifyPrint(color=<false>)<assert_stmt>(n.update(QueryResult(username="test" status=QueryStatus.CLAIMED site_name="TEST_SITE" site_url_user="http://example.com/test" ))<eq>"[+] TEST_SITE: http://example.com/test")<block_end><def_stmt>test_notify_available <block_start>n=QueryNotifyPrint(color=<false>)<assert_stmt>(n.update(QueryResult(username="test" status=QueryStatus.AVAILABLE site_name="TEST_SITE" site_url_user="http://example.com/test" ))<eq>"[-] TEST_SITE: Not found!")<block_end><def_stmt>test_notify_unknown <block_start>n=QueryNotifyPrint(color=<false>)<line_sep>result=QueryResult(username="test" status=QueryStatus.UNKNOWN site_name="TEST_SITE" site_url_user="http://example.com/test" )<line_sep>result.error=CheckError('Type' 'Reason')<assert_stmt>n.update(result)<eq>"[?] TEST_SITE: Type error: Reason"<block_end> |
<import_from_future_stmt> absolute_import division print_function<line_sep># LIBTBX_SET_DISPATCHER_NAME phenix.fake_f_obs
<import_from_stmt>cctbx adptbx<import_from_stmt>cctbx.array_family flex<import_stmt>random math sys os<import_stmt>iotbx.pdb<import_stmt>mmtbx.utils<import_from_stmt>libtbx easy_run<import_stmt>mmtbx.dynamics.cartesian_dynamics<as>cartesian_dynamics<import_from_stmt>mmtbx monomer_library<import_stmt>mmtbx.monomer_library.pdb_interpretation<import_stmt>mmtbx.monomer_library.server<import_from_stmt>mmtbx.tls ladp<import_from_stmt>mmtbx.utils run_reduce_with_timeout<import_stmt>mmtbx.tls.tools<import_stmt>mmtbx.f_model<import_stmt>iotbx.phil<import_stmt>mmtbx.masks<import_from_stmt>libtbx.utils Sorry<import_from_stmt>six.moves range<import_stmt>mmtbx.model<if_stmt>(1)<block_start>random.seed(0)<line_sep>flex.set_random_seed(0)<block_end>master_params_str="""\
f_obs {
high_resolution = 2.0
.type = float
low_resolution = 15.0
.type = float
scattering_table = wk1995 it1992 *n_gaussian neutron
f_calc {
atomic_model {
ensemble_size = 20
.type = int
add_hydrogens = False
.type = bool
tls {
max_tl = 2
.type = float
min_tl = 0
.type = float
}
apply_cartesian_dynamics = True
.type = bool
regularize_geometry {
rmsd_bonds_target = 0.025
.type = float
rmsd_angles_target = 2.5
.type = float
}
ladp_angle = 3.0
.type = float
switch_rotamers = True
.type = bool
shake_sites_rmsd = 0.01
.type = float
rigid_body_shift {
rotation_angle = 1.0
.type = float
translation_length = 0.1
.type = float
}
stop_cartesian_dynamics_at_diff = 0.5
.type = float
use_ramachandran_plot_restraints = True
.type = bool
output_file_name = fake_model.pdb
.type = str
}
accuracy {
include scope mmtbx.f_model.sf_and_grads_accuracy_master_params
}
}
f_bulk {
k_sol = 0.35
.type = float
b_sol = 50.0
.type = float
mask {
include scope mmtbx.masks.mask_master_params
}
}
overall_scale = 1.0
overall_anisotropic_scale_matrix_b_cart {
max = 10
.type = float
min = 0
.type = float
}
experimental_noise {
add_random_error_to_amplitudes_percent = 5
.type = float
}
output_file_name = fake_f_obs.mtz
.type = str
}
"""<class_stmt>show(object)<block_start><def_stmt>__init__ self xrs xrs_start grm prefix=""<block_start>esg=grm.energies_sites(sites_cart=xrs.sites_cart() compute_gradients=<false>).geometry<line_sep>self.bond_rmsd=esg.bond_deviations()[2]<line_sep>self.angle_rmsd=esg.angle_deviations()[2]<line_sep>self.error=flex.mean(xrs.distances(other=xrs_start))<line_sep>print(" %s err=%8.3f rmsd: bonds=%6.3f angles=%6.3f"%(prefix self.error self.bond_rmsd self.angle_rmsd))<block_end><block_end><def_stmt>switch_rotamers xray_structure pdb_hierarchy<block_start>x=xray_structure.deep_copy_scatterers()<line_sep>p=pdb_hierarchy.deep_copy()<line_sep>p.atoms().reset_i_seq()<line_sep>p=mmtbx.utils.switch_rotamers(pdb_hierarchy=p mode="min_distant")<line_sep>x.set_sites_cart(sites_cart=p.atoms().extract_xyz())<line_sep><return>x p<block_end><def_stmt>set_ladp xray_structure pdb_hierarchy angle<block_start>axes_and_atoms_i_seqs=ladp.get_axes_and_atoms_i_seqs(pdb_hierarchy=pdb_hierarchy mon_lib_srv=monomer_library.server.server())<line_sep>xray_structure=xray_structure.set_b_iso(value=random.randrange(5 10))<line_sep>xray_structure.convert_to_isotropic()<line_sep>xray_structure=ladp.set_ladp(xray_structure=xray_structure axes_and_atoms_i_seqs=axes_and_atoms_i_seqs value=angle enable_recursion=<true> depth=0)<line_sep><return>xray_structure<block_end><def_stmt>random_aniso_adp space_group unit_cell u_scale=2 u_min=0<block_start><return>adptbx.u_star_as_u_cart(unit_cell space_group.average_u_star(u_star=adptbx.u_cart_as_u_star(unit_cell adptbx.random_u_cart(u_scale=u_scale u_min=u_min))))<block_end><def_stmt>apply_tls xray_structure params<block_start>uc=xray_structure.unit_cell()<line_sep>sg=xray_structure.space_group()<line_sep>selections_1d=flex.bool(xray_structure.scatterers().size() <true>)<line_sep>selections=[selections_1d.iselection()]<line_sep>T=random_aniso_adp(space_group=sg unit_cell=uc u_scale=params.max_tl u_min=params.min_tl)<line_sep>L=random_aniso_adp(space_group=sg unit_cell=uc u_scale=params.max_tl u_min=params.min_tl)<line_sep>print(" T: %s"%",".join([("%7.3f"%i).strip()<for>i T]))<line_sep>print(" L: %s"%",".join([("%7.3f"%i).strip()<for>i L]))<line_sep>tlsos=mmtbx.tls.tools.generate_tlsos(selections=selections xray_structure=xray_structure T=[T] L=[L] S=[[0 0 0 0 0 0 0 0 0]])<line_sep>u_cart_from_tls=mmtbx.tls.tools.u_cart_from_tls(sites_cart=xray_structure.sites_cart() selections=selections tlsos=tlsos)<line_sep>xray_structure.convert_to_anisotropic()<line_sep>u_cart=xray_structure.scatterers().extract_u_cart(uc)<line_sep>utot=u_cart_from_tls+u_cart<line_sep>xray_structure.set_u_cart(u_cart=utot selection=selections_1d.iselection())<line_sep>xray_structure.tidy_us()<line_sep><return>xray_structure<block_end><def_stmt>apply_rigid_body_shift xray_structure params<block_start><import_stmt>scitbx.matrix<line_sep>mt=flex#.mersenne_twister(seed=0)
rot_axis=scitbx.matrix.col(mt.random_double_point_on_sphere())<line_sep>rot_matrix=scitbx.math.r3_rotation_axis_and_angle_as_matrix(axis=rot_axis angle=params.rotation_angle deg=<true>)<line_sep>run_away_counter=0<while_stmt><true><block_start>transl=mt.random_double_point_on_sphere()<line_sep>transl_no_cont_sh=scitbx.matrix.col(xray_structure.crystal_symmetry().subtract_continuous_allowed_origin_shifts(translation_cart=transl))<line_sep>l=abs(transl_no_cont_sh)<if_stmt>(l<g>0.1)<block_start><break><block_end>run_away_counter<augadd>1<assert_stmt>run_away_counter<l>100<block_end>transl=transl_no_cont_sh<times>(params.translation_length/l)<line_sep>sites_cart=xray_structure.sites_cart()<line_sep>cm=xray_structure.center_of_mass()<line_sep>ns=rot_matrix<times>(sites_cart-cm)+transl+cm<line_sep>xray_structure.set_sites_cart(sites_cart=rot_matrix<times>(sites_cart-cm)+transl+cm)<line_sep><return>xray_structure<block_end><def_stmt>simulate_f_obs root crystal_symmetry params<block_start>f_calc_data=<none><line_sep>f_masks_data=[]<for_stmt>i_m,m enumerate(root.models())<block_start>raw_records=flex.std_string()<line_sep>raw_records.append(iotbx.pdb.format_cryst1_record(crystal_symmetry=crystal_symmetry))<for_stmt>atom m.atoms()<block_start>ra=atom.format_atom_record()<line_sep>ru=atom.format_anisou_record()<line_sep>raw_records.append(ra[:])<line_sep>raw_records.append(ru[:])<block_end>xrs=iotbx.pdb.input(lines=raw_records source_info=<none>).xray_structure_simple()<if_stmt>(i_m<eq>0)<block_start>dummy=abs(xrs.structure_factors(d_min=params.f_obs.high_resolution).f_calc())<line_sep>dummy=dummy.resolution_filter(d_max=params.f_obs.low_resolution)<block_end>fmodel=mmtbx.f_model.manager(f_obs=dummy xray_structure=xrs mask_params=params.f_obs.f_bulk.mask sf_and_grads_accuracy_params=params.f_obs.f_calc.accuracy)<line_sep>fcd=fmodel.f_calc().data()<line_sep>fms=fmodel.f_masks()<if_stmt>(i_m<eq>0)<block_start>f_calc_data=fcd<line_sep>f_masks_data=[]<for_stmt>f fms<block_start>f_masks_data.append(f.data())<block_end><block_end><else_stmt><block_start>f_calc_data<augadd>fcd<line_sep>fmsks=fms<assert_stmt>len(f_masks_data)<eq>len(fmsks)<for_stmt>ifmd range(len(f_masks_data))<block_start>f_masks_data[ifmd]<augadd>fmsks[ifmd].data()<block_end><block_end><block_end>fcalc_average=fmodel.f_obs().array(data=f_calc_data)<line_sep>f_masks_data_average=[]<for_stmt>f f_masks_data<block_start>f_masks_data_average.append(fmodel.f_obs().array(data=f/len(root.models())))<block_end>b_cart=<none><if_stmt>([params.f_obs.overall_anisotropic_scale_matrix_b_cart.max params.f_obs.overall_anisotropic_scale_matrix_b_cart.min].count(<none>)<eq>0)<block_start>b_cart=random_aniso_adp(space_group=crystal_symmetry.space_group() unit_cell=crystal_symmetry.unit_cell() u_scale=params.f_obs.overall_anisotropic_scale_matrix_b_cart.max u_min=params.f_obs.overall_anisotropic_scale_matrix_b_cart.min)<line_sep>print("\noverall_anisotropic_scale_matrix_b_cart: %s"%",".join([("%7.3f"%i).strip()<for>i b_cart]))<block_end>fmodel=mmtbx.f_model.manager(f_obs=dummy f_calc=fcalc_average f_mask=f_masks_data_average k_sol=params.f_obs.f_bulk.k_sol b_sol=params.f_obs.f_bulk.b_sol b_cart=b_cart)<line_sep>#
f_obs=abs(fmodel.f_model())<line_sep>f_obs.set_observation_type_xray_amplitude()<line_sep>mtz_dataset=f_obs.as_mtz_dataset(column_root_label="F(ake)obs")<line_sep>r_free_flags=f_obs.generate_r_free_flags()<line_sep>mtz_dataset.add_miller_array(miller_array=r_free_flags column_root_label="R-free-flags")<line_sep>mtz_object=mtz_dataset.mtz_object()<line_sep>mtz_object.write(file_name=params.f_obs.output_file_name)<block_end><def_stmt>regularize_geometry xray_structure restraints_manager params<block_start><import_from_stmt>mmtbx.refinement geometry_minimization<as>gm<import_stmt>scitbx.lbfgs<line_sep>sites_cart=xray_structure.sites_cart()<line_sep>minimized=gm.lbfgs(sites_cart=sites_cart correct_special_position_tolerance=1.0 geometry_restraints_manager=restraints_manager.geometry geometry_restraints_flags=gm.geometry_restraints.flags.flags(default=<true>) rmsd_bonds_termination_cutoff=params.rmsd_bonds_target rmsd_angles_termination_cutoff=params.rmsd_angles_target lbfgs_termination_params=scitbx.lbfgs.termination_parameters(max_iterations=500))<line_sep>xray_structure=xray_structure.replace_sites_cart(new_sites=sites_cart)<line_sep><return>xray_structure<block_end><def_stmt>cd xray_structure restraints_manager params<block_start>gradients_calculator=cartesian_dynamics.gradients_calculator_reciprocal_space(restraints_manager=restraints_manager sites_cart=xray_structure.sites_cart() wc=1)<line_sep>cartesian_dynamics.run(gradients_calculator=gradients_calculator xray_structure=xray_structure temperature=3000 n_steps=500000 time_step=0.0005 initial_velocities_zero_fraction=0 n_print=100 stop_cm_motion=<true> log=<none> stop_at_diff=params.stop_cartesian_dynamics_at_diff verbose=-1)<block_end><def_stmt>loop_2 params xray_structure pdb_hierarchy restraints_manager root<block_start>print("model:")<line_sep>amp=params.f_obs.f_calc.atomic_model<line_sep>grm=restraints_manager<line_sep>xrs=xray_structure.deep_copy_scatterers()<line_sep>show(xrs=xrs xrs_start=xrs grm=grm prefix="start:")<line_sep>xrs_sh=xrs.deep_copy_scatterers()<if_stmt>(amp.shake_sites_rmsd<is><not><none>)<block_start>xrs_sh.shake_sites_in_place(rms_difference=amp.shake_sites_rmsd)<block_end><if_stmt>(amp.apply_cartesian_dynamics)<block_start>cd(xray_structure=xrs_sh restraints_manager=grm params=amp)<line_sep>show(xrs=xrs_sh xrs_start=xrs grm=grm prefix="cd: ")<block_end><if_stmt>([amp.regularize_geometry.rmsd_bonds_target amp.regularize_geometry.rmsd_angles_target].count(<none>)<eq>0)<block_start>xrs_sh=regularize_geometry(xray_structure=xrs_sh restraints_manager=grm params=amp.regularize_geometry)<line_sep>show(xrs=xrs_sh xrs_start=xrs grm=grm prefix="min: ")<block_end><if_stmt>(amp.ladp_angle<is><not><none>)<block_start>xrs_sh=set_ladp(xray_structure=xrs_sh pdb_hierarchy=pdb_hierarchy angle=amp.ladp_angle)<block_end><if_stmt>([amp.tls.max_tl amp.tls.min_tl].count(<none>)<eq>0)<block_start>xrs_sh=apply_tls(xray_structure=xrs_sh params=amp.tls)<block_end><if_stmt>([amp.rigid_body_shift.rotation_angle amp.rigid_body_shift.translation_length].count(<none>)<eq>0)<block_start>xrs_sh=apply_rigid_body_shift(xray_structure=xrs_sh params=amp.rigid_body_shift)<line_sep>show(xrs=xrs_sh xrs_start=xrs grm=grm prefix="rb: ")<block_end>#
h=pdb_hierarchy.deep_copy()<line_sep>h.atoms().reset_i_seq()# XXX
h.atoms().set_xyz(xrs_sh.sites_cart().deep_copy())<line_sep>h.atoms().set_uij(xrs_sh.scatterers().extract_u_cart(xrs_sh.unit_cell()))<line_sep>h.atoms().set_b(xrs_sh.extract_u_iso_or_u_equiv()<times>adptbx.u_as_b(1.))<line_sep>m=h.models()[0].detached_copy()<line_sep>m.id=str(<none>)<line_sep>root.append_model(m)<block_end><def_stmt>loop_1 params root xray_structure pdb_hierarchy restraints_manager<block_start>xh=[(xray_structure pdb_hierarchy)]<if_stmt>(params.f_obs.f_calc.atomic_model.switch_rotamers)<block_start>xh.append(switch_rotamers(xray_structure=xray_structure.deep_copy_scatterers() pdb_hierarchy=pdb_hierarchy.deep_copy()))<block_end>counter=0<line_sep>size=int(math.ceil(params.f_obs.f_calc.atomic_model.ensemble_size/len(xh)))<for_stmt>xh_ xh<block_start>x_,h_=xh_<for_stmt>mc range(size)<block_start>loop_2(params=params xray_structure=x_ pdb_hierarchy=h_ restraints_manager=restraints_manager root=root)<block_end><block_end><for_stmt>i_model,model enumerate(root.models())<block_start>model.id=str(i_model)<block_end>root.atoms().set_occ(root.atoms().extract_occ()/len(root.models()))<block_end><def_stmt>defaults log<block_start>print("Default params::\n" file=log)<line_sep>parsed=iotbx.phil.parse(master_params_str process_includes=<true>)<line_sep>print(file=log)<line_sep><return>parsed<block_end><def_stmt>run args log=sys.stdout<block_start><if_stmt>(len(args)<eq>0)<block_start>parsed=defaults(log=log)<line_sep>parsed.show(prefix=" " out=log)<line_sep><return><block_end>parsed=defaults(log=log)<line_sep>processed_args=mmtbx.utils.process_command_line_args(args=args log=sys.stdout master_params=parsed)<line_sep>processed_args.params.show()<line_sep>params=processed_args.params.extract()<if_stmt>(len(processed_args.pdb_file_names)<eq>0)<block_start><raise>Sorry("No PDB file found.")<block_end><if_stmt>(len(processed_args.pdb_file_names)<g>1)<block_start><raise>Sorry("More than one PDB file found.")<block_end>pdb_file_name=processed_args.pdb_file_names[0]<if_stmt>(params.f_obs.f_calc.atomic_model.add_hydrogens)<block_start>pdb_file_name_r=os.path.basename(pdb_file_name)+"_reduce"<line_sep># easy_run.go("phenix.reduce %s > %s"% (pdb_file_name, pdb_file_name_r))
run_reduce_with_timeout(file_name=pdb_file_name parameters=" > %s"%pdb_file_name_r)<line_sep>pdb_file_name=pdb_file_name_r<block_end>pdbi_params=mmtbx.model.manager.get_default_pdb_interpretation_params()<if_stmt>(params.f_obs.f_calc.atomic_model.use_ramachandran_plot_restraints)<block_start>pdbi_params.pdb_interpretation.ramachandran_plot_restraints.enabled=<true><block_end>model=mmtbx.model.manager(model_input=iotbx.pdb.input(file_name=pdb_file_name))<line_sep>model.process(make_restraints=<true> pdb_interpretation_params=pdbi_params)<line_sep>root=iotbx.pdb.hierarchy.root()<line_sep>loop_1(params=params root=root xray_structure=model.get_xray_structure() pdb_hierarchy=model.get_hierarchy() restraints_manager=model.get_restraints_manager())<line_sep>root.write_pdb_file(file_name=params.f_obs.f_calc.atomic_model.output_file_name crystal_symmetry=model.crystal_symmetry())<line_sep>simulate_f_obs(root=root crystal_symmetry=model.crystal_symmetry() params=params)<block_end><if_stmt>(__name__<eq>"__main__")<block_start>run(sys.argv[1:])<block_end> |
<import_from_stmt>datetime date datetime<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>alpaca_trade_api.rest TimeFrame<import_from_stmt>pytz timezone<import_from_stmt>liualgotrader.common config<import_from_stmt>liualgotrader.common.data_loader DataLoader# type: ignore
<import_from_stmt>liualgotrader.common.types DataConnectorType TimeScale<import_from_stmt>liualgotrader.data.alpaca AlpacaData AlpacaStream<line_sep>nyc=timezone("America/New_York")<line_sep>@pytest.mark.devtest<def_stmt>test_crypto_get_symbol <arrow>bool<block_start>alpaca_data=AlpacaData()<line_sep>start=date(2021 5 1)<line_sep>end=date(2021 10 1)<line_sep>_start,_end=alpaca_data._localize_start_end(start end)<line_sep>df=alpaca_data.crypto_get_symbol_data(symbol="BTCUSD" start=_start end=_end timeframe=TimeFrame.Day)<line_sep>print(df)<line_sep><return><true><block_end>@pytest.mark.devtest<def_stmt>test_btc_data_loader_day <arrow>bool<block_start>dl=DataLoader(TimeScale.day connector=DataConnectorType.alpaca)<line_sep>data=dl["BTCUSD"]["2021-05-01":"2021-10-01"]# type: ignore
print(data)<line_sep><return><true><block_end>@pytest.mark.devtest<def_stmt>test_btc_data_loader_min <arrow>bool<block_start>dl=DataLoader(connector=DataConnectorType.alpaca)<line_sep>data=dl["BTCUSD"]["2021-05-01":"2021-10-01"]# type: ignore
print(data)<line_sep><return><true><block_end>@pytest.mark.devtest<def_stmt>test_eth_data_loader_day <arrow>bool<block_start>dl=DataLoader(TimeScale.day connector=DataConnectorType.alpaca)<line_sep>data=dl["ETHUSD"]["2021-05-01":"2021-10-01"]# type: ignore
print(data)<line_sep><return><true><block_end> |
<import_from_stmt>.textProcessor TextProcessor<def_stmt>_use_text tok sent<block_start><return>tok['tok'].isalpha()<or>tok['tok'][1:].isalpha()<block_end><class_stmt>TextToArcs(TextProcessor)<block_start>"""
Transformer that outputs a collection of arcs in the dependency parses of each sentence of an utterance. The returned collection is a list where each element corresponds to a sentence in the utterance. Each sentence is represented in terms of its arcs, in a space-separated string.
Each arc, in turn, can be read as follows:
* `x_y` means that `x` is the parent and `y` is the child token (e.g., `agree_does` = `agree --> does`)
* `x_*` means that `x` is a token with at least one descendant, which we do not resolve (this is analogous to bigrams backing off to unigrams)
* `x>y` means that `x` and `y` are the first two tokens in the sentence
* `x>*` means that `x` is the first token in the sentence.
:param output_field: name of attribute to write arcs to.
:param input_field: name of field to use as input. defaults to 'parsed', which stores dependency parses as returned by the TextParser transformer; otherwise expects similarly-formatted input.
:param use_start: whether to also return the first and first two tokens of the sentence. defaults to `True`.
:param root_only: whether to return only the arcs from the root of the dependency parse. defaults to `False`.
:param follow_deps: if root_only is set to `True`, will nonetheless examine subtrees coming out of a dependency listed in follow_deps; by default will follow 'conj' dependencies (hence examining the parts of a sentence following conjunctions like "and").
:param filter_fn: a boolean function determining which tokens to use. arcs will only be included if filter_fn returns True for all tokens in the arc. the function is of signature filter_fn(token, sent) where tokens and sents are formatted according to the output of TextParser. by default, will use tokens which only contain alphabet letters, or only contain letters after the first character (allowing for contractions like you 're): i.e.: `tok['tok'].isalpha() or tok['tok'][1:].isalpha()`.
:param input_filter: a boolean function of signature `input_filter(utterance, aux_input)`. parses will only be computed for utterances where `input_filter` returns `True`. By default, will always return `True`, meaning that arcs will be computed for all utterances.
:param verbosity: frequency of status messages.
"""<def_stmt>__init__ self output_field input_field='parsed' use_start=<true> root_only=<false> follow_deps=('conj' ) filter_fn=_use_text input_filter=<lambda>utt aux:<true> verbosity=0<block_start>aux_input={'root_only':root_only 'use_start':use_start 'follow_deps':follow_deps 'filter_fn':filter_fn}<line_sep>TextProcessor.__init__(self proc_fn=self._get_arcs_per_message_wrapper output_field=output_field input_field=input_field aux_input=aux_input input_filter=input_filter verbosity=verbosity)<block_end><def_stmt>_get_arcs_per_message_wrapper self text_entry aux_input={}<block_start><return>get_arcs_per_message(text_entry aux_input['use_start'] aux_input['root_only'] aux_input['follow_deps'] aux_input['filter_fn'])<block_end><block_end><def_stmt>_get_arcs_at_root root sent use_start=<true> root_only=<false> follow_deps=('conj' ) filter_fn=_use_text<block_start>arcs=set()<if_stmt><not>filter_fn(root sent)<block_start><return>arcs<block_end>arcs.add(root['tok'].lower()+'_*')<line_sep>next_elems=[]<for_stmt>kid_idx root['dn']<block_start>kid=sent['toks'][kid_idx]<if_stmt>kid['dep']<in>['cc']<block_start><continue><block_end><if_stmt>filter_fn(kid sent)<block_start><if_stmt>(kid['dep']<not><in>follow_deps)<and>(root['tok'].lower()<ne>kid['tok'].lower())<block_start>arcs.add(root['tok'].lower()+'_'+kid['tok'].lower())<block_end><if_stmt>(<not>root_only)<or>(kid['dep']<in>follow_deps)<block_start>next_elems.append(kid)<block_end><block_end><block_end><if_stmt>use_start<block_start>first_elem=sent['toks'][0]<if_stmt>filter_fn(first_elem sent)<block_start>arcs.add(first_elem['tok'].lower()+'>*')<if_stmt>(1<not><in>first_elem['dn'])<and>(len(sent['toks'])<ge>2)<block_start>second_elem=sent['toks'][1]<if_stmt>0<not><in>second_elem['dn']<block_start><if_stmt>filter_fn(second_elem sent)<and>(first_elem['tok'].lower()<ne>second_elem['tok'].lower())<block_start>arcs.add(first_elem['tok'].lower()+'>'+second_elem['tok'].lower())<block_end><block_end><block_end><block_end><block_end><for_stmt>next_elem next_elems<block_start>arcs.update(_get_arcs_at_root(next_elem sent use_start=<false> root_only=root_only follow_deps=follow_deps filter_fn=filter_fn))<block_end><return>arcs<block_end><def_stmt>get_arcs_per_message message use_start=<true> root_only=<false> follow_deps=('conj' ) filter_fn=_use_text<block_start>"""
Stand-alone function that returns the arcs of parsed text.
:param message: parse to extract arcs from
:param use_start: whether to also return the first and first two tokens of the sentence. defaults to `True`.
:param root_only: whether to return only the arcs from the root of the dependency parse. defaults to `False`.
:param follow_deps: if root_only is set to `True`, will nonetheless examine subtrees coming out of a dependency listed in follow_deps; by default will follow 'conj' dependencies (hence examining the parts of a sentence following conjunctions like "and").
:param filter_fn: a boolean function determining which tokens to use. arcs will only be included if filter_fn returns True for all tokens in the arc. the function is of signature filter_fn(token, sent) where tokens and sents are formatted according to the output of TextParser. by default, will use tokens which only contain alphabet letters, or only contain letters after the first character (allowing for contractions like you 're): i.e.: `tok['tok'].isalpha() or tok['tok'][1:].isalpha()`.
:return: a list where each element corresponds to a sentence in the input message. Each sentence is represented in terms of its arcs, in a space-separated string.
"""<line_sep><return>[' '.join(sorted(_get_arcs_at_root(sent['toks'][sent['rt']] sent use_start=use_start root_only=root_only follow_deps=follow_deps filter_fn=filter_fn)))<for>sent message]<block_end> |
"""Scanning tests with fake mDNS responder.."""<import_from_stmt>ipaddress ip_address<import_stmt>pytest<import_from_stmt>pyatv.const Protocol<import_from_stmt>tests fake_udns<import_from_stmt>tests.conftest Scanner<import_from_stmt>tests.utils assert_device<line_sep>IP_1="10.0.0.1"<line_sep>AIRPLAY_NAME="AirPlay ATV"<line_sep>AIRPLAY_ID="AA:BB:CC:DD:EE:FF"<line_sep>pytestmark=pytest.mark.asyncio<async_keyword><def_stmt>test_multicast_scan_airplay_device udns_server multicast_scan:Scanner<block_start>udns_server.add_service(fake_udns.airplay_service(AIRPLAY_NAME AIRPLAY_ID addresses=[IP_1]))<line_sep>atvs=<await>multicast_scan()<assert_stmt>len(atvs)<eq>1<assert_stmt>atvs[0].name<eq>AIRPLAY_NAME<assert_stmt>atvs[0].identifier<eq>AIRPLAY_ID<assert_stmt>atvs[0].address<eq>ip_address(IP_1)<block_end><async_keyword><def_stmt>test_unicast_scan_airplay udns_server unicast_scan:Scanner<block_start>udns_server.add_service(fake_udns.airplay_service(AIRPLAY_NAME AIRPLAY_ID addresses=[IP_1] port=7000))<line_sep>atvs=<await>unicast_scan()<assert_stmt>len(atvs)<eq>1<line_sep>assert_device(atvs[0] AIRPLAY_NAME ip_address(IP_1) AIRPLAY_ID Protocol.AirPlay 7000 )<block_end> |
<import_stmt>functools<import_stmt>cudf<import_stmt>dask_cudf<import_stmt>logging<import_stmt>panel<as>pn<import_from_stmt>bokeh.models ColumnDataSource<import_from_stmt>panel.config panel_extension<import_from_stmt>panel.io state<import_from_stmt>panel.util edit_readonly<import_from_stmt>typing Dict<import_from_stmt>...assets datetime<as>dt<class_stmt>BaseChart<block_start>chart_type:str=<none><line_sep>x:str=<none><line_sep>y:str=<none><line_sep>aggregate_fn:str="count"<line_sep>color:str=<none><line_sep>_height:int=0<line_sep>_width:int=0<line_sep>add_interaction:bool=<true><line_sep>chart=<none><line_sep>source=<none><line_sep>source_backup=<none><line_sep>data_points:int=0<line_sep>filter_widget=<none><line_sep>_library_specific_params:Dict[str str]={}<line_sep>stride=<none><line_sep>stride_type=int<line_sep>min_value:float=0.0<line_sep>max_value:float=0.0<line_sep>x_label_map={}<line_sep>y_label_map={}<line_sep>_initialized=<false><line_sep># widget=False can only be rendered the main layout
is_widget=<false><line_sep>title=""<line_sep>@property<def_stmt>name self<block_start>chart_type=self.chart_type<if>self.chart_type<else>"chart"<line_sep><return>f"{self.x}_{chart_type}_{self.title}"<block_end>@property<def_stmt>width self<block_start><return>self._width<block_end>@width.setter<def_stmt>width self value<block_start>self._width=value<if_stmt>self.chart<is><not><none><block_start>self.update_dimensions(width=value)<block_end><if_stmt>self.filter_widget<is><not><none><block_start>self.filter_widget.width=value<block_end><block_end>@property<def_stmt>height self<block_start><return>self._height<block_end>@height.setter<def_stmt>height self value<block_start>self._height=value<if_stmt>self.chart<is><not><none><block_start>self.update_dimensions(height=value)<block_end><block_end>@property<def_stmt>library_specific_params self<block_start><return>self._library_specific_params<block_end>@property<def_stmt>x_dtype self<block_start><if_stmt>isinstance(self.source ColumnDataSource)<block_start><return>self.source.data[self.data_x_axis].dtype<block_end><elif_stmt>isinstance(self.source (cudf.DataFrame dask_cudf.DataFrame))<block_start><return>self.source[self.x].dtype<block_end><return><none><block_end>@property<def_stmt>y_dtype self<block_start><if_stmt>isinstance(self.source ColumnDataSource)<block_start><return>self.source.data[self.data_x_axis].dtype<block_end><elif_stmt>isinstance(self.source (cudf.DataFrame dask_cudf.DataFrame))<block_start><return>self.source[self.y].dtype<block_end><return><none><block_end>@library_specific_params.setter<def_stmt>library_specific_params self value<block_start>self._library_specific_params=value<line_sep>self.extract_mappers()<line_sep>self.set_color()<block_end><def_stmt>set_color self<block_start><if_stmt>"color"<in>self.library_specific_params<block_start>self.color=self.library_specific_params["color"]<block_end><block_end><def_stmt>extract_mappers self<block_start><if_stmt>"x_label_map"<in>self.library_specific_params<block_start>self.x_label_map=self.library_specific_params["x_label_map"]<line_sep>self.library_specific_params.pop("x_label_map")<block_end><if_stmt>"y_label_map"<in>self.library_specific_params<block_start>self.y_label_map=self.library_specific_params["y_label_map"]<line_sep>self.library_specific_params.pop("y_label_map")<block_end><block_end><def_stmt>_repr_mimebundle_ self include=<none> exclude=<none><block_start>view=self.view()<if_stmt>self._initialized<and>panel_extension._loaded<block_start><return>view._repr_mimebundle_(include exclude)<block_end><if_stmt>self._initialized<is><false><block_start>logging.warning("dashboard has not been initialized."<concat>"Please run cuxfilter.dashboard.Dashboard([...charts])"<concat>" to view this object in notebook")<block_end><if_stmt>panel_extension._loaded<is><false><block_start>logging.warning("notebooks assets not loaded."<concat>"Please run cuxfilter.load_notebooks_assets()"<concat>" to view this object in notebook")<if_stmt>isinstance(view pn.Column)<block_start><return>view.pprint()<block_end><block_end><return><none><block_end><def_stmt>_to_xaxis_type self dates<block_start>"""
Description: convert to int64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""<line_sep><return>dt.to_int64_if_datetime(dates self.x_dtype)<block_end><def_stmt>_to_yaxis_type self dates<block_start>"""
Description: convert to int64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""<line_sep><return>dt.to_int64_if_datetime(dates self.y_dtype)<block_end><def_stmt>_xaxis_dt_transform self dates<block_start>"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""<line_sep><return>dt.to_dt_if_datetime(dates self.x_dtype)<block_end><def_stmt>_yaxis_dt_transform self dates<block_start>"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""<line_sep><return>dt.to_dt_if_datetime(dates self.y_dtype)<block_end><def_stmt>_xaxis_np_dt64_transform self dates<block_start>"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""<line_sep><return>dt.to_np_dt64_if_datetime(dates self.x_dtype)<block_end><def_stmt>_yaxis_np_dt64_transform self dates<block_start>"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""<line_sep><return>dt.to_np_dt64_if_datetime(dates self.y_dtype)<block_end><def_stmt>_xaxis_stride_type_transform self stride_type<block_start>"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.x_dtype is
of type datetime, else return stride_type
"""<line_sep><return>dt.transform_stride_type(stride_type self.x_dtype)<block_end><def_stmt>_yaxis_stride_type_transform self stride_type<block_start>"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.y_dtype is
of type datetime else return stride_type
"""<line_sep><return>dt.transform_stride_type(stride_type self.y_dtype)<block_end><def_stmt>view self<block_start><return>self.chart<block_end><def_stmt>add_event self event callback<block_start><def_stmt>release_state <block_start><with_stmt>edit_readonly(state)<block_start>state.busy=<false><block_end><block_end><def_stmt>callback_busy_state event<block_start><with_stmt>edit_readonly(state)<block_start>state.busy=<true><block_end>cb=functools.partial(callback event)<line_sep>self.chart.document.add_next_tick_callback(cb)<line_sep>self.chart.document.add_next_tick_callback(release_state)<block_end>self.chart.on_event(event callback_busy_state)<block_end><def_stmt>update_dimensions self width=<none> height=<none><block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>calculate_source self data<block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>generate_chart self<block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>add_reset_event self callback=<none><block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>compute_query_dict self query_dict<block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>reset_chart self data:list=[]<block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>reload_chart self data patch_update:bool<block_start>print("base calc source function, to over-ridden by delegated classes")<line_sep><return>-1<block_end><def_stmt>format_source_data self source_dict patch_update=<false><block_start>""""""<line_sep># print('function to be overridden by library specific extensions')
<return>-1<block_end><def_stmt>get_source_y_axis self# print('function to be overridden by library specific extensions')
<block_start><return>[]<block_end><def_stmt>apply_mappers self<block_start>""""""<line_sep># print('function to be overridden by library specific extensions')
<return>-1<block_end><block_end> |
<class_stmt>Meta(dict)<block_start><def_stmt>__getattr__ self name<block_start><try_stmt><block_start><return>self[name]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(name)<block_end><block_end><def_stmt>__setattr__ self name value<block_start>self[name]=value<block_end><def_stmt>bind self name func<block_start>setattr(self.__class__ name func)<block_end><block_end> |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Defines a unary natural number (Peano natural number) abstract
data type for Relay and provides some utility functions for it.
Nats are useful for testing purposes, as they make it easy to write
test cases for recursion and pattern matching."""<import_from_stmt>tvm.relay.backend.interpreter ConstructorValue<def_stmt>get_type prelude name<block_start>ty_var=prelude.mod.get_global_type_var(name)<line_sep>ty_data=prelude.mod.type_definitions[ty_var]<line_sep><return>tuple([ty_var]+list(ty_data.constructors))<block_end><def_stmt>count prelude n<block_start>"""Takes a ConstructorValue corresponding to a nat ADT
and converts it into a Python integer. This is an example of
using an ADT value in Python.
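For example, the nat z counts to 0 and s(s(z)) counts to 2.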
"""<assert_stmt>isinstance(n ConstructorValue)<line_sep>_,z,s=prelude.mod.get_type("nat")<if_stmt>n.tag<eq>z.tag<block_start><return>0<block_end><assert_stmt>n.tag<eq>s.tag<line_sep><return>1+count(prelude n.fields[0])<block_end><def_stmt>make_nat_value prelude n<block_start>"""The inverse of count(): Given a non-negative Python integer,
constructs a ConstructorValue representing that value as a nat.
"""<line_sep>_,z,s=prelude.mod.get_type("nat")<if_stmt>n<eq>0<block_start><return>ConstructorValue(z.tag [] z)<block_end><return>ConstructorValue(s.tag [make_nat_value(prelude n-1)] s)<block_end><def_stmt>make_nat_expr prelude n<block_start>"""Given a non-negative Python integer, constructs a Python
expression representing that integer's value as a nat.
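For example, 3 becomes the expression s(s(s(z()))).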
"""<assert_stmt>n<ge>0<line_sep>_,z,s=prelude.mod.get_type("nat")<line_sep>ret=z()<while_stmt>n<g>0<block_start>ret=s(ret)<line_sep>n=n-1<block_end><return>ret<block_end> |
<import_from_stmt>django.db models<import_from_stmt>..models SafeDeleteModel<import_from_stmt>.testcase SafeDeleteTestCase<class_stmt>InvisibleModel(SafeDeleteModel)# Soft-deleted instances of SafeDeleteModel subclasses are hidden from the default queryset, hence "invisible".
<block_start>name=models.CharField(max_length=100)<block_end><class_stmt>VisibilityTestCase(SafeDeleteTestCase)<block_start><def_stmt>setUp self<block_start>self.instance=InvisibleModel.objects.create(name='instance')<block_end><def_stmt>test_visible_by_pk self<block_start>"""Test whether the soft deleted model cannot be found by filtering on pk."""<line_sep>self.assertSoftDelete(self.instance save=<false>)<line_sep>self.assertEqual(InvisibleModel.objects.filter(pk=self.instance.pk).count() 0)<line_sep>self.assertRaises(InvisibleModel.DoesNotExist InvisibleModel.objects.get pk=self.instance.pk)<block_end><def_stmt>test_invisible_by_name self<block_start>"""Test whether the soft deleted model cannot be found by filtering on name."""<line_sep>self.assertSoftDelete(self.instance save=<false>)<line_sep>self.assertEqual(InvisibleModel.objects.filter(name=self.instance.name).count() 0)<line_sep>self.assertRaises(InvisibleModel.DoesNotExist InvisibleModel.objects.get name=self.instance.name)<block_end><block_end> |
"""
A set of tools for generating adversarial examples on the PaddlePaddle platform
"""<line_sep> |
"""
This app creates a collapsible, responsive sidebar layout with
dash-bootstrap-components and some custom css with media queries.
When the screen is small, the sidebar moves to the top of the page, and the
links are hidden in a collapse element. We use a callback to toggle the
collapse on small screens, and custom CSS to hide the toggle and force the
collapse to stay open when the screen is large.
dcc.Location is used to track the current location, and a callback uses the current
location to render the appropriate page content. The active prop of each
NavLink is set automatically according to the current pathname. To use this
feature you must install dash-bootstrap-components >= 0.11.0.
For more details on building multi-page Dash applications, check out the Dash
documentation: https://dash.plot.ly/urls
"""<import_stmt>dash<import_stmt>dash_bootstrap_components<as>dbc<import_from_stmt>dash Input Output State dcc html<line_sep>app=dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP] # these meta_tags ensure content is scaled correctly on different devices
# see: https://www.w3schools.com/css/css_rwd_viewport.asp for more
meta_tags=[{"name":"viewport" "content":"width=device-width, initial-scale=1"}] )<line_sep># we use the Row and Col components to construct the sidebar header
# it consists of a title, and a toggle, the latter is hidden on large screens
sidebar_header=dbc.Row([dbc.Col(html.H2("Sidebar" className="display-4")) dbc.Col([html.Button(# use the Bootstrap navbar-toggler classes to style
html.Span(className="navbar-toggler-icon") className="navbar-toggler" # the navbar-toggler classes don't set color
style={"color":"rgba(0,0,0,.5)" "border-color":"rgba(0,0,0,.1)" } id="navbar-toggle" ) html.Button(# use the Bootstrap navbar-toggler classes to style
html.Span(className="navbar-toggler-icon") className="navbar-toggler" # the navbar-toggler classes don't set color
style={"color":"rgba(0,0,0,.5)" "border-color":"rgba(0,0,0,.1)" } id="sidebar-toggle" ) ] # the column containing the toggle will be only as wide as the
# toggle, resulting in the toggle being right aligned
width="auto" # vertically align the toggle in the center
align="center" ) ])<line_sep>sidebar=html.Div([sidebar_header # we wrap the horizontal rule and short blurb in a div that can be
# hidden on a small screen
html.Div([html.Hr() html.P("A responsive sidebar layout with collapsible navigation "<concat>"links." className="lead" ) ] id="blurb" ) # use the Collapse component to animate hiding / revealing links
dbc.Collapse(dbc.Nav([dbc.NavLink("Home" href="/" active="exact") dbc.NavLink("Page 1" href="/page-1" active="exact") dbc.NavLink("Page 2" href="/page-2" active="exact") ] vertical=<true> pills=<true> ) id="collapse" ) ] id="sidebar" )<line_sep>content=html.Div(id="page-content")<line_sep>app.layout=html.Div([dcc.Location(id="url") sidebar content])<line_sep>@app.callback(Output("page-content" "children") [Input("url" "pathname")])<def_stmt>render_page_content pathname<block_start><if_stmt>pathname<eq>"/"<block_start><return>html.P("This is the content of the home page!")<block_end><elif_stmt>pathname<eq>"/page-1"<block_start><return>html.P("This is the content of page 1. Yay!")<block_end><elif_stmt>pathname<eq>"/page-2"<block_start><return>html.P("Oh cool, this is page 2!")<block_end># If the user tries to reach a different page, return a 404 message
<return>dbc.Jumbotron([html.H1("404: Not found" className="text-danger") html.Hr() html.P(f"The pathname {pathname} was not recognised...") ])<block_end>@app.callback(Output("sidebar" "className") [Input("sidebar-toggle" "n_clicks")] [State("sidebar" "className")] )<def_stmt>toggle_classname n classname<block_start><if_stmt>n<and>classname<eq>""<block_start><return>"collapsed"<block_end><return>""<block_end>@app.callback(Output("collapse" "is_open") [Input("navbar-toggle" "n_clicks")] [State("collapse" "is_open")] )<def_stmt>toggle_collapse n is_open<block_start><if_stmt>n<block_start><return><not>is_open<block_end><return>is_open<block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run_server(port=8888 debug=<true>)<block_end> |
<import_from_stmt>rpython.rlib rwin32<import_from_stmt>rpython.rlib.rarithmetic r_uint<import_from_stmt>rpython.rtyper.lltypesystem lltype rffi<import_from_stmt>rpython.rtyper.tool rffi_platform<import_from_stmt>rpython.translator.tool.cbuild ExternalCompilationInfo<import_from_stmt>pypy.interpreter.error oefmt wrap_windowserror<import_from_stmt>pypy.interpreter.function StaticMethod<import_from_stmt>pypy.interpreter.gateway interp2app unwrap_spec<import_from_stmt>_multiprocess.interp_connection w_handle<line_sep>CONSTANTS="""
PIPE_ACCESS_INBOUND PIPE_ACCESS_DUPLEX
GENERIC_READ GENERIC_WRITE OPEN_EXISTING
PIPE_TYPE_MESSAGE PIPE_READMODE_MESSAGE PIPE_WAIT
PIPE_UNLIMITED_INSTANCES
NMPWAIT_WAIT_FOREVER
ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY
ERROR_NO_SYSTEM_RESOURCES ERROR_BROKEN_PIPE ERROR_MORE_DATA
ERROR_ALREADY_EXISTS ERROR_NO_DATA
""".split()<class_stmt>CConfig<block_start>_compilation_info_=ExternalCompilationInfo(includes=['windows.h'] libraries=['kernel32'] )<for_stmt>name CONSTANTS<block_start>locals()[name]=rffi_platform.ConstantInteger(name)<block_end><block_end>config=rffi_platform.configure(CConfig)<line_sep>globals().update(config)<def_stmt>handle_w space w_handle<block_start><return>rffi.cast(rwin32.HANDLE space.int_w(w_handle))<block_end>_CreateNamedPipe=rwin32.winexternal('CreateNamedPipeA' [rwin32.LPCSTR rwin32.DWORD rwin32.DWORD rwin32.DWORD rwin32.DWORD rwin32.DWORD rwin32.DWORD rffi.VOIDP] rwin32.HANDLE save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_ConnectNamedPipe=rwin32.winexternal('ConnectNamedPipe' [rwin32.HANDLE rffi.VOIDP] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_SetNamedPipeHandleState=rwin32.winexternal('SetNamedPipeHandleState' [rwin32.HANDLE rwin32.LPDWORD rwin32.LPDWORD rwin32.LPDWORD] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_WaitNamedPipe=rwin32.winexternal('WaitNamedPipeA' [rwin32.LPCSTR rwin32.DWORD] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_PeekNamedPipe=rwin32.winexternal('PeekNamedPipe' [rwin32.HANDLE rffi.VOIDP rwin32.DWORD rwin32.LPDWORD rwin32.LPDWORD rwin32.LPDWORD] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_CreateFile=rwin32.winexternal('CreateFileA' [rwin32.LPCSTR rwin32.DWORD rwin32.DWORD rffi.VOIDP rwin32.DWORD rwin32.DWORD rwin32.HANDLE] rwin32.HANDLE save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_WriteFile=rwin32.winexternal('WriteFile' [rwin32.HANDLE rffi.VOIDP rwin32.DWORD rwin32.LPDWORD rffi.VOIDP] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_ReadFile=rwin32.winexternal('ReadFile' [rwin32.HANDLE rffi.VOIDP rwin32.DWORD rwin32.LPDWORD rffi.VOIDP] rwin32.BOOL save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_ExitProcess=rwin32.winexternal('ExitProcess' [rffi.UINT] lltype.Void save_err=rffi.RFFI_SAVE_LASTERROR)<line_sep>_GetTickCount=rwin32.winexternal('GetTickCount' [] rwin32.DWORD)<line_sep>_Sleep=rwin32.winexternal('Sleep' [rwin32.DWORD] lltype.Void)<def_stmt>CloseHandle space w_handle<block_start>handle=handle_w(space w_handle)<if_stmt><not>rwin32.CloseHandle(handle)<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><block_end><def_stmt>GetLastError space<block_start>"""NOTE: don't use this. See issue #2658"""<line_sep><return>space.newint(rwin32.GetLastError_saved())<block_end># __________________________________________________________
# functions for the "win32" namespace
@unwrap_spec(name='text' openmode=r_uint pipemode=r_uint maxinstances=r_uint outputsize=r_uint inputsize=r_uint timeout=r_uint)<def_stmt>CreateNamedPipe space name openmode pipemode maxinstances outputsize inputsize timeout w_security<block_start>security=space.int_w(w_security)<if_stmt>security<block_start><raise>oefmt(space.w_NotImplementedError "expected a NULL pointer")<block_end>handle=_CreateNamedPipe(name openmode pipemode maxinstances outputsize inputsize timeout rffi.NULL)<if_stmt>handle<eq>rwin32.INVALID_HANDLE_VALUE<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><return>w_handle(space handle)<block_end><def_stmt>ConnectNamedPipe space w_handle w_overlapped<block_start>handle=handle_w(space w_handle)<line_sep>overlapped=space.int_w(w_overlapped)<if_stmt>overlapped<block_start><raise>oefmt(space.w_NotImplementedError "expected a NULL pointer")<block_end><if_stmt><not>_ConnectNamedPipe(handle rffi.NULL)<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><block_end><def_stmt>SetNamedPipeHandleState space w_handle w_pipemode w_maxinstances w_timeout<block_start>handle=handle_w(space w_handle)<line_sep>state=lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO 3 flavor='raw')<line_sep>statep=lltype.malloc(rffi.CArrayPtr(rffi.UINTP).TO 3 flavor='raw' zero=<true>)<try_stmt><block_start><if_stmt><not>space.is_w(w_pipemode space.w_None)<block_start>state[0]=rffi.cast(rffi.UINT space.uint_w(w_pipemode))<line_sep>statep[0]=rffi.ptradd(state 0)<block_end><if_stmt><not>space.is_w(w_maxinstances space.w_None)<block_start>state[1]=rffi.cast(rffi.UINT space.uint_w(w_maxinstances))<line_sep>statep[1]=rffi.ptradd(state 1)<block_end><if_stmt><not>space.is_w(w_timeout space.w_None)<block_start>state[2]=rffi.cast(rffi.UINT space.uint_w(w_timeout))<line_sep>statep[2]=rffi.ptradd(state 2)<block_end><if_stmt><not>_SetNamedPipeHandleState(handle statep[0] statep[1] statep[2])<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><block_end><finally_stmt><block_start>lltype.free(state flavor='raw')<line_sep>lltype.free(statep flavor='raw')<block_end><block_end>@unwrap_spec(name='text' timeout=r_uint)<def_stmt>WaitNamedPipe space name timeout# Careful: zero means "default value specified by CreateNamedPipe()"
<block_start><if_stmt><not>_WaitNamedPipe(name timeout)<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><block_end>@unwrap_spec(filename='fsencode' access=r_uint share=r_uint disposition=r_uint flags=r_uint)<def_stmt>CreateFile space filename access share w_security disposition flags w_templatefile<block_start>security=space.int_w(w_security)<line_sep>templatefile=space.int_w(w_templatefile)<if_stmt>security<or>templatefile<block_start><raise>oefmt(space.w_NotImplementedError "expected a NULL pointer")<block_end>handle=_CreateFile(filename access share rffi.NULL disposition flags rwin32.NULL_HANDLE)<if_stmt>handle<eq>rwin32.INVALID_HANDLE_VALUE<block_start><raise>wrap_windowserror(space rwin32.lastSavedWindowsError())<block_end><return>w_handle(space handle)<block_end>@unwrap_spec(code=r_uint)<def_stmt>ExitProcess space code<block_start>_ExitProcess(code)<block_end><def_stmt>win32_namespace space<block_start>"NOT_RPYTHON"<line_sep>w_win32=space.call_function(space.w_type space.wrap("win32") space.newtuple([]) space.newdict())<line_sep># constants
<for_stmt>name CONSTANTS<block_start>space.setattr(w_win32 space.wrap(name) space.wrap(config[name]))<block_end>space.setattr(w_win32 space.wrap('NULL') space.newint(0))<line_sep># functions
<for_stmt>name ['CloseHandle' 'GetLastError' 'CreateFile' 'CreateNamedPipe' 'ConnectNamedPipe' 'SetNamedPipeHandleState' 'WaitNamedPipe' 'ExitProcess' ]<block_start>function=globals()[name]<line_sep>w_function=space.wrap(interp2app(function))<line_sep>w_method=space.wrap(StaticMethod(w_function))<line_sep>space.setattr(w_win32 space.wrap(name) w_method)<block_end><return>w_win32<block_end> |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-17 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>calvin.actor.actor Actor manage condition stateguard calvinsys<import_from_stmt>calvin.utilities.calvinlogger get_actor_logger<line_sep>_log=get_actor_logger(__name__)<class_stmt>ImageSource(Actor)<block_start>"""
When a token arrives on the input, capture an image.
Inputs:
trigger: anything
Outputs:
b64image: generated image
"""<line_sep>@manage(exclude=["_cam"])<def_stmt>init self<block_start>self.setup()<block_end><def_stmt>setup self<block_start>self._cam=calvinsys.open(self "image.source")<block_end><def_stmt>did_migrate self<block_start>self.setup()<block_end><def_stmt>will_end self<block_start>calvinsys.close(self._cam)<block_end>@stateguard(<lambda>self:calvinsys.can_read(self._cam))@condition(action_output=['b64image'])<def_stmt>send_image self<block_start>image=calvinsys.read(self._cam)<line_sep><return>(image )<block_end>@stateguard(<lambda>self:calvinsys.can_write(self._cam))@condition(action_input=['trigger'])<def_stmt>fetch_image self trigger<block_start>calvinsys.write(self._cam <none>)<block_end>action_priority=(fetch_image send_image)<line_sep>requires=['image.source']<line_sep>test_calvinsys={'image.source':{'read':[1 0 1 0 0 1 0 1] 'write':[<none> <none> <none> <none>]}}<line_sep>test_set=[{'inports':{'trigger':[<true> 1 "a" 0]} 'outports':{'b64image':[1 0 1 0 0 1 0 1]}}]<block_end> |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""<import_stmt>argparse<import_stmt>logging<import_stmt>os<import_stmt>subprocess<import_stmt>random<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>sys<line_sep>python_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep>sys.path.insert(0 python_dir)<import_from_stmt>cuberite_process CuberiteProcess<import_from_stmt>repo repo_home<line_sep>logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")<line_sep>logging.getLogger().setLevel(logging.DEBUG)<def_stmt>to_unit_vec yaw pitch<block_start>pitch<augmul>3.14159/180<line_sep>yaw<augmul>3.14159/180<line_sep><return>np.array([-1<times>np.cos(pitch)<times>np.sin(yaw) -1<times>np.sin(pitch) np.cos(pitch)<times>np.cos(yaw)])<block_end><def_stmt>ground_height blocks<block_start>dirt_pct=np.mean(np.mean(blocks[: : : 0]<eq>2 axis=1) axis=1)<if_stmt>(dirt_pct<g>0.25).any()<block_start><return>np.argmax(dirt_pct)<block_end><return><none><block_end><def_stmt>change_block schematic b<block_start>x,y,z=b<line_sep>## change to red wool
schematic[y][z][x][0]=35<line_sep>schematic[y][z][x][1]=14<block_end><def_stmt>render npy_p2b out_dir port spp img_size mn=<none><block_start>npy_file=(os.path.expanduser("~")+"/minecraft_houses/"+".".join(npy_p2b.split(".")[1:-2])+"/schematic.npy")<line_sep>schematic=np.load(npy_file)<line_sep>print(schematic.shape)<line_sep>house_name=os.path.basename(os.path.dirname(npy_file))<line_sep>p2b=np.load(npy_p2b)<line_sep># remove blocks below ground-level
g=ground_height(schematic)<line_sep>schematic=schematic[(g<or>0): : : :]<line_sep>ys,zs,xs=np.nonzero(schematic[: : : 0]<g>0)<line_sep>xmid,ymid,zmid=np.mean(xs) np.mean(ys) np.mean(zs)<line_sep>focus=np.array([xmid ymid+63 zmid])# TODO: +63 only works for flat_world seed=0w
yaw,distance=list(map(int npy_p2b.split(".")[-2].split("_")))<line_sep>look=[yaw 0]<line_sep>look_xyz=to_unit_vec(*look)<line_sep>camera=focus-(look_xyz<times>distance)<if_stmt>mn<eq>[0 0]<block_start>M,N=p2b.shape[:2]<while_stmt><true><block_start>m=random.randint(0 M-1)<line_sep>n=random.randint(0 N-1)<if_stmt>p2b[m][n][0]<ne>-1<block_start><break><block_end><block_end><block_end><else_stmt><block_start>m,n=mn<block_end>print("Select pixel at {}".format((m n)))<line_sep>print("Mapped block {}".format(p2b[m][n]))<line_sep>change_block(schematic p2b[m][n])<line_sep>logging.info("Launching cuberite at port {}".format(port))<line_sep>p=CuberiteProcess("flat_world" seed=0 game_mode="creative" place_blocks_yzx=schematic port=port)<line_sep>logging.info("Destroying cuberite at port {}".format(port))<line_sep>p.destroy()<line_sep>world_dir=os.path.join(p.workdir "world")<line_sep>render_view_bin=os.path.join(repo_home "bin/render_view")<assert_stmt>os.path.isfile(render_view_bin) "{} not found.\n\nTry running: make render_view".format(render_view_bin)<line_sep>procs=[]<line_sep>chunky_id="{}_{}".format(yaw distance)<line_sep>out_file="{}/chunky_verify.{}.{}.png".format(out_dir house_name chunky_id)<line_sep>call=[str(a)<for>a ["python3" "{}/python/minecraft_render/render.py".format(repo_home) "--world" world_dir "--out" out_file "--camera" *camera "--look" yaw 0 "--size" *img_size "--spp" spp ]]<line_sep>logging.info("CALL: "+" ".join(call))<line_sep>procs.append(subprocess.Popen(call))<for_stmt>p procs<block_start>p.wait()<block_end>## draw the sampled pixel for a better view
img=cv2.imread(out_file)<line_sep>cv2.circle(img (n m) 2 (255 0 0))<line_sep>cv2.imwrite(out_file img)<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("npy_p2b")<line_sep>parser.add_argument("--out-dir" "-o" required=<true> help="Directory in which to write vision files")<line_sep>parser.add_argument("--spp" type=int default=25 help="samples per pixel")<line_sep>parser.add_argument("--port" type=int default=25565)<line_sep>parser.add_argument("--size" type=int nargs=2 default=[300 225])<line_sep>parser.add_argument("--mn" type=int nargs=2 default=[0 0])<line_sep>args=parser.parse_args()<line_sep>render(args.npy_p2b args.out_dir args.port args.spp args.size args.mn)<block_end> |
<import_stmt>pytest<line_sep>skip=<false><if_stmt><not>skip<block_start>@pytest.fixture(scope="module" params=["primary_assembly" "toplevel"])<def_stmt>assembly request<block_start><return>request.param<block_end>@pytest.fixture(scope="module" params=["98" <none>])<def_stmt>release_version request<block_start><return>request.param<block_end>@pytest.fixture(scope="module" params=["hard" "soft" "unmasked"])<def_stmt>masking request<block_start><return>request.param<block_end><def_stmt>test_ensembl_genome_download_links assembly masking release_version ensembl<block_start>"""Test Ensembl links with various options
These genomes are hosted on ftp.ensembl.org.
Vertebrate genomes are downloaded over HTTP.
"""<line_sep>mask=masking<if>masking<ne>"unmasked"<else>"none"<line_sep>toplevel=<false><if>assembly<eq>"primary_assembly"<else><true><line_sep>version=release_version<assert_stmt>ensembl.get_genome_download_link("GRCh38.p13" mask=mask toplevel=toplevel version=version)<block_end><def_stmt>test_ensemblgenomes_genome_download_links masking ensembl<block_start>"""Test Ensembl FTP links for various genomes
These genomes are hosted on ftp.ensemblgenomes.org.
"""<line_sep>mask=masking<if>masking<ne>"unmasked"<else>"none"<for_stmt>genome ["Amel_HAv3.1" "ASM23943v1"]<block_start><assert_stmt>ensembl.get_genome_download_link(genome mask=mask)<block_end><block_end><def_stmt>test_ucsc_genome_download_links masking ucsc<block_start>"""Test UCSC HTTP links for various genomes
Also test masking (unmasked should be ignored)."""<for_stmt>genome ["sacCer3" "hg38"]<block_start><assert_stmt>ucsc.get_genome_download_link(genome mask=masking)<block_end><block_end><def_stmt>test_ncbi_genome_download_links masking ncbi<block_start>"""Test NCBI HTTPS links for various genomes
Also test masking (should be ignored).
These genomes are hosted on ftp://ftp.ncbi.nlm.nih.gov."""<for_stmt>genome ["Charlie1.0" "GRCh38.p13"]<block_start><assert_stmt>ncbi.get_genome_download_link(genome mask=masking)<block_end><block_end><block_end> |
<import_stmt>datetime<import_from_stmt>connexion request<import_from_stmt>anchore_engine.apis exceptions<as>api_exceptions<import_from_stmt>anchore_engine.apis.authorization ActionBoundPermission RequestingAccountValue get_authorizer <import_from_stmt>anchore_engine.apis.context ApiRequestContextProxy<import_from_stmt>anchore_engine.clients.services internal_client_for<import_from_stmt>anchore_engine.clients.services.catalog CatalogClient<import_from_stmt>anchore_engine.common.helpers make_response_error<import_from_stmt>anchore_engine.subsys logger<line_sep>authorizer=get_authorizer()<line_sep>IMPORT_BUCKET="image_content_imports"<line_sep>MAX_UPLOAD_SIZE=100<times>1024<times>1024# 100 MB
OPERATION_EXPIRATION_DELTA=datetime.timedelta(hours=24)<line_sep>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>create_operation <block_start>"""
POST /imports/images
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.create_image_import()<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_operations <block_start>"""
GET /imports/images
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_image_import_operations()<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>get_operation operation_id<block_start>"""
GET /imports/images/{operation_id}
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.get_image_import_operation(operation_id)<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>invalidate_operation operation_id<block_start>"""
DELETE /imports/images/{operation_id}
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.cancel_image_import(operation_id)<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_import_packages operation_id<block_start>"""
GET /imports/images/{operation_id}/packages
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_import_content(operation_id "packages")<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_import_dockerfiles operation_id<block_start>"""
GET /imports/images/{operation_id}/dockerfile
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_import_content(operation_id "dockerfile")<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_import_image_manifests operation_id<block_start>"""
GET /imports/images/{operation_id}/manifest
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_import_content(operation_id "manifest")<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_import_parent_manifests operation_id<block_start>"""
GET /imports/images/{operation_id}/parent_manifest
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_import_content(operation_id "parent_manifest")<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>list_import_image_configs operation_id<block_start>"""
GET /imports/images/{operation_id}/image_config
:param operation_id:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep>resp=client.list_import_content(operation_id "image_config")<line_sep><return>resp 200<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>import_image_packages operation_id<block_start>"""
POST /imports/images/{operation_id}/packages
:param operation_id:
:param sbom:
:return:
"""<line_sep><return>content_upload(operation_id "packages" request)<block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>import_image_dockerfile operation_id<block_start>"""
POST /imports/images/{operation_id}/dockerfile
:param operation_id:
:param sbom:
:return:
"""<line_sep><return>content_upload(operation_id "dockerfile" request)<block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>import_image_manifest operation_id<block_start>"""
POST /imports/images/{operation_id}/manifest
:param operation_id:
:return:
"""<line_sep><return>content_upload(operation_id "manifest" request)<block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>import_image_parent_manifest operation_id<block_start>"""
POST /imports/images/{operation_id}/parent_manifest
:param operation_id:
:return:
"""<line_sep><return>content_upload(operation_id "parent_manifest" request)<block_end>@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])<def_stmt>import_image_config operation_id<block_start>"""
POST /imports/images/{operation_id}/image_config
:param operation_id:
:return:
"""<line_sep><return>content_upload(operation_id "image_config" request)<block_end><def_stmt>content_upload operation_id content_type request<block_start>"""
Generic handler for multiple types of content uploads. Still operates at the API layer.
:param operation_id:
:param content_type:
:param request:
:return:
"""<try_stmt><block_start>client=internal_client_for(CatalogClient userId=ApiRequestContextProxy.namespace())<line_sep><return>(client.upload_image_import_content(operation_id content_type request.data) 200 )<block_end><except_stmt>api_exceptions.AnchoreApiError<as>ex<block_start><return>(make_response_error(ex in_httpcode=ex.__response_code__) ex.__response_code__ )<block_end><except_stmt>Exception<as>ex<block_start>logger.exception("Unexpected error in api processing")<line_sep><return>make_response_error(ex in_httpcode=500) 500<block_end><block_end> |
# coding=utf-8
# Copyright (C) ATHENA AUTHORS
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=too-few-public-methods, no-member, too-many-arguments, unused-argument
""" learning rate """<import_stmt>tensorflow<as>tf<import_from_stmt>..utils.hparam register_and_parse_hparams<class_stmt>WarmUpLearningSchedule(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>""" WarmUp Learning rate schedule for Adam
Used as :
optimizer = tf.keras.optimizers.Adam(learning_rate = WarmUpLearningSchedule(512),
beta_1=0.9, beta_2=0.98, epsilon=1e-9)
Args :
model_dim is the model dimension (d_model) used to scale the learning rate
warmup_steps is the number of steps over which the learning rate ramps up to its peak before decaying
Returns:
the learning rate for the given step
Idea from the paper: Attention Is All You Need
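Concretely, as implemented in __call__ below, the schedule is:
lr(step) = k * decay_rate**(step // decay_steps) * model_dim**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
(with the default decay_rate=1.0 this reduces to the classic Transformer/Noam schedule)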
"""<def_stmt>__init__ self model_dim=512 warmup_steps=4000 k=1.0 decay_steps=99999999 decay_rate=1.0<block_start>super().__init__()<line_sep>self.model_dim=tf.cast(model_dim tf.float32)<line_sep>self.warmup_steps=warmup_steps<line_sep>self.k=k<line_sep>self.decay_steps=tf.cast(decay_steps tf.float32)<line_sep>self.decay_rate=tf.cast(decay_rate tf.float32)<block_end><def_stmt>__call__ self step<block_start>step=tf.cast(step tf.float32)<line_sep>arg1=tf.math.rsqrt(step)<line_sep>arg2=step<times>(self.warmup_steps<power>-1.5)<line_sep>k=self.k<times>tf.cast(self.decay_rate<power>(step<floordiv>self.decay_steps) tf.float32)<line_sep><return>k<times>tf.math.rsqrt(self.model_dim)<times>tf.math.minimum(arg1 arg2)<block_end><block_end><class_stmt>WarmUpAdam(tf.keras.optimizers.Adam)<block_start>"""WarmUpAdam Implementation """<line_sep>default_config={"d_model":512 "warmup_steps":8000 "k":0.5 "decay_steps":100000 "decay_rate":1.0}<def_stmt>__init__ self config=<none> beta_1=0.9 beta_2=0.999 epsilon=1e-7 amsgrad=<false> name="WarmUpAdam" **kwargs<block_start>self.hparams=register_and_parse_hparams(self.default_config config cls=self.__class__)<line_sep>super().__init__(learning_rate=WarmUpLearningSchedule(self.hparams.d_model self.hparams.warmup_steps self.hparams.k self.hparams.decay_steps self.hparams.decay_rate) beta_1=beta_1 beta_2=beta_2 epsilon=epsilon amsgrad=amsgrad name=name )<block_end><block_end><class_stmt>ExponentialDecayLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>""" ExponentialDecayLearningRateSchedule
Used as :
optimizer = tf.keras.optimizers.Adam(
learning_rate = ExponentialDecayLearningRate(0.01, 100))
Args :
initial_lr, decay_steps, decay_rate
Returns:
initial_lr * (decay_rate ** (step // decay_steps))
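For example, with the defaults initial_lr=0.005, decay_steps=10000 and decay_rate=0.5,
the rate is 0.005 for steps 0-9999, then 0.0025, then 0.00125, and so on.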
"""<def_stmt>__init__ self initial_lr=0.005 decay_steps=10000 decay_rate=0.5<block_start>super().__init__()<line_sep>self.initial_lr=initial_lr<line_sep>self.decay_steps=tf.cast(decay_steps tf.float32)<line_sep>self.decay_rate=tf.cast(decay_rate tf.float32)<block_end><def_stmt>__call__ self step<block_start>step=tf.cast(step tf.float32)<line_sep>factor=tf.cast(self.decay_rate<power>(step<floordiv>self.decay_steps) tf.float32)<line_sep><return>self.initial_lr<times>factor<block_end><block_end><class_stmt>ExponentialDecayAdam(tf.keras.optimizers.Adam)<block_start>"""WarmUpAdam Implementation """<line_sep>default_config={"initial_lr":0.005 "decay_steps":10000 "decay_rate":0.5}<def_stmt>__init__ self config=<none> beta_1=0.9 beta_2=0.999 epsilon=1e-7 amsgrad=<false> name="WarmUpAdam" **kwargs<block_start>self.hparams=register_and_parse_hparams(self.default_config config cls=self.__class__)<line_sep>super().__init__(learning_rate=ExponentialDecayLearningRateSchedule(self.hparams.initial_lr self.hparams.decay_steps self.hparams.decay_rate) beta_1=beta_1 beta_2=beta_2 epsilon=epsilon amsgrad=amsgrad name=name )<block_end><block_end> |
#runas import numpy as np; n = 20; a = np.arange(n*n*n).reshape((n,n,n)).astype(np.uint8); b = 2. ; goodExpoMeasure(a, b)
#pythran export goodExpoMeasure(uint8[][][], float)
<import_stmt>numpy<def_stmt>goodExpoMeasure inRGB sigma<block_start>'''
Compute the good-exposure (well-exposedness) image quality measure on one input image.
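Per channel the weight is exp(-((value - 128)**2) / sigma); the per-pixel measure is the
product of the R, G and B weights, which is then rounded to two decimals and scaled by 255 into uint8.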
'''<line_sep>R=inRGB[0 : :].astype(numpy.float64)<line_sep>G=inRGB[1 : :].astype(numpy.float64)<line_sep>B=inRGB[2 : :].astype(numpy.float64)<line_sep>goodExpoR=numpy.exp(-((R-128)<power>2)/sigma)<line_sep>goodExpoG=numpy.exp(-((G-128)<power>2)/sigma)<line_sep>goodExpoB=numpy.exp(-((B-128)<power>2)/sigma)<line_sep>goodExpo=goodExpoR<times>goodExpoG<times>goodExpoB<line_sep>goodExpo=(numpy.round(goodExpo 2)<times>(2<power>8-1)).astype(numpy.uint8)<line_sep><return>goodExpo<block_end> |
# coding: utf-8
<import_from_future_stmt> unicode_literals<import_stmt>logging<import_stmt>os<import_stmt>tempfile<import_stmt>weakref<import_from_stmt>datetime datetime<import_from_stmt>xml.etree ElementTree<as>ETree<try_stmt><block_start><import_stmt>html<block_end><except_stmt>ImportError# Python 2.6-2.7
# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyCompatibility
<block_start><import_from_stmt>HTMLParser HTMLParser<line_sep>html=HTMLParser()<block_end><import_from_stmt>wxpy.api.chats Chat Group Member User<import_from_stmt>wxpy.compatible.utils force_encoded_string_output<import_from_stmt>wxpy.utils wrap_user_name repr_message<import_from_stmt>.article Article<import_from_stmt>..consts ATTACHMENT CARD FRIENDS MAP PICTURE RECORDING SHARING TEXT VIDEO<import_from_stmt>...compatible *<line_sep>logger=logging.getLogger(__name__)<class_stmt>Message(object)<block_start>"""
A single message object, covering:
* messages received from chats such as friends, group chats, and friend requests
* messages sent from the bot account in the WeChat mobile app
| but **not** messages sent from code via the .send/reply() family of methods
| for those, see :class:`SentMessage`
"""<def_stmt>__init__ self raw bot<block_start>self.raw=raw<line_sep>self.bot=weakref.proxy(bot)<line_sep>self._receive_time=datetime.now()<line_sep># 将 msg.chat.send* 方法绑定到 msg.reply*,例如 msg.chat.send_img => msg.reply_img
<for_stmt>method '' '_image' '_file' '_video' '_msg' '_raw_msg'<block_start>setattr(self 'reply'+method getattr(self.chat 'send'+method))<block_end><block_end><def_stmt>__hash__ self<block_start><return>hash((Message self.id))<block_end>@force_encoded_string_output<def_stmt>__repr__ self<block_start><return>repr_message(self)<block_end><def_stmt>__unicode__ self<block_start><return>repr_message(self)<block_end># basic
@property<def_stmt>type self<block_start>"""
The type of the message; currently one of the following values::
# text
TEXT = 'Text'
# location
MAP = 'Map'
# card
CARD = 'Card'
# note
NOTE = 'Note'
# sharing
SHARING = 'Sharing'
# picture
PICTURE = 'Picture'
# voice recording
RECORDING = 'Recording'
# file attachment
ATTACHMENT = 'Attachment'
# video
VIDEO = 'Video'
# friend request
FRIENDS = 'Friends'
# system
SYSTEM = 'System'
:rtype: str
"""<line_sep><return>self.raw.get('Type')<block_end>@property<def_stmt>id self<block_start>"""
The unique ID of the message (usually a 64-bit integer greater than 0)
"""<line_sep><return>self.raw.get('NewMsgId')<block_end># content
@property<def_stmt>text self<block_start>"""
The text content of the message
"""<line_sep>_type=self.type<line_sep>_card=self.card<if_stmt>_type<eq>MAP<block_start>location=self.location<if_stmt>location<block_start><return>location.get('label')<block_end><block_end><elif_stmt>_card<block_start><if_stmt>_type<eq>CARD<block_start><return>_card.name<block_end><elif_stmt>_type<eq>FRIENDS<block_start><return>_card.raw.get('Content')<block_end><block_end>ret=self.raw.get('Text')<if_stmt>isinstance(ret str)<block_start><return>ret<block_end><block_end><def_stmt>get_file self save_path=<none><block_start>"""
Download the file content of a picture, video, recording, or attachment message.
Can be used together with :any:`Message.file_name`.
:param save_path: path to save the file to. If None, the raw bytes are returned directly
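For example (illustrative), to save under the original file name in the working directory:
msg.get_file(save_path=msg.file_name)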
"""<line_sep>_text=self.raw.get('Text')<if_stmt>callable(_text)<and>self.type<in>(PICTURE RECORDING ATTACHMENT VIDEO)<block_start><return>_text(save_path)<block_end><else_stmt><block_start><raise>ValueError('download method not found, or invalid message type')<block_end><block_end>@property<def_stmt>file_name self<block_start>"""
The file name of the file carried by the message
"""<line_sep><return>self.raw.get('FileName')<block_end>@property<def_stmt>file_size self<block_start>"""
The size of the file carried by the message
"""<line_sep><return>self.raw.get('FileSize')<block_end>@property<def_stmt>media_id self<block_start>"""
The media resource ID carried by file-type messages (empty for other types such as pictures, videos and recordings)
"""<line_sep><return>self.raw.get('MediaId')<block_end># group
@property<def_stmt>is_at self<block_start>"""
True when the message comes from a group chat and the bot is @-mentioned
"""<line_sep><return>self.raw.get('IsAt')<or>self.raw.get('isAt')<block_end># misc
@property<def_stmt>img_height self<block_start>"""
Image height
"""<line_sep><return>self.raw.get('ImgHeight')<block_end>@property<def_stmt>img_width self<block_start>"""
Image width
"""<line_sep><return>self.raw.get('ImgWidth')<block_end>@property<def_stmt>play_length self<block_start>"""
Video length
"""<line_sep><return>self.raw.get('PlayLength')<block_end>@property<def_stmt>voice_length self<block_start>"""
Voice recording length
"""<line_sep><return>self.raw.get('VoiceLength')<block_end>@property<def_stmt>url self<block_start>"""
The web page URL in sharing-type messages
"""<line_sep>_url=self.raw.get('Url')<if_stmt>isinstance(_url str)<block_start>_url=html.unescape(_url)<block_end><return>_url<block_end>@property<def_stmt>articles self<block_start>"""
The list of articles in an official account (MP) push (the first article's title/URL are the same as the message's text/url)
Each article has the following attributes:
* `title`: the title
* `summary`: the summary
* `url`: the article URL
* `cover`: the cover or thumbnail URL
"""<import_from_stmt>wxpy MP<if_stmt>self.type<eq>SHARING<and>isinstance(self.sender MP)<block_start>tree=ETree.fromstring(self.raw['Content'])<line_sep># noinspection SpellCheckingInspection
items=tree.findall('.//mmreader/category/item')<line_sep>article_list=list()<for_stmt>item items<block_start><def_stmt>find_text tag<block_start>found=item.find(tag)<if_stmt>found<is><not><none><block_start><return>found.text<block_end><block_end>article=Article()<line_sep>article.title=find_text('title')<line_sep>article.summary=find_text('digest')<line_sep>article.url=find_text('url')<line_sep>article.cover=find_text('cover')<line_sep>article_list.append(article)<block_end><return>article_list<block_end><block_end>@property<def_stmt>card self<block_start>"""
* the requesting user in a friend request
* the recommended user in a card message
"""<if_stmt>self.type<in>(CARD FRIENDS)<block_start><return>User(self.raw.get('RecommendInfo') self.bot)<block_end><block_end># time
@property<def_stmt>create_time self<block_start>"""
Server-side send time
"""<line_sep># noinspection PyBroadException
<try_stmt><block_start><return>datetime.fromtimestamp(self.raw.get('CreateTime'))<block_end><except_stmt><block_start><pass><block_end><block_end>@property<def_stmt>receive_time self<block_start>"""
Local receive time
"""<line_sep><return>self._receive_time<block_end>@property<def_stmt>latency self<block_start>"""
The latency of the message in seconds (the difference between send time and receive time)
"""<line_sep>create_time=self.create_time<if_stmt>create_time<block_start><return>(self.receive_time-create_time).total_seconds()<block_end><block_end>@property<def_stmt>location self<block_start>"""
The geographic location information in a location message
"""<try_stmt><block_start>ret=ETree.fromstring(self.raw['OriContent']).find('location').attrib<try_stmt><block_start>ret['x']=float(ret['x'])<line_sep>ret['y']=float(ret['y'])<line_sep>ret['scale']=int(ret['scale'])<line_sep>ret['maptype']=int(ret['maptype'])<block_end><except_stmt>(KeyError ValueError)<block_start><pass><block_end><return>ret<block_end><except_stmt>(TypeError KeyError ValueError ETree.ParseError)<block_start><pass><block_end><block_end># chats
@property<def_stmt>chat self<block_start>"""
The chat session the message belongs to, i.e.:
* for messages sent by yourself, the receiver of the message
* for messages sent by others, the sender of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""<if_stmt>self.raw.get('FromUserName')<eq>self.bot.self.user_name<block_start><return>self.receiver<block_end><else_stmt><block_start><return>self.sender<block_end><block_end>@property<def_stmt>sender self<block_start>"""
The sender of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""<line_sep><return>self._get_chat_by_user_name(self.raw.get('FromUserName'))<block_end>@property<def_stmt>receiver self<block_start>"""
The receiver of the message
:rtype: :class:`wxpy.User`, :class:`wxpy.Group`
"""<line_sep><return>self._get_chat_by_user_name(self.raw.get('ToUserName'))<block_end>@property<def_stmt>member self<block_start>"""
* if the message comes from a group chat, this is the actual sender (the specific group member)
* if the message comes from any other chat (not a group chat), this is None
:rtype: NoneType, :class:`wxpy.Member`
"""<if_stmt>isinstance(self.chat Group)<block_start><if_stmt>self.sender<eq>self.bot.self<block_start><return>self.chat.self<block_end><else_stmt><block_start>actual_user_name=self.raw.get('ActualUserName')<for_stmt>_member self.chat.members<block_start><if_stmt>_member.user_name<eq>actual_user_name<block_start><return>_member<block_end><block_end><return>Member(dict(UserName=actual_user_name NickName=self.raw.get('ActualNickName')) self.chat)<block_end><block_end><block_end><def_stmt>_get_chat_by_user_name self user_name<block_start>"""
Find the corresponding chat object by user_name
:param user_name: the user_name to look up
:return: the matching chat object
"""<def_stmt>match_in_chats _chats<block_start><for_stmt>c _chats<block_start><if_stmt>c.user_name<eq>user_name<block_start><return>c<block_end><block_end><block_end>_chat=<none><if_stmt>user_name.startswith('@@')<block_start>_chat=match_in_chats(self.bot.groups())<block_end><elif_stmt>user_name<block_start>_chat=match_in_chats(self.bot.friends())<if_stmt><not>_chat<block_start>_chat=match_in_chats(self.bot.mps())<block_end><block_end><if_stmt><not>_chat<block_start>_chat=Chat(wrap_user_name(user_name) self.bot)<block_end><return>_chat<block_end><def_stmt>forward self chat prefix=<none> suffix=<none> raise_for_unsupported=<false><block_start>"""
Forward this message to another chat
The following message types are supported
* text (`TEXT`)
* video (`VIDEO`)
* file (`ATTACHMENT`)
* picture / custom sticker (`PICTURE`)
* but stickers from the sticker store are not supported
* card (`CARD`)
* only official account cards and personal cards sent by yourself are supported
* sharing (`SHARING`)
* converted to a text message of the form `title + link`
* recording (`RECORDING`)
* sent as a file
* map (`MAP`)
* converted to a text message of the form `location name + map link`
:param Chat chat: the chat that receives the forwarded message
:param str prefix: **prefix** text added when forwarding; a line break is added automatically when the original message is text
:param str suffix: **suffix** text added when forwarding; a line break is added automatically when the original message is text
:param bool raise_for_unsupported:
| when True, raise a `NotImplementedError` for unsupported message types
For example, forward the boss's messages out of the company group::
from wxpy import *
bot = Bot()
# locate the company group
company_group = ensure_one(bot.groups().search('公司微信群'))
# locate the boss
boss = ensure_one(company_group.search('老板大名'))
# forward the boss's messages to the file transfer helper
@bot.register(company_group)
def forward_boss_message(msg):
if msg.member == boss:
msg.forward(bot.file_helper, prefix='老板发言')
# block the thread
embed()
"""<line_sep>logger.info('{}: forwarding to {}: {}'.format(self.bot chat self))<def_stmt>wrapped_send send_type *args **kwargs<block_start><if_stmt>send_type<eq>'msg'<block_start><if_stmt>args<block_start>text=args[0]<block_end><elif_stmt>kwargs<block_start>text=kwargs['msg']<block_end><else_stmt><block_start>text=self.text<block_end>ret=chat.send_msg('{}{}{}'.format(str(prefix)+'\n'<if>prefix<else>'' text '\n'+str(suffix)<if>suffix<else>'' ))<block_end><else_stmt><block_start><if_stmt>prefix<block_start>chat.send_msg(prefix)<block_end>ret=getattr(chat 'send_{}'.format(send_type))(*args **kwargs)<if_stmt>suffix<block_start>chat.send_msg(suffix)<block_end><block_end><return>ret<block_end><def_stmt>download_and_send <block_start>fd,path=tempfile.mkstemp(suffix='_{}'.format(self.file_name) dir=self.bot.temp_dir.name)<try_stmt><block_start>self.get_file(path)<if_stmt>self.type<eq>PICTURE<block_start><return>wrapped_send('image' path)<block_end><elif_stmt>self.type<eq>VIDEO<block_start><return>wrapped_send('video' path)<block_end><else_stmt><block_start><return>wrapped_send('file' path)<block_end><block_end><finally_stmt><block_start>os.close(fd)<block_end><block_end><def_stmt>raise_properly text<block_start>logger.warning(text)<if_stmt>raise_for_unsupported<block_start><raise>NotImplementedError(text)<block_end><block_end><if_stmt>self.type<eq>TEXT<block_start><return>wrapped_send('msg')<block_end><elif_stmt>self.type<eq>SHARING<block_start><return>wrapped_send('msg' '{}\n{}'.format(self.text self.url))<block_end><elif_stmt>self.type<eq>MAP<block_start><return>wrapped_send('msg' '{}: {}\n{}'.format(self.location['poiname'] self.location['label'] self.url))<block_end><elif_stmt>self.type<eq>ATTACHMENT# noinspection SpellCheckingInspection
<block_start>content="<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''>"<concat>"<title>{file_name}</title><des></des><action></action>"<concat>"<type>6</type><content></content><url></url><lowurl></lowurl>"<concat>"<appattach><totallen>{file_size}</totallen><attachid>{media_id}</attachid>"<concat>"<fileext>{file_ext}</fileext></appattach><extinfo></extinfo></appmsg>"<line_sep>content=content.format(file_name=self.file_name file_size=self.file_size media_id=self.media_id file_ext=os.path.splitext(self.file_name)[1].replace('.' ''))<line_sep><return>wrapped_send(send_type='raw_msg' raw_type=self.raw['MsgType'] raw_content=content uri='/webwxsendappmsg?fun=async&f=json')<block_end><elif_stmt>self.type<eq>CARD<block_start><if_stmt>self.card.raw.get('AttrStatus')<and>self.sender<ne>self.bot.self# a personal card that was not sent by ourselves
<block_start>raise_properly('Personal cards sent from others are unsupported:\n{}'.format(self))<block_end><else_stmt><block_start><return>wrapped_send(send_type='raw_msg' raw_type=self.raw['MsgType'] raw_content=self.raw['Content'] uri='/webwxsendmsg')<block_end><block_end><elif_stmt>self.type<eq>PICTURE<block_start><if_stmt>self.raw.get('HasProductId')# a sticker from the sticker store
<block_start>raise_properly('Stickers from store are unsupported:\n{}'.format(self))<block_end><else_stmt><block_start><return>download_and_send()<block_end><block_end><elif_stmt>self.type<eq>VIDEO<block_start><return>download_and_send()<block_end><elif_stmt>self.type<eq>RECORDING<block_start><return>download_and_send()<block_end><else_stmt><block_start>raise_properly('Unsupported message type:\n{}'.format(self))<block_end><block_end><block_end> |
"""ADUROLIGHT module for custom device handlers."""<line_sep> |
# Copyright 2021 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>itertools<import_stmt>logging<import_from_stmt>datetime datetime<import_from_stmt>multiprocessing.pool ThreadPool<import_from_stmt>queue Empty Queue<import_from_stmt>threading Lock Thread<import_from_stmt>typing Any Callable Dict Iterator List Optional Sequence Tuple<import_from_stmt>pydantic PositiveInt StrictStr<import_from_stmt>pydantic.typing Literal<import_from_stmt>feast Entity utils<import_from_stmt>feast.errors FeastProviderLoginError<import_from_stmt>feast.feature_view FeatureView<import_from_stmt>feast.infra.infra_object DATASTORE_INFRA_OBJECT_CLASS_TYPE InfraObject<import_from_stmt>feast.infra.online_stores.helpers compute_entity_id<import_from_stmt>feast.infra.online_stores.online_store OnlineStore<import_from_stmt>feast.protos.feast.core.DatastoreTable_pb2 DatastoreTable<as>DatastoreTableProto <import_from_stmt>feast.protos.feast.core.InfraObject_pb2 InfraObject<as>InfraObjectProto<import_from_stmt>feast.protos.feast.types.EntityKey_pb2 EntityKey<as>EntityKeyProto<import_from_stmt>feast.protos.feast.types.Value_pb2 Value<as>ValueProto<import_from_stmt>feast.repo_config FeastConfigBaseModel RepoConfig<import_from_stmt>feast.usage log_exceptions_and_usage tracing_span<line_sep>LOGGER=logging.getLogger(__name__)<try_stmt><block_start><import_from_stmt>google.auth.exceptions DefaultCredentialsError<import_from_stmt>google.cloud datastore<import_from_stmt>google.cloud.datastore.client Key<block_end><except_stmt>ImportError<as>e<block_start><import_from_stmt>feast.errors FeastExtrasDependencyImportError<line_sep><raise>FeastExtrasDependencyImportError("gcp" str(e))<block_end>ProtoBatch=Sequence[Tuple[EntityKeyProto Dict[str ValueProto] datetime Optional[datetime]]]<class_stmt>DatastoreOnlineStoreConfig(FeastConfigBaseModel)<block_start>"""Online store config for GCP Datastore"""<line_sep>type:Literal["datastore"]="datastore"<line_sep>""" Online store type selector"""<line_sep>project_id:Optional[StrictStr]=<none><line_sep>""" (optional) GCP Project Id """<line_sep>namespace:Optional[StrictStr]=<none><line_sep>""" (optional) Datastore namespace """<line_sep>write_concurrency:Optional[PositiveInt]=40<line_sep>""" (optional) Amount of threads to use when writing batches of feature rows into Datastore"""<line_sep>write_batch_size:Optional[PositiveInt]=50<line_sep>""" (optional) Amount of feature rows per batch being written into Datastore"""<block_end><class_stmt>DatastoreOnlineStore(OnlineStore)<block_start>"""
OnlineStore is an object used for all interaction between Feast and the service used for online storage of
features.
"""<line_sep>_client:Optional[datastore.Client]=<none><line_sep>@log_exceptions_and_usage(online_store="datastore")<def_stmt>update self config:RepoConfig tables_to_delete:Sequence[FeatureView] tables_to_keep:Sequence[FeatureView] entities_to_delete:Sequence[Entity] entities_to_keep:Sequence[Entity] partial:bool <block_start>online_config=config.online_store<assert_stmt>isinstance(online_config DatastoreOnlineStoreConfig)<line_sep>client=self._get_client(online_config)<line_sep>feast_project=config.project<for_stmt>table tables_to_keep<block_start>key=client.key("Project" feast_project "Table" table.name)<line_sep>entity=datastore.Entity(key=key exclude_from_indexes=("created_ts" "event_ts" "values"))<line_sep>entity.update({"created_ts":datetime.utcnow()})<line_sep>client.put(entity)<block_end><for_stmt>table tables_to_delete<block_start>_delete_all_values(client client.key("Project" feast_project "Table" table.name))<line_sep># Delete the table metadata datastore entity
key=client.key("Project" feast_project "Table" table.name)<line_sep>client.delete(key)<block_end><block_end><def_stmt>teardown self config:RepoConfig tables:Sequence[FeatureView] entities:Sequence[Entity] <block_start>online_config=config.online_store<assert_stmt>isinstance(online_config DatastoreOnlineStoreConfig)<line_sep>client=self._get_client(online_config)<line_sep>feast_project=config.project<for_stmt>table tables<block_start>_delete_all_values(client client.key("Project" feast_project "Table" table.name))<line_sep># Delete the table metadata datastore entity
key=client.key("Project" feast_project "Table" table.name)<line_sep>client.delete(key)<block_end><block_end><def_stmt>_get_client self online_config:DatastoreOnlineStoreConfig<block_start><if_stmt><not>self._client<block_start>self._client=_initialize_client(online_config.project_id online_config.namespace)<block_end><return>self._client<block_end>@log_exceptions_and_usage(online_store="datastore")<def_stmt>online_write_batch self config:RepoConfig table:FeatureView data:List[Tuple[EntityKeyProto Dict[str ValueProto] datetime Optional[datetime]]] progress:Optional[Callable[[int] Any]] <arrow><none><block_start>online_config=config.online_store<assert_stmt>isinstance(online_config DatastoreOnlineStoreConfig)<line_sep>client=self._get_client(online_config)<line_sep>write_concurrency=online_config.write_concurrency<line_sep>write_batch_size=online_config.write_batch_size<line_sep>feast_project=config.project<with_stmt>ThreadPool(processes=write_concurrency)<as>pool<block_start>pool.map(<lambda>b:self._write_minibatch(client feast_project table b progress) self._to_minibatches(data batch_size=write_batch_size) )<block_end><block_end>@staticmethod<def_stmt>_to_minibatches data:ProtoBatch batch_size<arrow>Iterator[ProtoBatch]<block_start>"""
Split data into minibatches, making sure we stay under GCP datastore transaction size
limits.
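For example, with the default ``write_batch_size`` of 50, a payload of 120 feature rows is yielded as minibatches of 50, 50 and 20 rows.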
"""<line_sep>iterable=iter(data)<while_stmt><true><block_start>batch=list(itertools.islice(iterable batch_size))<if_stmt>len(batch)<g>0<block_start><yield>batch<block_end><else_stmt><block_start><break><block_end><block_end><block_end>@staticmethod<def_stmt>_write_minibatch client project:str table:FeatureView data:Sequence[Tuple[EntityKeyProto Dict[str ValueProto] datetime Optional[datetime]]] progress:Optional[Callable[[int] Any]] <block_start>entities=[]<for_stmt>entity_key,features,timestamp,created_ts data<block_start>document_id=compute_entity_id(entity_key)<line_sep>key=client.key("Project" project "Table" table.name "Row" document_id )<line_sep>entity=datastore.Entity(key=key exclude_from_indexes=("created_ts" "event_ts" "values"))<line_sep>content_entity=datastore.Entity(exclude_from_indexes=tuple(features.keys()))<for_stmt>k,v features.items()<block_start>content_entity[k]=v.SerializeToString()<block_end>entity["key"]=entity_key.SerializeToString()<line_sep>entity["values"]=content_entity<line_sep>entity["event_ts"]=utils.make_tzaware(timestamp)<line_sep>entity["created_ts"]=(utils.make_tzaware(created_ts)<if>created_ts<is><not><none><else><none>)<line_sep>entities.append(entity)<block_end><with_stmt>client.transaction()<block_start>client.put_multi(entities)<block_end><if_stmt>progress<block_start>progress(len(entities))<block_end><block_end>@log_exceptions_and_usage(online_store="datastore")<def_stmt>online_read self config:RepoConfig table:FeatureView entity_keys:List[EntityKeyProto] requested_features:Optional[List[str]]=<none> <arrow>List[Tuple[Optional[datetime] Optional[Dict[str ValueProto]]]]<block_start>online_config=config.online_store<assert_stmt>isinstance(online_config DatastoreOnlineStoreConfig)<line_sep>client=self._get_client(online_config)<line_sep>feast_project=config.project<line_sep>keys:List[Key]=[]<line_sep>result:List[Tuple[Optional[datetime] Optional[Dict[str ValueProto]]]]=[]<for_stmt>entity_key entity_keys<block_start>document_id=compute_entity_id(entity_key)<line_sep>key=client.key("Project" feast_project "Table" table.name "Row" document_id)<line_sep>keys.append(key)<block_end># NOTE: get_multi doesn't return values in the same order as the keys in the request.
# Also, len(values) can be less than len(keys) in the case of missing values.
<with_stmt>tracing_span(name="remote_call")<block_start>values=client.get_multi(keys)<block_end>values_dict={v.key:v<for>v values}<if>values<is><not><none><else>{}<for_stmt>key keys<block_start><if_stmt>key<in>values_dict<block_start>value=values_dict[key]<line_sep>res={}<for_stmt>feature_name,value_bin value["values"].items()<block_start>val=ValueProto()<line_sep>val.ParseFromString(value_bin)<line_sep>res[feature_name]=val<block_end>result.append((value["event_ts"] res))<block_end><else_stmt><block_start>result.append((<none> <none>))<block_end><block_end><return>result<block_end><block_end><def_stmt>_delete_all_values client key<block_start>"""
Delete all data under the key path in datastore.
Creates and uses a queue of lists of entity keys, which are batch deleted
by multiple threads.
"""<class_stmt>AtomicCounter(object)# for tracking how many deletions have already occurred; not used outside this method
<block_start><def_stmt>__init__ self<block_start>self.value=0<line_sep>self.lock=Lock()<block_end><def_stmt>increment self<block_start><with_stmt>self.lock<block_start>self.value<augadd>1<block_end><block_end><block_end>BATCH_SIZE=500# Dec 2021: delete_multi has a max size of 500: https://cloud.google.com/datastore/docs/concepts/limits
NUM_THREADS=3<line_sep>deletion_queue=Queue()<line_sep>status_info_counter=AtomicCounter()<def_stmt>worker shared_counter<block_start><while_stmt><true><block_start><try_stmt><block_start>job=deletion_queue.get(block=<false>)<block_end><except_stmt>Empty<block_start><return><block_end>client.delete_multi(job)<line_sep>shared_counter.increment()<line_sep>LOGGER.debug(f"batch deletions completed: {shared_counter.value} ({shared_counter.value<times>BATCH_SIZE} total entries) & outstanding queue size: {deletion_queue.qsize()}")<line_sep>deletion_queue.task_done()<block_end><block_end>query=client.query(kind="Row" ancestor=key)<for_stmt>page query.fetch().pages<block_start>deletion_queue.put([entity.key<for>entity page])<block_end><for_stmt>_ range(NUM_THREADS)<block_start>Thread(target=worker args=(status_info_counter )).start()<block_end>deletion_queue.join()<block_end><def_stmt>_initialize_client project_id:Optional[str] namespace:Optional[str]<arrow>datastore.Client<block_start><try_stmt><block_start>client=datastore.Client(project=project_id namespace=namespace )<line_sep><return>client<block_end><except_stmt>DefaultCredentialsError<as>e<block_start><raise>FeastProviderLoginError(str(e)+'\nIt may be necessary to run "gcloud auth application-default login" if you would like to use your '<concat>"local Google Cloud account ")<block_end><block_end><class_stmt>DatastoreTable(InfraObject)<block_start>"""
A Datastore table managed by Feast.
Attributes:
project: The Feast project of the table.
name: The name of the table.
project_id (optional): The GCP project id.
namespace (optional): Datastore namespace.
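A minimal construction sketch (the Feast project, table name and GCP project id below are hypothetical)::
table = DatastoreTable(project="my_feast_project", name="driver_stats", project_id="my-gcp-project")
table.update()  # writes the table's metadata entity to Datastore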
"""<line_sep>project:str<line_sep>project_id:Optional[str]<line_sep>namespace:Optional[str]<def_stmt>__init__ self project:str name:str project_id:Optional[str]=<none> namespace:Optional[str]=<none> <block_start>super().__init__(name)<line_sep>self.project=project<line_sep>self.project_id=project_id<line_sep>self.namespace=namespace<block_end><def_stmt>to_infra_object_proto self<arrow>InfraObjectProto<block_start>datastore_table_proto=self.to_proto()<line_sep><return>InfraObjectProto(infra_object_class_type=DATASTORE_INFRA_OBJECT_CLASS_TYPE datastore_table=datastore_table_proto )<block_end><def_stmt>to_proto self<arrow>Any<block_start>datastore_table_proto=DatastoreTableProto()<line_sep>datastore_table_proto.project=self.project<line_sep>datastore_table_proto.name=self.name<if_stmt>self.project_id<block_start>datastore_table_proto.project_id.value=self.project_id<block_end><if_stmt>self.namespace<block_start>datastore_table_proto.namespace.value=self.namespace<block_end><return>datastore_table_proto<block_end>@staticmethod<def_stmt>from_infra_object_proto infra_object_proto:InfraObjectProto<arrow>Any<block_start>datastore_table=DatastoreTable(project=infra_object_proto.datastore_table.project name=infra_object_proto.datastore_table.name )<line_sep># Distinguish between null and empty string, since project_id and namespace are StringValues.
<if_stmt>infra_object_proto.datastore_table.HasField("project_id")<block_start>datastore_table.project_id=(infra_object_proto.datastore_table.project_id.value)<block_end><if_stmt>infra_object_proto.datastore_table.HasField("namespace")<block_start>datastore_table.namespace=(infra_object_proto.datastore_table.namespace.value)<block_end><return>datastore_table<block_end>@staticmethod<def_stmt>from_proto datastore_table_proto:DatastoreTableProto<arrow>Any<block_start>datastore_table=DatastoreTable(project=datastore_table_proto.project name=datastore_table_proto.name )<line_sep># Distinguish between null and empty string, since project_id and namespace are StringValues.
<if_stmt>datastore_table_proto.HasField("project_id")<block_start>datastore_table.project_id=datastore_table_proto.project_id.value<block_end><if_stmt>datastore_table_proto.HasField("namespace")<block_start>datastore_table.namespace=datastore_table_proto.namespace.value<block_end><return>datastore_table<block_end><def_stmt>update self<block_start>client=_initialize_client(self.project_id self.namespace)<line_sep>key=client.key("Project" self.project "Table" self.name)<line_sep>entity=datastore.Entity(key=key exclude_from_indexes=("created_ts" "event_ts" "values"))<line_sep>entity.update({"created_ts":datetime.utcnow()})<line_sep>client.put(entity)<block_end><def_stmt>teardown self<block_start>client=_initialize_client(self.project_id self.namespace)<line_sep>key=client.key("Project" self.project "Table" self.name)<line_sep>_delete_all_values(client key)<line_sep># Delete the table metadata datastore entity
client.delete(key)<block_end><block_end> |
"""
Module description:
"""<line_sep>__version__='0.3.1'<line_sep>__author__='<NAME>, <NAME>'<line_sep>__email__='<EMAIL>, <EMAIL>'<import_from_stmt>types SimpleNamespace<import_stmt>typing<as>t<import_stmt>numpy<as>np<import_stmt>logging<as>pylog<import_from_stmt>elliot.utils logging<import_from_stmt>hyperopt STATUS_OK<class_stmt>ModelCoordinator(object)<block_start>"""
This class handles the selection of hyperparameters for the hyperparameter tuning realized with HyperOpt.
"""<def_stmt>__init__ self data_objs base:SimpleNamespace params model_class:t.ClassVar test_fold_index:int<block_start>"""
The constructor creates a placeholder for the recommender model.
:param data_objs: the list of data objects, one for each train/validation fold
:param base: a SimpleNamespace that contains the configuration (main level) options
:param params: a SimpleNamespace that contains the hyper-parameters of the model
:param model_class: the class of the recommendation model
:param test_fold_index: the index of the test fold currently being evaluated
"""<line_sep>self.logger=logging.get_logger(self.__class__.__name__ pylog.CRITICAL<if>base.config_test<else>pylog.DEBUG)<line_sep>self.data_objs=data_objs<line_sep>self.base=base<line_sep>self.params=params<line_sep>self.model_class=model_class<line_sep>self.test_fold_index=test_fold_index<line_sep>self.model_config_index=0<block_end><def_stmt>objective self args<block_start>"""
This function respects the signature and the return format required for HyperOpt optimization
:param args: a Dictionary that contains the new hyper-parameter values that will be used in the current run
:return: a Dictionary with the loss and status required by HyperOpt,
and the params and results required by the framework
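A minimal usage sketch (``coordinator`` is an already-built ModelCoordinator and the search space below is purely illustrative)::
from hyperopt import fmin, hp, tpe
space = {'factors': hp.choice('factors', [8, 16, 32])}
best = fmin(fn=coordinator.objective, space=space, algo=tpe.suggest, max_evals=10)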
"""<line_sep>sampled_namespace=SimpleNamespace(**args)<line_sep>model_params=SimpleNamespace(**self.params[0].__dict__)<line_sep>self.logger.info("Hyperparameter tuning exploration:")<for_stmt>(k v) sampled_namespace.__dict__.items()<block_start>model_params.__setattr__(k v)<line_sep>self.logger.info(f"{k} set to {model_params.__getattribute__(k)}")<block_end>losses=[]<line_sep>results=[]<for_stmt>trainval_index,data_obj enumerate(self.data_objs)<block_start>self.logger.info(f"Exploration: Hyperparameter exploration number {self.model_config_index+1}")<line_sep>self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")<line_sep>self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")<line_sep>model=self.model_class(data=data_obj config=self.base params=model_params)<line_sep>model.train()<line_sep>losses.append(model.get_loss())<line_sep>results.append(model.get_results())<block_end>self.model_config_index<augadd>1<line_sep>loss=np.average(losses)<line_sep>results=self._average_results(results)<line_sep><return>{'loss':loss 'status':STATUS_OK 'params':model.get_params() 'val_results':{k:result_dict["val_results"]<for>k,result_dict results.items()} 'val_statistical_results':{k:result_dict["val_statistical_results"]<for>k,result_dict model.get_results().items()} 'test_results':{k:result_dict["test_results"]<for>k,result_dict results.items()} 'test_statistical_results':{k:result_dict["test_statistical_results"]<for>k,result_dict model.get_results().items()} 'name':model.name}<block_end><def_stmt>single self<block_start>"""
This function respects the signature and the return format required for HyperOpt optimization,
but evaluates a single configuration using the hyper-parameters provided in self.params (no sampling is performed).
:return: a Dictionary with the loss and status required by HyperOpt,
and the params and results required by the framework
"""<line_sep>self.logger.info("Hyperparameters:")<for_stmt>k,v self.params.__dict__.items()<block_start>self.logger.info(f"{k} set to {v}")<block_end>losses=[]<line_sep>results=[]<for_stmt>trainval_index,data_obj enumerate(self.data_objs)<block_start>self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")<line_sep>self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")<line_sep>model=self.model_class(data=data_obj config=self.base params=self.params)<line_sep>model.train()<line_sep>losses.append(model.get_loss())<line_sep>results.append(model.get_results())<block_end>loss=np.average(losses)<line_sep>results=self._average_results(results)<line_sep><return>{'loss':loss 'status':STATUS_OK 'params':model.get_params() 'val_results':{k:result_dict["val_results"]<for>k,result_dict results.items()} 'val_statistical_results':{k:result_dict["val_statistical_results"]<for>k,result_dict model.get_results().items()} 'test_results':{k:result_dict["test_results"]<for>k,result_dict results.items()} 'test_statistical_results':{k:result_dict["test_statistical_results"]<for>k,result_dict model.get_results().items()} 'name':model.name}<block_end>@staticmethod<def_stmt>_average_results results_list<block_start>ks=list(results_list[0].keys())<line_sep>eval_result_types=["val_results" "test_results"]<line_sep>metrics=list(results_list[0][ks[0]]["val_results"].keys())<line_sep><return>{k:{type_:{metric:np.average([fold_result[k][type_][metric]<for>fold_result results_list])<for>metric metrics}<for>type_ eval_result_types}<for>k ks}<block_end><block_end> |
"""Error definitions for SCMClient implementations."""<import_from_future_stmt> unicode_literals<class_stmt>SCMError(Exception)<block_start>"""A generic error from an SCM."""<block_end><class_stmt>AuthenticationError(Exception)<block_start>"""An error for when authentication fails."""<block_end><class_stmt>CreateCommitError(Exception)<block_start>"""The creation of a commit has failed or was aborted."""<block_end><class_stmt>MergeError(Exception)<block_start>"""An error for when merging two branches fails."""<block_end><class_stmt>PushError(Exception)<block_start>"""An error for when pushing a branch to upstream fails."""<block_end><class_stmt>AmendError(Exception)<block_start>"""An error for when amending a commit fails."""<block_end><class_stmt>OptionsCheckError(Exception)<block_start>"""An error for when command line options are used incorrectly."""<block_end><class_stmt>InvalidRevisionSpecError(Exception)<block_start>"""An error for when the specified revisions are invalid."""<block_end><class_stmt>MinimumVersionError(Exception)<block_start>"""An error for when software doesn't meet version requirements."""<block_end><class_stmt>TooManyRevisionsError(InvalidRevisionSpecError)<block_start>"""An error for when too many revisions were specified."""<def_stmt>__init__ self<block_start>"""Initialize the error."""<line_sep>super(TooManyRevisionsError self).__init__('Too many revisions specified')<block_end><block_end><class_stmt>EmptyChangeError(Exception)<block_start>"""An error for when there are no changed files."""<def_stmt>__init__ self<block_start>"""Initialize the error."""<line_sep>super(EmptyChangeError self).__init__("Couldn't find any affected files for this change.")<block_end><block_end> |
<import_from_stmt>factory fuzzy<import_from_stmt>factory.django DjangoModelFactory<import_from_stmt>tidings.models Watch<class_stmt>WatchFactory(DjangoModelFactory)<block_start><class_stmt>Meta<block_start>model=Watch<block_end>event_type="fooevent"<line_sep>is_active=<true><line_sep>secret=fuzzy.FuzzyText(length=10)<block_end> |
#
# This file is part of GreatFET
#
<import_from_future_stmt> print_function<import_stmt>sys<import_from_stmt>warnings warn<import_from_stmt>..interface GreatFETInterface<import_from_stmt>..support.bits bits<import_from_stmt>..protocol.jtag_svf SVFParser SVFEventHandler<class_stmt>JTAGPatternError(IOError)<block_start>""" Class for errors that come from a JTAG read not matching the expected response. """<def_stmt>__init__ self message result<block_start>self.result=result<line_sep>super(JTAGPatternError self).__init__(message)<block_end><block_end># FIXME: should this be an instance of a 'target' class?
<class_stmt>JTAGDevice(GreatFETInterface)<block_start>""" Class representing a single device on a JTAG scan chain. """<line_sep>DESCRIPTION="no description available"<line_sep># A list of supported IDCODEs for the relevant class.
# Used unless the supports_idcode() method is overridden.
SUPPORTED_IDCODES=[]<line_sep># A list of any GreatFET subcommands that are useful for driving this target;
# for informational use.
SUPPORTED_CONSOLE_COMMANDS=[]<line_sep>@classmethod<def_stmt>from_idcode cls idcode position_in_chain=0<block_start>""" Attempts to create a JTAGDevice object that fits the provided IDCODE. """<line_sep># Assume the generic device class is the most appropriate class for the device, initially.
most_appropriate_class=cls<line_sep># Search each imported subclass for one that supports this IDCODE.
<for_stmt>subclass cls.__subclasses__()<block_start><if_stmt>subclass.supports_idcode(idcode)<block_start>most_appropriate_class=subclass<line_sep><break><block_end><block_end># Finally, create an instance of the most appropriate class for this object.
instance=object.__new__(most_appropriate_class)<line_sep>most_appropriate_class.__init__(instance idcode position_in_chain)<line_sep><return>instance<block_end>@classmethod<def_stmt>supports_idcode cls idcode<block_start>"""
Returns true iff this class supports the given IDCODE.
This default implementation uses SUPPORTED_IDCODES, but subclasses can override this
for more nuanced behavior.
"""<line_sep><return>idcode<in>cls.SUPPORTED_IDCODES<block_end>@classmethod<def_stmt>supported_console_commands cls<block_start>""" Returns a list of GreatFET subcommands that provide access to the given class. """<line_sep><return>cls.SUPPORTED_CONSOLE_COMMANDS<block_end><def_stmt>idcode self<block_start>""" Returns this device's IDCODE. """<line_sep><return>self._idcode<block_end><def_stmt>description self<block_start>""" Returns a short description of the device. """<line_sep><return>self.DESCRIPTION<block_end><def_stmt>__init__ self idcode position_in_chain<block_start>self._idcode=idcode<block_end><block_end><class_stmt>JTAGChain(GreatFETInterface)<block_start>""" Class representing a JTAG scan-chain interface. """<line_sep># Short name for this type of interface.
INTERFACE_SHORT_NAME="jtag"<line_sep>#
# Simple mapping that captures the various TAP FSM states.
# Names from the JTAG SVF specification are used directly, so we can easily parse SVF files.
#
STATE_PROGRESSIONS={'RESET':{0:'IDLE' 1:'RESET'} 'IDLE':{0:'IDLE' 1:'DRSELECT'} # Data register path.
'DRSELECT':{0:'DRCAPTURE' 1:'IRSELECT'} 'DRCAPTURE':{0:'DRSHIFT' 1:'DREXIT1'} 'DRSHIFT':{0:'DRSHIFT' 1:'DREXIT1'} 'DREXIT1':{0:'DRPAUSE' 1:'DRUPDATE'} 'DRPAUSE':{0:'DRPAUSE' 1:'DREXIT2'} 'DREXIT2':{0:'DRSHIFT' 1:'DRUPDATE'} 'DRUPDATE':{0:'IDLE' 1:'DRSELECT'} # Instruction register path.
'IRSELECT':{0:'IRCAPTURE' 1:'RESET'} 'IRCAPTURE':{0:'IRSHIFT' 1:'IREXIT1'} 'IRSHIFT':{0:'IRSHIFT' 1:'IREXIT1'} 'IREXIT1':{0:'IRPAUSE' 1:'IRUPDATE'} 'IRPAUSE':{0:'IRPAUSE' 1:'IREXIT2'} 'IREXIT2':{0:'IRSHIFT' 1:'IRUPDATE'} 'IRUPDATE':{0:'IDLE' 1:'DRSELECT'} }<def_stmt>__init__ self board max_frequency=405e3<block_start>""" Creates a new JTAG scan-chain interface.
Parameters:
board -- the GreatFET board we're working with.
max_frequency -- the maximum frequency at which we should attempt to scan out data
"""<line_sep># Grab our JTAG API object.
self.api=board.apis.jtag<line_sep># Assume we're starting our chain in 'IDLE'.
self.state='IDLE'<line_sep># Configure our chain to run at the relevant frequency.
self.frequency=int(max_frequency)<line_sep>self.max_bits_per_scan=self.api.configure(self.frequency)<block_end><def_stmt>set_frequency self max_frequency<block_start>""" Sets the operating frequency of future transactions on this JTAG chain. """<line_sep>self.frequency=int(max_frequency)<line_sep>self.api.configure(self.frequency)<block_end><def_stmt>_progress_state self tms_value<block_start>""" Adjusts our internal model of the TAP FSM to account for an applied TMS value. """<line_sep># Normalize our state to always be 1 or 0.
tms_value=1<if>tms_value<else>0<line_sep># Move our state to the next state per our TAP FSM.
self.state=self.STATE_PROGRESSIONS[self.state][tms_value]<block_end><def_stmt>pulse_tms self cycles=1 asserted=<true><block_start>""" Asserts or de-asserts TMS for the given number of cycles; used for navigating the TAP FSM. """<line_sep># Run the clock for a single cycle, with TMS asserted each time.
<for_stmt>_ range(cycles)<block_start>self.api.run_clock(1 asserted)<line_sep>self._progress_state(asserted)<block_end><block_end><def_stmt>initialize_chain self<block_start>""" Put the scan chain into its initial state, allowing fresh JTAG communications. """<line_sep># Pulse the TMS line five times -- this brings us into the TEST_RESET state, which resets the test logic.
self.pulse_tms(5)<line_sep># We now should know that we're in the RESET state.
<assert_stmt>(self.state<eq>'RESET')<block_end><def_stmt>_receive_data self bits_to_scan advance_state=<false><block_start>""" Performs a raw scan-in of data, and returns the result. """<line_sep># Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result=self.api.scan_in(bits_to_scan advance_state)<line_sep># Once we're complete, advance our state, if necessary.
<if_stmt>advance_state<block_start>self._progress_state(<true>)<block_end><return>result<block_end><def_stmt>_pad_data_to_length self length_in_bits data=<none><block_start>""" Pads a given data set to a given length, in bits. """<line_sep># Compute how many bytes we need the data to be.
target_length_bytes=(length_in_bits+7)<floordiv>8<line_sep># If our data doesn't need padding, return it directly.
<if_stmt>data<and>(len(data)<ge>target_length_bytes)<block_start><return>data<block_end># Create a mutable array of data; and add any data we have.
padded=bytearray()<if_stmt>data<block_start>padded.extend(data)<block_end># Figure out how much padding we need.
padding_necessary=target_length_bytes-len(padded)<line_sep>padded.extend(b"\0"<times>padding_necessary)<line_sep># Return our padded data.
<return>padded<block_end><def_stmt>_transmit_data self bits_to_scan data=<none> advance_state=<false><block_start>""" Performs a raw scan-out of data, discarding any result. """<line_sep># Pad our data to the relevant length.
data=self._pad_data_to_length(bits_to_scan data)<line_sep># Perform our actual data scan-out.
# TODO: break larger-than-maximum transactions into smaller ones.
self.api.scan_out(bits_to_scan advance_state data)<line_sep># Once we're complete, advance our state, if necessary.
<if_stmt>advance_state<block_start>self._progress_state(<true>)<block_end><block_end><def_stmt>_scan_data self bits_to_scan byte_data advance_state=<false><block_start>""" Performs a raw scan-in of data, and returns the result. """<line_sep># Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result=self.api.scan(bits_to_scan advance_state byte_data)<line_sep># Once we're complete, advance our state, if necessary.
<if_stmt>advance_state<block_start>self._progress_state(<true>)<block_end><return>result<block_end><def_stmt>_next_hop_towards self state<block_start>""" Identify the next TMS value we should apply to move towards the given state. """<line_sep># Special case: if we're headed to RESET, then our next hop is always 1.
<if_stmt>state<eq>'RESET'<block_start><return>1<block_end># Special case: if we're in the Select-DR state, we'll steer either towards the instruction column ('1')
# or data column ('0') based on the target state.
<if_stmt>self.state<eq>'DRSELECT'<block_start><return>1<if>'IR'<in>state<else>0<block_end># Grab the next states for TMS values of one and zero.
next_states=self.STATE_PROGRESSIONS[self.state]<line_sep># We'll apply a simple heuristic to advance through the TAP FSM.
# First, we'll identify if providing a '1' would cause us to loop back towards the current state,
# which will occur if we'd stay in the same state with a '1', or if we'd move out of the core FSM.
towards_one_would_loop=(next_states[1]<eq>self.state)<or>(next_states[1]<eq>'RESET')<line_sep># Next, we'll apply the following simple heuristics:
# - If pulsing clock with TMS=0 would land us in the right state, do so.
# - If pulsing clock with TMS=1 would cause us to self, loop, pulse clock with TMS=0.
# - Otherwise, pulse clock with TMS=1, as TMS=1 generally moves us through the TAP FSM.
target_state_is_towards_zero=(next_states[0]<eq>state)<line_sep><return>0<if>(target_state_is_towards_zero<or>towards_one_would_loop)<else>1<block_end><def_stmt>_ensure_in_state self state<block_start>"""
Ensures the JTAG TAP FSM is in the given state.
If we're not, progress the TAP FSM by pulsing TMS until we reach the relevant state.
"""<line_sep># Progress through the TAP FSM until we're in the right state.
<while_stmt>self.state<ne>state# Identify the direction we'll need to move in order to move closer to our target state...
<block_start>next_hop=self._next_hop_towards(state)<line_sep># ... and apply it.
self.pulse_tms(asserted=next_hop)<block_end><block_end><def_stmt>move_to_state self state_name<block_start>""" Moves the JTAG scan chain to the relevant state.
Parameters:
state_name: The target state to wind up in, as a string. States are accepted in the format
defined in the JTAG SVF standard, and thus should be one of:
"RESET", "IDLE", "DRSELECT", "DRCAPTURE", "DRSHIFT", "DREXIT1", "DRPAUSE",
"DREXIT2", "DRUPDATE", "IRSELECT", "IRCAPTURE", "IRSHIFT", "IREXIT1", "IRPAUSE",
"IREXIT2", "IRUPDATE"
"""<line_sep>self._ensure_in_state(state_name.strip())<block_end><def_stmt>_shift_while_in_state self state tdi=<none> length=<none> ignore_response=<false> advance_state=<false> byteorder='big'<block_start>""" Shifts data through the chain while in the given state. """<line_sep># Normalize our data into a bitstring type that we can easily work with.
# This both ensures we have a known format, and implicitly handles things like padding.
<if_stmt>tdi<block_start>data_bits=bits(tdi length byteorder=byteorder)<line_sep># Convert from our raw data to the format we'll need to send down to the device.
bit_length=len(data_bits)<line_sep>data_bytes=data_bits.to_bytes(byteorder='big')<block_end><else_stmt><block_start><if_stmt>length<is><none><block_start><raise>ValueError("either TDI or length must be provided!")<block_end>bit_length=length<block_end># Move into our shift-DR state.
self._ensure_in_state(state)<line_sep># Finally, issue the transaction itself.
<if_stmt>tdi<and>ignore_response<block_start>self._transmit_data(bit_length data_bytes advance_state)<line_sep><return><none><block_end><elif_stmt>tdi<block_start>result=self._scan_data(bit_length data_bytes advance_state)<block_end><else_stmt><block_start>result=self._receive_data(bit_length advance_state)<block_end># Return our data, converted back up to bits.
<return>bits(result bit_length)<block_end><def_stmt>_validate_response self response_bits tdo=<none> mask=<none><block_start>""" Validates the response provided by a _shift_while_in_state call, in the traditional JTAG SVF form. """<line_sep># If we don't have any data to validate against, vacuously succeed.
<if_stmt>(<not>tdo)<or>(<not>response_bits)<block_start><return><block_end># If we have a mask, mask both the TDO value and response, and then compare.
masked_response=mask&response_bits<if>mask<else>response_bits<line_sep>masked_tdo=mask&tdo<if>mask<else>tdo<if_stmt>masked_response<ne>masked_tdo<block_start><raise>JTAGPatternError("Scan result did not match expected pattern: {} != {} (expected)!".format(masked_response masked_tdo) response_bits)<block_end><block_end><def_stmt>shift_data self tdi=<none> length=<none> tdo=<none> mask=<none> ignore_response=<false> advance_state=<false> byteorder='big'<block_start>""" Shifts data through the scan-chain's data register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
If omitted or None, a string of all zeroes will be used,
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
in this mask argument are considered when checking against 'tdo'. This is the behavior defined
in the SVF standard; see it for more information.
ignore_response -- If provided; the returned response will always be empty, and tdo and mask will be ignored.
This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
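A brief usage sketch (``board`` is assumed to be an already-connected GreatFET board object)::
chain = JTAGChain(board)
chain.initialize_chain()
# Shift eight zero bits through the data register and read back whatever arrives on TDO.
response = chain.shift_data(length=8)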
"""<line_sep># Perform the core shift, and gather the response.
response=self._shift_while_in_state('DRSHIFT' tdi=tdi length=length ignore_response=ignore_response advance_state=advance_state byteorder=byteorder)<line_sep># Validate our response against any provided constraints.
self._validate_response(response tdo=tdo mask=mask)<line_sep><return>response<block_end><def_stmt>shift_instruction self tdi=<none> length=<none> tdo=<none> mask=<none> ignore_response=<false> advance_state=<false> byteorder='big'<block_start>""" Shifts data through the chain's instruction register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
If omitted or None, a string of all zeroes will be used,
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
in this mask argument are considered when checking against 'tdo'. This is the behavior defiend
in the SVF standard; see it for more information.
ignore_response -- If provided; the returned response will always be empty, and tdo and mask will be ignored.
This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
"""<line_sep># Perform the core shift, and gather the response.
response=self._shift_while_in_state('IRSHIFT' tdi=tdi length=length ignore_response=ignore_response advance_state=advance_state byteorder=byteorder)<line_sep># Validate our response against any provided constraints.
self._validate_response(response tdo=tdo mask=mask)<line_sep><return>response<block_end><def_stmt>run_test self cycles from_state='IDLE' end_state=<none><block_start>""" Places the device into the RUNTEST/IDLE (or provided) state, and pulses the JTAG clock.
Parameters:
cycles -- The number of cycles for which the device should remain in the given state.
from_state -- The state in which the cycles should be spent; defaults to IDLE.
end_state -- The state in which the device should be placed after the test is complete.
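For example, ``chain.run_test(100, end_state='IDLE')`` pulses TCK for 100 cycles in RUNTEST/IDLE and then returns to IDLE (the cycle count is illustrative).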
"""<if_stmt>from_state<block_start>self.move_to_state(from_state)<block_end>self.api.run_clock(cycles <false> timeout=0)<if_stmt>from_state<block_start>self.move_to_state(end_state)<block_end><block_end><def_stmt>_create_device_for_idcode self idcode position_in_chain<block_start>""" Creates a JTAGDevice object for the relevant idcode. """<line_sep><return>JTAGDevice.from_idcode(idcode position_in_chain)<block_end><def_stmt>enumerate self return_idcodes=<false><block_start>""" Initializes the JTAG TAP FSM, and attempts to identify all connected devices.
Parameters:
return_idcodes -- If true, this method will return a list of IDCodes rather than JTAGDevice objects.
Returns a list of JTAGDevices (return_idcodes=False) or JTAG IDCODES (return_idcodes=True).
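A brief usage sketch (``board`` is assumed to be an already-connected GreatFET board object)::
chain = JTAGChain(board)
for device in chain.enumerate():
    print(hex(device.idcode()), device.description())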
"""<line_sep>devices=[]<line_sep># Place the JTAG TAP FSM into its initial state, so we can perform enumeration.
self.initialize_chain()<line_sep># Resetting the TAP FSM also automatically loaded the instruction register with the IDCODE
# instruction, and accordingly filled the chain of data registers with each device's IDCODE.
# We can accordingly just scan out the data using shift_data.
# Once we (re-)initialize the chain, each device automatically loads the IDCODE instruction
# for execution. This means that if we just scan in data, we'll receive each device's IDCODE,
# followed by a null terminator (32 bits of zeroes).
position_in_chain=0<while_stmt><true># Attempt to read a 32-bit IDCODE from the device.
<block_start>raw_idcode=self.shift_data(length=32)<line_sep>idcode=int.from_bytes(raw_idcode byteorder='little')<line_sep># If our IDCODE is all 1's, and we have no devices, we seem to be stuck at one.
# Warn the user.
<if_stmt>idcode<eq>0xFFFFFFFF<and><not>devices<block_start>warn("TDI appears to be stuck at '1'. Check your wiring?")<block_end># If we've received our null IDCODE, we've finished enumerating the chain.
# We'll also treat an all-1's IDCODE as a terminator, as this invalid IDCODE occurs
# if TDI is stuck-at-one.
<if_stmt>idcode<in>(0x00000000 0xFFFFFFFF)<block_start>self.pulse_tms(asserted=<true>)<line_sep><break><block_end><if_stmt>return_idcodes<block_start>devices.append(idcode)<block_end><else_stmt><block_start>devices.append(self._create_device_for_idcode(idcode position_in_chain))<block_end>position_in_chain<augadd>1<block_end><return>devices<block_end><def_stmt>play_svf_instructions self svf_string log_function=<none> error_log_function=print<block_start>""" Executes a string of JTAG SVF instructions, strumming the relevant scan chain.
svf_string -- A string containing valid JTAG SVF instructions to be executed.
log_function -- If provided, this function will be called with verbose operation information.
log_error -- This function will be used to print information about errors that occur.
"""<line_sep># Create the parser that will run our SVF file, and run our SVF.
parser=SVFParser(svf_string GreatfetSVFEventHandler(self log_function error_log_function))<line_sep>parser.parse_file()<block_end><def_stmt>play_svf_file self svf_file log_function=<none> error_log_function=print<block_start>""" Executes the JTAG SVF instructions from the given file.
svf_file -- A filename or file object pointing to a JTAG SVF file.
log_function -- If provided, this function will be called with verbose operation information.
error_log_function -- This function will be used to print information about errors that occur.
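For example, ``chain.play_svf_file('program.svf', log_function=print)`` replays the given SVF file (the path is hypothetical) while echoing verbose progress.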
"""<line_sep>close_after=<false><if_stmt>isinstance(svf_file str)<block_start>svf_file=open(svf_file 'r')<line_sep>close_after=<true><block_end>self.play_svf_instructions(svf_file.read() log_function=log_function error_log_function=error_log_function)<if_stmt>close_after<block_start>svf_file.close()<block_end><block_end><block_end><class_stmt>GreatfetSVFEventHandler(SVFEventHandler)<block_start>""" SVF event handler that delegates handling of SVF instructions to a GreatFET JTAG interface. """<def_stmt>__init__ self interface verbose_log_function=<none> error_log_function=print<block_start>""" Creates a new SVF event handler.
Parameters:
interface: The GreatFET JTAG interface that will execute our JTAG commands.
verbose_log_function: An optional function that will be called with verbose operation information.
error_log_function: The function used to report errors; defaults to print.
"""<if_stmt>verbose_log_function<is><none><block_start>verbose_log_function=<lambda>string:<none><block_end><if_stmt>error_log_function<is><none><block_start>error_log_function=print<block_end>self.interface=interface<line_sep>self.log=verbose_log_function<line_sep>self.log_error=error_log_function<line_sep># Assume that after a data / instruction shift operation that we'll
# wind up in the IDLE state, per the SVF standard. The SVF file can
# override these defaults
self.end_dr_state='IDLE'<line_sep>self.end_ir_state='IDLE'<line_sep># By default, don't have any headers or trailers for IR or DR shifts.
# The SVF can override these using the HDR/TDR/HIR/TIR instructions.
nullary_padding={'tdi':bits() 'tdo':bits() 'mask':bits() }<line_sep>self.dr_header=nullary_padding.copy()<line_sep>self.dr_trailer=nullary_padding.copy()<line_sep>self.ir_header=nullary_padding.copy()<line_sep>self.ir_trailer=nullary_padding.copy()<line_sep># Store default masks for our ShiftIR and ShiftDR instructions.
self.last_dr_mask=<none><line_sep>self.last_dr_smask=<none><line_sep>self.ir_mask=<none><line_sep>self.ir_smask=<none><block_end><def_stmt>svf_frequency self frequency<block_start>"""Called when the ``FREQUENCY`` command is encountered."""<line_sep>self.log(" -- FREQUENCY set to {}".format(frequency))<line_sep>self.interface.set_frequency(frequency)<block_end><def_stmt>svf_trst self mode<block_start>"""Called when the ``TRST`` command is encountered."""<line_sep>warn('SVF provided TRST command; but this implementation does not yet support driving the TRST line')<block_end><def_stmt>svf_state self state path<block_start>"""Called when the ``STATE`` command is encountered."""<line_sep># Visit each state in any intermediate paths provided...
<if_stmt>path<block_start><for_stmt>intermediate path<block_start>self.log("STATE; Moving through {}.".format(intermediate))<line_sep>self.interface.move_to_state(intermediate)<block_end><block_end># ... ensuring we end up in the relevant state.
self.log("Moving to {} STATE.".format(state))<line_sep>self.interface.move_to_state(state)<block_end><def_stmt>svf_endir self state<block_start>"""Called when the ``ENDIR`` command is encountered."""<line_sep>self.log("Moving to {} after each Shift-IR.".format(state))<line_sep>self.end_dr_state=state<block_end><def_stmt>svf_enddr self state<block_start>"""Called when the ``ENDDR`` command is encountered."""<line_sep>self.log("Moving to {} after each Shift-DR.".format(state))<line_sep>self.end_ir_state=state<block_end><def_stmt>svf_hir self **header<block_start>"""Called when the ``HIR`` command is encountered."""<line_sep>self.log("Applying Shift-IR prefix. ")<line_sep>self.ir_header=header<block_end><def_stmt>svf_tir self **trailer<block_start>self.log("Applying Shift-IR suffix. ")<line_sep>self.ir_trailer=trailer<block_end><def_stmt>svf_hdr self **header<block_start>"""Called when the ``HDR`` command is encountered."""<line_sep>self.log("Applying Shift-DR header. ")<line_sep>self.dr_header=header<block_end><def_stmt>svf_tdr self **trailer<block_start>"""Called when the ``TDR`` command is encountered."""<line_sep>self.log("Applying Shift-DR suffix. ")<line_sep>self.dr_trailer=trailer<block_end><def_stmt>svf_sir self **data<block_start>"""Called when the ``SIR`` command is encountered."""<line_sep># Append our header and trailer to each of our arguments.
arguments={}<for_stmt>arg,value data.items()<block_start>header=self.ir_header[arg]<if>(arg<in>self.ir_header)<else>bits()<line_sep>trailer=self.ir_trailer[arg]<if>(arg<in>self.ir_trailer)<else>bits()<line_sep>arguments[arg]=(header+value+trailer)<if>value<else><none><block_end><if_stmt>data['mask']<block_start>self.ir_mask=data['mask']<block_end><if_stmt>data['smask']<block_start>self.ir_smask=data['mask']<block_end>self.log("Performing SHIFT-IR:")<line_sep>self.log("out: {}".format(arguments['tdi']))<line_sep>self.log("expected: {}".format(arguments['tdo']))<line_sep>self.log("mask: {}".format(arguments['tdo']))<try_stmt><block_start>result=self.interface.shift_instruction(tdi=arguments['tdi'] tdo=arguments['tdo'] mask=arguments['mask'])<block_end><except_stmt>JTAGPatternError<as>e<block_start>self.log("in: {} [FAIL]\n".format(e.result))<line_sep>self.log_error("\n\n<!> Failure while performing SHIFT-IR: \n "+str(e))<line_sep><raise><block_end>self.log("in: {} [OK]\n".format(result))<block_end><def_stmt>svf_sdr self **data<block_start>"""Called when the ``SDR`` command is encountered."""<line_sep># Append our header and trailer to each of our arguments.
arguments={}<for_stmt>arg,value data.items()<block_start>header=self.dr_header[arg]<if>(arg<in>self.dr_header)<else>bits()<line_sep>trailer=self.dr_trailer[arg]<if>(arg<in>self.dr_trailer)<else>bits()<line_sep>arguments[arg]=(header+value+trailer)<if>value<else><none><block_end><if_stmt>data['mask']<block_start>self.dr_mask=data['mask']<block_end><if_stmt>data['smask']<block_start>self.dr_smask=data['mask']<block_end>self.log("Performing SHIFT-DR:")<line_sep>self.log("out: {}".format(arguments['tdi']))<line_sep>self.log("expected: {}".format(arguments['tdo']))<line_sep>self.log("mask: {}".format(arguments['tdo']))<try_stmt><block_start>result=self.interface.shift_data(tdi=arguments['tdi'] tdo=arguments['tdo'] mask=arguments['mask'])<block_end><except_stmt>JTAGPatternError<as>e<block_start>self.log("in: {} [FAIL]\n".format(e.result))<line_sep>self.log_error("\n\n<!> Failure while performing SHIFT-DR: \n "+str(e))<line_sep><raise><block_end>self.log("in: {} [OK]\n".format(result))<block_end><def_stmt>svf_runtest self run_state run_count run_clock min_time max_time end_state<block_start>"""Called when the ``RUNTEST`` command is encountered."""<line_sep>self.log("Running test for {} cycles.".format(run_count))<line_sep>self.interface.run_test(run_count from_state=run_state end_state=end_state)<block_end><def_stmt>svf_piomap self mapping<block_start>"""Called when the ``PIOMAP`` command is encountered."""<line_sep><raise>NotImplementedError("This implementation does not yet support PIOMAP.")<block_end><def_stmt>svf_pio self vector<block_start>"""Called when the ``PIO`` command is encountered."""<line_sep><raise>NotImplementedError("This implementation does not yet support PIO.")<block_end><block_end> |
<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.utils.data Dataset<import_from_stmt>stage1.label_encoder DataEncoder<class_stmt>DataGenerator(Dataset)<block_start><def_stmt>__init__ self config data phase='train'<block_start>self.phase=phase<line_sep>self.data=data<line_sep>self.config=config<line_sep>self.encoder=DataEncoder(self.config)<block_end><def_stmt>__getitem__ self idx<block_start>img=cv2.imread(self.data.get_image_path(idx))# BGR
bboxes=self.data.get_bbox(idx)<line_sep>img_h,img_w,_=img.shape<line_sep># data augmentation
<if_stmt>self.phase<eq>'train'<block_start>random_flip=np.random.randint(0 2)<if_stmt>random_flip<eq>1<block_start>img=cv2.flip(img 1)<line_sep>x1s=img_w-bboxes[: 2]<line_sep>x2s=img_w-bboxes[: 0]<line_sep>bboxes[: 0]=x1s<line_sep>bboxes[: 2]=x2s<block_end><block_end># min size resizing
scale=self.config.img_max_size/max(img_w img_h)<line_sep>img_h2=int(img_h<times>scale)<line_sep>img_w2=int(img_w<times>scale)<line_sep>img=cv2.resize(img (img_w2 img_h2) interpolation=cv2.INTER_CUBIC)<line_sep>bboxes<augmul>scale<line_sep>img=np.transpose(img (2 0 1)).astype(np.float32)# channel, height, width
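# The next line swaps channels 0 and 2 of the transposed array, converting OpenCV's BGR layout to RGB before normalization.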
img[[0 2]]=img[[2 0]]<line_sep>img=img/255.0<line_sep>img=(img-self.config.mu)/self.config.sigma<line_sep><return>torch.from_numpy(img) torch.from_numpy(bboxes)<block_end><def_stmt>collate_fn self batch<block_start>imgs=[x[0]<for>x batch]<line_sep>bboxes=[x[1]<for>x batch]<line_sep># Use the same size to accelerate dynamic graph
maxh=self.config.img_max_size#max([img.size(1) for img in imgs])
maxw=self.config.img_max_size#max([img.size(2) for img in imgs])
num_imgs=len(imgs)<line_sep>pad_imgs=torch.zeros(num_imgs 3 maxh maxw)<line_sep>reg_targets=[]<line_sep>cls_targets=[]<for_stmt>i range(num_imgs)<block_start>img=imgs[i]<line_sep>pad_imgs[i : :img.size(1) :img.size(2)]=img# Pad images to the same size
reg_target,cls_target=self.encoder.encode(bboxes[i] torch.ones([1 ]) [maxh maxw])<line_sep>reg_targets.append(reg_target)<line_sep>cls_targets.append(cls_target)<block_end>reg_targets=torch.stack(reg_targets)# [batch_size, anchor#, 4]
cls_targets=torch.stack(cls_targets)# [batch_size, anchor#] 0 for neg, 1, 2, 3 ... for different classes
<return>pad_imgs reg_targets cls_targets<block_end><def_stmt>__len__ self<block_start><return>self.data.size()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>src.config Config<import_from_stmt>coco Coco<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>time time<line_sep>db_path='/home/storage/lsy/coco/'<line_sep>config=Config()<line_sep>train_coco=Coco(db_path 'train')<line_sep>train_dataset=DataGenerator(config train_coco phase='train')<line_sep>train_loader=DataLoader(train_dataset batch_size=32 shuffle=<true> num_workers=16 collate_fn=train_dataset.collate_fn pin_memory=<true>)<line_sep>t0=time()<for_stmt>i,(data reg_targets cls_targets) enumerate(train_loader)<block_start>print(data.size() reg_targets.size() cls_targets.size())<block_end>t1=time()<block_end> |
<import_from_stmt>trex.emu.api *<import_stmt>argparse<import_stmt>get_args<class_stmt>Prof1()<block_start><def_stmt>__init__ self<block_start>self.mac=Mac('00:00:00:70:00:01')<line_sep>self.def_ns_plugs={'ipv6':{'dmac':self.mac.V()}}<line_sep>self.def_c_plugs=<none><block_end><def_stmt>create_profile self ns_size clients_size<block_start>ns_list=[]<line_sep># create different namespace each time
vport,tci,tpid=0 [0 0] [0x00 0x00]<for_stmt>j range(vport ns_size+vport)<block_start>ns_key=EMUNamespaceKey(vport=j tci=tci tpid=tpid)<line_sep>ns=EMUNamespaceObj(ns_key=ns_key def_c_plugs=self.def_c_plugs)<line_sep>mac=self.mac<line_sep>ipv6=Ipv6("2001:DB8:1::2")<line_sep># create a different client each time
<for_stmt>i range(clients_size)<block_start>client=EMUClientObj(mac=mac[i].V() ipv6=ipv6[i].V() plugs={'ipv6':{} } )<line_sep>ns.add_clients(client)<block_end>ns_list.append(ns)<block_end><return>EMUProfile(ns=ns_list def_ns_plugs=self.def_ns_plugs)<block_end><def_stmt>get_profile self tuneables<block_start>args=get_args.get_args(tuneables)<line_sep><return>self.create_profile(args.ns args.clients)<block_end><block_end><def_stmt>register <block_start><return>Prof1()<block_end> |
<import_stmt>re<import_stmt>collections<import_stmt>string<line_sep># copy from https://github.com/wenhuchen/HybridQA/blob/master/evaluate_script.py
<def_stmt>normalize_answer s<block_start>"""Lower text and remove punctuation, articles and extra whitespace."""<def_stmt>remove_articles text<block_start>regex=re.compile(r"\b(a|an|the)\b" re.UNICODE)<line_sep><return>re.sub(regex " " text)<block_end><def_stmt>white_space_fix text<block_start><return>" ".join(text.split())<block_end><def_stmt>remove_punc text<block_start>exclude=set(string.punctuation)<line_sep><return>"".join(ch<for>ch text<if>ch<not><in>exclude)<block_end><def_stmt>lower text<block_start><return>text.lower()<block_end><return>white_space_fix(remove_articles(remove_punc(lower(s))))<block_end><def_stmt>get_tokens s<block_start><if_stmt><not>s<block_start><return>[]<block_end><return>normalize_answer(s).split()<block_end><def_stmt>compute_exact a_gold a_pred<block_start><return>int(normalize_answer(a_gold)<eq>normalize_answer(a_pred))<block_end><def_stmt>compute_f1 a_gold a_pred<block_start>gold_toks=get_tokens(a_gold)<line_sep>pred_toks=get_tokens(a_pred)<line_sep>common=collections.Counter(gold_toks)&collections.Counter(pred_toks)<line_sep>num_same=sum(common.values())<if_stmt>len(gold_toks)<eq>0<or>len(pred_toks)<eq>0# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
<block_start><return>int(gold_toks<eq>pred_toks)<block_end><if_stmt>num_same<eq>0<block_start><return>0<block_end>precision=1.0<times>num_same/len(pred_toks)<line_sep>recall=1.0<times>num_same/len(gold_toks)<line_sep>f1=(2<times>precision<times>recall)/(precision+recall)<line_sep><return>f1<block_end><def_stmt>get_raw_scores examples reference<block_start>"""
Computes the exact and f1 scores from the examples and the model predictions
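For example, a gold answer of 'red sports car' scored against the prediction 'red car' shares the two tokens {red, car}, giving token precision 2/2, recall 2/3, and F1 = 0.8.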
"""<line_sep>exact_scores={}<line_sep>f1_scores={}<for_stmt>example examples<block_start>qas_id=example['question_id']<line_sep>gold_answers=[reference['reference'][qas_id]]<line_sep>prediction=example['pred']<line_sep>exact_scores[qas_id]=max(compute_exact(a prediction)<for>a gold_answers)<line_sep>f1_scores[qas_id]=max(compute_f1(a prediction)<for>a gold_answers)<block_end>qid_list=reference['reference'].keys()<line_sep>total=len(qid_list)<line_sep>table_list=reference['table']<line_sep>passage_list=reference['passage']<line_sep><return>collections.OrderedDict([("table exact" 100.0<times>sum(exact_scores[k]<for>k table_list)/len(table_list)) ("table f1" 100.0<times>sum(f1_scores[k]<for>k table_list)/len(table_list)) ("passage exact" 100.0<times>sum(exact_scores[k]<for>k passage_list)/len(passage_list)) ("passage f1" 100.0<times>sum(f1_scores[k]<for>k passage_list)/len(passage_list)) ("total exact" 100.0<times>sum(exact_scores[k]<for>k qid_list)/total) ("total f1" 100.0<times>sum(f1_scores[k]<for>k qid_list)/total) ("total" total) ])<block_end><class_stmt>EvaluateTool(object)<block_start><def_stmt>__init__ self args<block_start>self.args=args<block_end><def_stmt>evaluate self preds golds section<block_start>summary={}<line_sep>exact_scores={}<line_sep>f1_scores={}<for_stmt>pred,gold zip(preds golds)<block_start>qas_id=gold['id']<line_sep>gold_answers=[gold['answer_text']]<line_sep>exact_scores[qas_id]=max(compute_exact(a pred)<for>a gold_answers)<line_sep>f1_scores[qas_id]=max(compute_f1(a pred)<for>a gold_answers)<block_end>total=len(golds)<line_sep>qid_list=list(exact_scores.keys())<line_sep>summary["acc"]=sum(exact_scores[k]<for>k qid_list)/total<line_sep>summary["f1"]=sum(f1_scores[k]<for>k qid_list)/total<line_sep><return>summary<block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>lib.models.tools.module_helper ModuleHelper<import_from_stmt>lib.utils.tools.logger Logger<as>Log<class_stmt>ProjectionHead(nn.Module)<block_start><def_stmt>__init__ self dim_in proj_dim=256 proj='convmlp' bn_type='torchsyncbn'<block_start>super(ProjectionHead self).__init__()<line_sep>Log.info('proj_dim: {}'.format(proj_dim))<if_stmt>proj<eq>'linear'<block_start>self.proj=nn.Conv2d(dim_in proj_dim kernel_size=1)<block_end><elif_stmt>proj<eq>'convmlp'<block_start>self.proj=nn.Sequential(nn.Conv2d(dim_in dim_in kernel_size=1) ModuleHelper.BNReLU(dim_in bn_type=bn_type) nn.Conv2d(dim_in proj_dim kernel_size=1))<block_end><block_end><def_stmt>forward self x<block_start><return>F.normalize(self.proj(x) p=2 dim=1)<block_end><block_end> |
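A minimal smoke test for the ProjectionHead above, assuming the surrounding lib package is importable; proj='linear' is chosen here so the sketch does not depend on the ModuleHelper BN backend, and it only checks that outputs are L2-normalized along the channel dimension:
import torch
head = ProjectionHead(dim_in=64, proj_dim=32, proj='linear')
feats = torch.randn(2, 64, 8, 8)           # (N, C, H, W) backbone features
out = head(feats)                          # (2, 32, 8, 8)
print(out.shape, out.norm(dim=1).mean())   # channel-wise norms are ~1.0 by construction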
<import_stmt>numpy<as>np<import_from_stmt>deepspeaker.audio_ds read_mfcc<import_from_stmt>deepspeaker.batcher sample_from_mfcc<import_from_stmt>deepspeaker.constants SAMPLE_RATE NUM_FRAMES WIN_LENGTH<import_from_stmt>deepspeaker.conv_models DeepSpeakerModel<import_stmt>tensorflow<as>tf<def_stmt>build_model ckpt_path<block_start>model=DeepSpeakerModel()<line_sep>model.m.load_weights(ckpt_path by_name=<true>)<line_sep><return>model<block_end><def_stmt>predict_embedding model audio sr=SAMPLE_RATE win_length=WIN_LENGTH cuda=<true><block_start>mfcc=sample_from_mfcc(read_mfcc(audio sr win_length) NUM_FRAMES)<line_sep># Call the model to get the embeddings of shape (1, 512) for each file.
gpus=tf.config.experimental.list_physical_devices('GPU')<if>cuda<else>0<if_stmt>gpus<block_start><try_stmt><block_start>tf.config.experimental.set_visible_devices(gpus[0] 'GPU')<block_end><except_stmt>RuntimeError<as>e<block_start>print(e)<block_end><with_stmt>tf.device('/device:GPU:0')<block_start>embedding=model.m.predict(np.expand_dims(mfcc axis=0))# run inference on the GPU
<block_end><block_end><else_stmt><block_start><with_stmt>tf.device('/device:CPU:0')<block_start>embedding=model.m.predict(np.expand_dims(mfcc axis=0))# run inference on the CPU
<block_end><block_end><return>embedding<block_end> |
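A hedged usage sketch for build_model and predict_embedding above; the checkpoint and audio paths are placeholders, and the cosine similarity is computed with plain NumPy:
model = build_model("checkpoints/deep_speaker.h5")              # placeholder checkpoint path
emb_a = predict_embedding(model, "speaker_a.wav", cuda=False)   # placeholder audio files
emb_b = predict_embedding(model, "speaker_b.wav", cuda=False)
cos = float(np.dot(emb_a[0], emb_b[0]) /
            (np.linalg.norm(emb_a[0]) * np.linalg.norm(emb_b[0])))
print("cosine similarity:", cos)   # closer to 1.0 when both clips are the same speaker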
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Manages all plugins."""<import_stmt>importlib<import_stmt>importlib.machinery<import_stmt>importlib.util<import_stmt>inspect<import_stmt>logging<import_stmt>os<import_stmt>sys<import_stmt>types<import_from_stmt>typing TYPE_CHECKING Any Dict List Optional Type<try_stmt><block_start><import_stmt>importlib_metadata<block_end><except_stmt>ImportError<block_start><import_from_stmt>importlib metadata<as>importlib_metadata<block_end><import_from_stmt>airflow settings<import_from_stmt>airflow.utils.entry_points entry_points_with_dist<import_from_stmt>airflow.utils.file find_path_from_directory<import_from_stmt>airflow.utils.module_loading as_importable_string<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>airflow.hooks.base BaseHook<import_from_stmt>airflow.timetables.base Timetable<block_end>log=logging.getLogger(__name__)<line_sep>import_errors:Dict[str str]={}<line_sep>plugins=<none># type: Optional[List[AirflowPlugin]]
# Plugin components to integrate as modules
registered_hooks:Optional[List['BaseHook']]=<none><line_sep>macros_modules:Optional[List[Any]]=<none><line_sep>executors_modules:Optional[List[Any]]=<none><line_sep># Plugin components to integrate directly
admin_views:Optional[List[Any]]=<none><line_sep>flask_blueprints:Optional[List[Any]]=<none><line_sep>menu_links:Optional[List[Any]]=<none><line_sep>flask_appbuilder_views:Optional[List[Any]]=<none><line_sep>flask_appbuilder_menu_links:Optional[List[Any]]=<none><line_sep>global_operator_extra_links:Optional[List[Any]]=<none><line_sep>operator_extra_links:Optional[List[Any]]=<none><line_sep>registered_operator_link_classes:Optional[Dict[str Type]]=<none><line_sep>timetable_classes:Optional[Dict[str Type["Timetable"]]]=<none><line_sep>"""Mapping of class names to class of OperatorLinks registered by plugins.
Used by the DAG serialization code to only allow specific classes to be created
during deserialization
"""<line_sep>PLUGINS_ATTRIBUTES_TO_DUMP={"hooks" "executors" "macros" "flask_blueprints" "appbuilder_views" "appbuilder_menu_items" "global_operator_extra_links" "operator_extra_links" "source" }<class_stmt>AirflowPluginSource<block_start>"""Class used to define an AirflowPluginSource."""<def_stmt>__str__ self<block_start><raise>NotImplementedError<block_end><def_stmt>__html__ self<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>PluginsDirectorySource(AirflowPluginSource)<block_start>"""Class used to define Plugins loaded from Plugins Directory."""<def_stmt>__init__ self path<block_start>self.path=os.path.relpath(path settings.PLUGINS_FOLDER)<block_end><def_stmt>__str__ self<block_start><return>f"$PLUGINS_FOLDER/{self.path}"<block_end><def_stmt>__html__ self<block_start><return>f"<em>$PLUGINS_FOLDER/</em>{self.path}"<block_end><block_end><class_stmt>EntryPointSource(AirflowPluginSource)<block_start>"""Class used to define Plugins loaded from entrypoint."""<def_stmt>__init__ self entrypoint:importlib_metadata.EntryPoint dist:importlib_metadata.Distribution<block_start>self.dist=dist.metadata['name']<line_sep>self.version=dist.version<line_sep>self.entrypoint=str(entrypoint)<block_end><def_stmt>__str__ self<block_start><return>f"{self.dist}=={self.version}: {self.entrypoint}"<block_end><def_stmt>__html__ self<block_start><return>f"<em>{self.dist}=={self.version}:</em> {self.entrypoint}"<block_end><block_end><class_stmt>AirflowPluginException(Exception)<block_start>"""Exception when loading plugin."""<block_end><class_stmt>AirflowPlugin<block_start>"""Class used to define AirflowPlugin."""<line_sep>name:Optional[str]=<none><line_sep>source:Optional[AirflowPluginSource]=<none><line_sep>hooks:List[Any]=[]<line_sep>executors:List[Any]=[]<line_sep>macros:List[Any]=[]<line_sep>admin_views:List[Any]=[]<line_sep>flask_blueprints:List[Any]=[]<line_sep>menu_links:List[Any]=[]<line_sep>appbuilder_views:List[Any]=[]<line_sep>appbuilder_menu_items:List[Any]=[]<line_sep># A list of global operator extra links that can redirect users to
# external systems. These extra links will be available on the
# task page in the form of buttons.
#
# Note: the global operator extra link can be overridden at each
# operator level.
global_operator_extra_links:List[Any]=[]<line_sep># A list of operator extra links to override or add operator links
# to existing Airflow Operators.
# These extra links will be available on the task page in form of
# buttons.
operator_extra_links:List[Any]=[]<line_sep># A list of timetable classes that can be used for DAG scheduling.
timetables:List[Type["Timetable"]]=[]<line_sep>@classmethod<def_stmt>validate cls<block_start>"""Validates that plugin has a name."""<if_stmt><not>cls.name<block_start><raise>AirflowPluginException("Your plugin needs a name.")<block_end><block_end>@classmethod<def_stmt>on_load cls *args **kwargs<block_start>"""
Executed when the plugin is loaded.
This method is only called once during runtime.
:param args: If future arguments are passed in on call.
:param kwargs: If future arguments are passed in on call.
"""<block_end><block_end><def_stmt>is_valid_plugin plugin_obj<block_start>"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""<line_sep><global>plugins<if_stmt>(inspect.isclass(plugin_obj)<and>issubclass(plugin_obj AirflowPlugin)<and>(plugin_obj<is><not>AirflowPlugin))<block_start>plugin_obj.validate()<line_sep><return>plugin_obj<not><in>plugins<block_end><return><false><block_end><def_stmt>register_plugin plugin_instance<block_start>"""
Start plugin load and register it after success initialization
:param plugin_instance: subclass of AirflowPlugin
"""<line_sep><global>plugins<line_sep>plugin_instance.on_load()<line_sep>plugins.append(plugin_instance)<block_end><def_stmt>load_entrypoint_plugins <block_start>"""
Load and register plugins AirflowPlugin subclasses from the entrypoints.
The entry_point group should be 'airflow.plugins'.
"""<line_sep><global>import_errors<line_sep>log.debug("Loading plugins from entrypoints")<for_stmt>entry_point,dist entry_points_with_dist('airflow.plugins')<block_start>log.debug('Importing entry_point plugin %s' entry_point.name)<try_stmt><block_start>plugin_class=entry_point.load()<if_stmt><not>is_valid_plugin(plugin_class)<block_start><continue><block_end>plugin_instance=plugin_class()<line_sep>plugin_instance.source=EntryPointSource(entry_point dist)<line_sep>register_plugin(plugin_instance)<block_end><except_stmt>Exception<as>e<block_start>log.exception("Failed to import plugin %s" entry_point.name)<line_sep>import_errors[entry_point.module]=str(e)<block_end><block_end><block_end><def_stmt>load_plugins_from_plugin_directory <block_start>"""Load and register Airflow Plugins from plugins directory"""<line_sep><global>import_errors<line_sep>log.debug("Loading plugins from directory: %s" settings.PLUGINS_FOLDER)<for_stmt>file_path find_path_from_directory(settings.PLUGINS_FOLDER ".airflowignore")<block_start><if_stmt><not>os.path.isfile(file_path)<block_start><continue><block_end>mod_name,file_ext=os.path.splitext(os.path.split(file_path)[-1])<if_stmt>file_ext<ne>'.py'<block_start><continue><block_end><try_stmt><block_start>loader=importlib.machinery.SourceFileLoader(mod_name file_path)<line_sep>spec=importlib.util.spec_from_loader(mod_name loader)<line_sep>mod=importlib.util.module_from_spec(spec)<line_sep>sys.modules[spec.name]=mod<line_sep>loader.exec_module(mod)<line_sep>log.debug('Importing plugin module %s' file_path)<for_stmt>mod_attr_value (m<for>m mod.__dict__.values()<if>is_valid_plugin(m))<block_start>plugin_instance=mod_attr_value()<line_sep>plugin_instance.source=PluginsDirectorySource(file_path)<line_sep>register_plugin(plugin_instance)<block_end><block_end><except_stmt>Exception<as>e<block_start>log.exception('Failed to import plugin %s' file_path)<line_sep>import_errors[file_path]=str(e)<block_end><block_end><block_end><def_stmt>make_module name:str objects:List[Any]<block_start>"""Creates new module."""<if_stmt><not>objects<block_start><return><none><block_end>log.debug('Creating module %s' name)<line_sep>name=name.lower()<line_sep>module=types.ModuleType(name)<line_sep>module._name=name.split('.')[-1]# type: ignore
module._objects=objects# type: ignore
module.__dict__.update((o.__name__ o)<for>o objects)<line_sep><return>module<block_end><def_stmt>ensure_plugins_loaded <block_start>"""
Load plugins from plugins directory and entrypoints.
Plugins are only loaded if they have not been previously loaded.
"""<import_from_stmt>airflow.stats Stats<line_sep><global>plugins registered_hooks<if_stmt>plugins<is><not><none><block_start>log.debug("Plugins are already loaded. Skipping.")<line_sep><return><block_end><if_stmt><not>settings.PLUGINS_FOLDER<block_start><raise>ValueError("Plugins folder is not set")<block_end>log.debug("Loading plugins")<with_stmt>Stats.timer()<as>timer<block_start>plugins=[]<line_sep>registered_hooks=[]<line_sep>load_plugins_from_plugin_directory()<line_sep>load_entrypoint_plugins()<line_sep># We don't do anything with these for now, but we want to keep track of
# them so we can integrate them in to the UI's Connection screens
<for_stmt>plugin plugins<block_start>registered_hooks.extend(plugin.hooks)<block_end><block_end>num_loaded=len(plugins)<if_stmt>num_loaded<g>0<block_start>log.debug("Loading %d plugin(s) took %.2f seconds" num_loaded timer.duration)<block_end><block_end><def_stmt>initialize_web_ui_plugins <block_start>"""Collect extension points for WEB UI"""<line_sep><global>plugins<line_sep><global>flask_blueprints<line_sep><global>flask_appbuilder_views<line_sep><global>flask_appbuilder_menu_links<if_stmt>(flask_blueprints<is><not><none><and>flask_appbuilder_views<is><not><none><and>flask_appbuilder_menu_links<is><not><none>)<block_start><return><block_end>ensure_plugins_loaded()<if_stmt>plugins<is><none><block_start><raise>AirflowPluginException("Can't load plugins.")<block_end>log.debug("Initialize Web UI plugin")<line_sep>flask_blueprints=[]<line_sep>flask_appbuilder_views=[]<line_sep>flask_appbuilder_menu_links=[]<for_stmt>plugin plugins<block_start>flask_appbuilder_views.extend(plugin.appbuilder_views)<line_sep>flask_appbuilder_menu_links.extend(plugin.appbuilder_menu_items)<line_sep>flask_blueprints.extend([{'name':plugin.name 'blueprint':bp}<for>bp plugin.flask_blueprints])<if_stmt>(plugin.admin_views<and><not>plugin.appbuilder_views)<or>(plugin.menu_links<and><not>plugin.appbuilder_menu_items)<block_start>log.warning("Plugin \'%s\' may not be compatible with the current Airflow version. "<concat>"Please contact the author of the plugin." plugin.name )<block_end><block_end><block_end><def_stmt>initialize_extra_operators_links_plugins <block_start>"""Creates modules for loaded extension from extra operators links plugins"""<line_sep><global>global_operator_extra_links<line_sep><global>operator_extra_links<line_sep><global>registered_operator_link_classes<if_stmt>(global_operator_extra_links<is><not><none><and>operator_extra_links<is><not><none><and>registered_operator_link_classes<is><not><none>)<block_start><return><block_end>ensure_plugins_loaded()<if_stmt>plugins<is><none><block_start><raise>AirflowPluginException("Can't load plugins.")<block_end>log.debug("Initialize extra operators links plugins")<line_sep>global_operator_extra_links=[]<line_sep>operator_extra_links=[]<line_sep>registered_operator_link_classes={}<for_stmt>plugin plugins<block_start>global_operator_extra_links.extend(plugin.global_operator_extra_links)<line_sep>operator_extra_links.extend(list(plugin.operator_extra_links))<line_sep>registered_operator_link_classes.update({f"{link.__class__.__module__}.{link.__class__.__name__}":link.__class__<for>link plugin.operator_extra_links})<block_end><block_end><def_stmt>initialize_timetables_plugins <block_start>"""Collect timetable classes registered by plugins."""<line_sep><global>timetable_classes<if_stmt>timetable_classes<is><not><none><block_start><return><block_end>ensure_plugins_loaded()<if_stmt>plugins<is><none><block_start><raise>AirflowPluginException("Can't load plugins.")<block_end>log.debug("Initialize extra timetables plugins")<line_sep>timetable_classes={as_importable_string(timetable_class):timetable_class<for>plugin plugins<for>timetable_class plugin.timetables}<block_end><def_stmt>integrate_executor_plugins <arrow><none><block_start>"""Integrate executor plugins to the context."""<line_sep><global>plugins<line_sep><global>executors_modules<if_stmt>executors_modules<is><not><none><block_start><return><block_end>ensure_plugins_loaded()<if_stmt>plugins<is><none><block_start><raise>AirflowPluginException("Can't load plugins.")<block_end>log.debug("Integrate executor 
plugins")<line_sep>executors_modules=[]<for_stmt>plugin plugins<block_start><if_stmt>plugin.name<is><none><block_start><raise>AirflowPluginException("Invalid plugin name")<block_end>plugin_name:str=plugin.name<line_sep>executors_module=make_module('airflow.executors.'+plugin_name plugin.executors)<if_stmt>executors_module<block_start>executors_modules.append(executors_module)<line_sep>sys.modules[executors_module.__name__]=executors_module<block_end><block_end><block_end><def_stmt>integrate_macros_plugins <arrow><none><block_start>"""Integrates macro plugins."""<line_sep><global>plugins<line_sep><global>macros_modules<import_from_stmt>airflow macros<if_stmt>macros_modules<is><not><none><block_start><return><block_end>ensure_plugins_loaded()<if_stmt>plugins<is><none><block_start><raise>AirflowPluginException("Can't load plugins.")<block_end>log.debug("Integrate DAG plugins")<line_sep>macros_modules=[]<for_stmt>plugin plugins<block_start><if_stmt>plugin.name<is><none><block_start><raise>AirflowPluginException("Invalid plugin name")<block_end>macros_module=make_module(f'airflow.macros.{plugin.name}' plugin.macros)<if_stmt>macros_module<block_start>macros_modules.append(macros_module)<line_sep>sys.modules[macros_module.__name__]=macros_module<line_sep># Register the newly created module on airflow.macros such that it
# can be accessed when rendering templates.
setattr(macros plugin.name macros_module)<block_end><block_end><block_end><def_stmt>get_plugin_info attrs_to_dump:Optional[List[str]]=<none><arrow>List[Dict[str Any]]<block_start>"""
Dump plugins attributes
:param attrs_to_dump: A list of plugin attributes to dump
:type attrs_to_dump: List
"""<line_sep>ensure_plugins_loaded()<line_sep>integrate_executor_plugins()<line_sep>integrate_macros_plugins()<line_sep>initialize_web_ui_plugins()<line_sep>initialize_extra_operators_links_plugins()<if_stmt><not>attrs_to_dump<block_start>attrs_to_dump=PLUGINS_ATTRIBUTES_TO_DUMP<block_end>plugins_info=[]<if_stmt>plugins<block_start><for_stmt>plugin plugins<block_start>info={"name":plugin.name}<line_sep>info.update({n:getattr(plugin n)<for>n attrs_to_dump})<line_sep>plugins_info.append(info)<block_end><block_end><return>plugins_info<block_end> |
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""<import_from_stmt>tensorflow.keras Model<import_from_stmt>tensorflow.keras Sequential<import_from_stmt>tensorflow.keras.layers Flatten Dense Activation Input Concatenate<import_from_stmt>tensorflow.keras.optimizers Adam<import_from_stmt>rl.agents DDPGAgent<import_from_stmt>rl.agents DQNAgent<import_from_stmt>rl.memory SequentialMemory<import_from_stmt>rl.policy BoltzmannQPolicy<import_from_stmt>rl.random OrnsteinUhlenbeckProcess<class_stmt>RLAgent<block_start><def_stmt>__init__ self env alg='ddpg'<block_start>self.env=env<line_sep>nb_actions=env.action_space.shape[0]<line_sep>nb_states=env.observation_space.shape[0]<if_stmt>alg<eq>'ddpg'<block_start>self.agent=self._build_ddpg(nb_actions nb_states)<block_end><elif_stmt>alg<eq>'dpn'<block_start>self.agent=self._build_dqn(nb_actions nb_states)<block_end><else_stmt><block_start><raise>ValueError('Can not support this reinforcement learning algorithm.')<block_end><block_end>@staticmethod# not regression test on DQN, suggest to choose DDPG.
<def_stmt>_build_dqn nb_actions nb_states# build network
<block_start>model=Sequential()<line_sep>model.add(Flatten(input_shape=(1 nb_states)))<line_sep>model.add(Dense(16))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Dense(16))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Dense(nb_actions activation='linear'))<line_sep># build alg
memory=SequentialMemory(limit=10240 window_length=1)<line_sep>policy=BoltzmannQPolicy()<line_sep>dqn=DQNAgent(model=model nb_actions=nb_actions memory=memory nb_steps_warmup=10 enable_dueling_network=<true> dueling_type='avg' target_model_update=1e-2 policy=policy)<line_sep>dqn.compile(Adam() metrics=['mae'])<line_sep><return>dqn<block_end>@staticmethod<def_stmt>_build_ddpg nb_actions nb_states# build an actor network
<block_start>actor=Sequential()<line_sep>actor.add(Flatten(input_shape=(1 nb_states)))<line_sep>actor.add(Dense(16))<line_sep>actor.add(Activation('relu'))<line_sep>actor.add(Dense(16))<line_sep>actor.add(Activation('relu'))<line_sep>actor.add(Dense(nb_actions))<line_sep>actor.add(Activation('sigmoid'))<line_sep># build a critic network
action_input=Input(shape=(nb_actions ) name='action_input')<line_sep>observation_input=Input(shape=(1 nb_states) name='observation_input')<line_sep>flattened_observation=Flatten()(observation_input)<line_sep>x=Concatenate()([action_input flattened_observation])<line_sep>x=Dense(32)(x)<line_sep>x=Activation('relu')(x)<line_sep>x=Dense(32)(x)<line_sep>x=Activation('relu')(x)<line_sep>x=Dense(1)(x)<line_sep>x=Activation('linear')(x)<line_sep>critic=Model(inputs=[action_input observation_input] outputs=x)<line_sep># tricks:
memory=SequentialMemory(limit=10240 window_length=1)<line_sep>oup=OrnsteinUhlenbeckProcess(size=nb_actions theta=.15 mu=0. sigma=.3)<line_sep># build ddpg alg
ddpg=DDPGAgent(nb_actions=nb_actions actor=actor critic=critic critic_action_input=action_input memory=memory nb_steps_warmup_actor=100 nb_steps_warmup_critic=100 random_process=oup gamma=.99 target_model_update=1e-3)<line_sep>ddpg.compile(Adam() metrics=['mae'])<line_sep><return>ddpg<block_end><def_stmt>fit self steps nb_max_episode_steps=100 verbose=0<block_start>self.agent.fit(self.env nb_steps=steps nb_max_episode_steps=nb_max_episode_steps verbose=verbose)<block_end><def_stmt>save self filepath<block_start>self.agent.save_weights(filepath overwrite=<true>)<block_end><def_stmt>load self filepath<block_start>self.agent.load_weights(filepath)<block_end><def_stmt>test self episodes nb_max_episode_steps=10 verbose=0<block_start>self.agent.test(self.env nb_episodes=episodes nb_max_episode_steps=nb_max_episode_steps verbose=verbose)<block_end><block_end> |
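A usage sketch, assuming a Gym environment with a continuous 1-D action space (e.g. Pendulum-v0) so the DDPG branch applies; the environment name, step counts and file name are illustrative:
import gym
env = gym.make('Pendulum-v0')
agent = RLAgent(env, alg='ddpg')
agent.fit(steps=10000, nb_max_episode_steps=200, verbose=1)
agent.save('ddpg_pendulum_weights.h5')
agent.test(episodes=5, nb_max_episode_steps=200)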
<import_from_stmt>copy copy<import_stmt>cloudpickle<import_from_stmt>sortedcontainers SortedDict SortedSet<import_from_stmt>adaptive.learner.base_learner BaseLearner<class_stmt>_IgnoreFirstArgument<block_start>"""Remove the first argument from the call signature.
The SequenceLearner's function receives a tuple ``(index, point)``
but the original function only takes ``point``.
This is the same as `lambda x: function(x[1])`, however, that is not
picklable.
"""<def_stmt>__init__ self function<block_start>self.function=function<block_end><def_stmt>__call__ self index_point *args **kwargs<block_start>index,point=index_point<line_sep><return>self.function(point *args **kwargs)<block_end><def_stmt>__getstate__ self<block_start><return>self.function<block_end><def_stmt>__setstate__ self function<block_start>self.__init__(function)<block_end><block_end><class_stmt>SequenceLearner(BaseLearner)<block_start>r"""A learner that will learn a sequence. It simply returns
the points in the provided sequence when asked.
This is useful when your problem cannot be formulated in terms of
another adaptive learner, but you still want to use Adaptive's
routines to run, save, and plot.
Parameters
----------
function : callable
The function to learn. Must take a single element `sequence`.
sequence : sequence
The sequence to learn.
Attributes
----------
data : dict
The data as a mapping from "index of element in sequence" => value.
Notes
-----
From primitive tests, the `~adaptive.SequenceLearner` appears to have a
similar performance to `ipyparallel`\s ``load_balanced_view().map``. With
the added benefit of having results in the local kernel already.
"""<def_stmt>__init__ self function sequence<block_start>self._original_function=function<line_sep>self.function=_IgnoreFirstArgument(function)<line_sep>self._to_do_indices=SortedSet({i<for>i,_ enumerate(sequence)})<line_sep>self._ntotal=len(sequence)<line_sep>self.sequence=copy(sequence)<line_sep>self.data=SortedDict()<line_sep>self.pending_points=set()<block_end><def_stmt>ask self n tell_pending=<true><block_start>indices=[]<line_sep>points=[]<line_sep>loss_improvements=[]<for_stmt>index self._to_do_indices<block_start><if_stmt>len(points)<ge>n<block_start><break><block_end>point=self.sequence[index]<line_sep>indices.append(index)<line_sep>points.append((index point))<line_sep>loss_improvements.append(1/self._ntotal)<block_end><if_stmt>tell_pending<block_start><for_stmt>i,p zip(indices points)<block_start>self.tell_pending((i p))<block_end><block_end><return>points loss_improvements<block_end><def_stmt>loss self real=<true><block_start><if_stmt><not>(self._to_do_indices<or>self.pending_points)<block_start><return>0<block_end><else_stmt><block_start>npoints=self.npoints+(0<if>real<else>len(self.pending_points))<line_sep><return>(self._ntotal-npoints)/self._ntotal<block_end><block_end><def_stmt>remove_unfinished self<block_start><for_stmt>i self.pending_points<block_start>self._to_do_indices.add(i)<block_end>self.pending_points=set()<block_end><def_stmt>tell self point value<block_start>index,point=point<line_sep>self.data[index]=value<line_sep>self.pending_points.discard(index)<line_sep>self._to_do_indices.discard(index)<block_end><def_stmt>tell_pending self point<block_start>index,point=point<line_sep>self.pending_points.add(index)<line_sep>self._to_do_indices.discard(index)<block_end><def_stmt>done self<block_start><return><not>self._to_do_indices<and><not>self.pending_points<block_end><def_stmt>result self<block_start>"""Get the function values in the same order as ``sequence``."""<if_stmt><not>self.done()<block_start><raise>Exception("Learner is not yet complete.")<block_end><return>list(self.data.values())<block_end>@property<def_stmt>npoints self<block_start><return>len(self.data)<block_end><def_stmt>_get_data self<block_start><return>self.data<block_end><def_stmt>_set_data self data<block_start><if_stmt>data<block_start>indices,values=zip(*data.items())<line_sep># the points aren't used by tell, so we can safely pass None
points=[(i <none>)<for>i indices]<line_sep>self.tell_many(points values)<block_end><block_end><def_stmt>__getstate__ self<block_start><return>(cloudpickle.dumps(self._original_function) self.sequence self._get_data() )<block_end><def_stmt>__setstate__ self state<block_start>function,sequence,data=state<line_sep>function=cloudpickle.loads(function)<line_sep>self.__init__(function sequence)<line_sep>self._set_data(data)<block_end><block_end> |
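A small sketch of driving the learner by hand through its own ask/tell interface (in practice one of adaptive's runners performs this loop):
learner = SequenceLearner(lambda x: x ** 2, sequence=range(5))
while not learner.done():
    points, _ = learner.ask(2)
    for point in points:                          # point is an (index, element) pair
        learner.tell(point, learner.function(point))
print(learner.result())                           # [0, 1, 4, 9, 16]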
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>pycox.models.utils pad_col make_subgrid cumsum_reverse<line_sep>@pytest.mark.parametrize('val' [0 1 5])<def_stmt>test_pad_col_start val<block_start>x=torch.ones((2 3))<line_sep>x_pad=pad_col(x val where='start')<line_sep>pad=torch.ones(2 1)<times>val<assert_stmt>(x_pad<eq>torch.cat([pad x] dim=1)).all()<block_end>@pytest.mark.parametrize('val' [0 1 5])<def_stmt>test_pad_col_end val<block_start>x=torch.ones((2 3))<line_sep>x_pad=pad_col(x val)<line_sep>pad=torch.ones(2 1)<times>val<assert_stmt>(x_pad<eq>torch.cat([x pad] dim=1)).all()<block_end>@pytest.mark.parametrize('n' [2 13 40])<def_stmt>test_make_subgrid_1 n<block_start>grid=np.random.uniform(0 100 n)<line_sep>grid=np.sort(grid)<line_sep>new_grid=make_subgrid(grid 1)<assert_stmt>len(new_grid)<eq>len(grid)<assert_stmt>(new_grid<eq>grid).all()<block_end>@pytest.mark.parametrize('sub' [2 10 20])@pytest.mark.parametrize('start' [0 2])@pytest.mark.parametrize('stop' [4 100])@pytest.mark.parametrize('n' [5 10])<def_stmt>test_make_subgrid sub start stop n<block_start>grid=np.linspace(start stop n)<line_sep>new_grid=make_subgrid(grid sub)<line_sep>true_new=np.linspace(start stop n<times>sub-(sub-1))<assert_stmt>len(new_grid)<eq>len(true_new)<assert_stmt>np.abs(true_new-new_grid).max()<l>1e-13<block_end><def_stmt>test_cumsum_reverse_error_dim <block_start>x=torch.randn((5 3))<with_stmt>pytest.raises(NotImplementedError)<block_start>cumsum_reverse(x dim=0)<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start>cumsum_reverse(x dim=2)<block_end><block_end><def_stmt>test_cumsum_reverse_dim_1 <block_start>torch.manual_seed(1234)<line_sep>x=torch.randn(5 16)<line_sep>res_np=x.numpy()[: ::-1].cumsum(1)[: ::-1]<line_sep>res=cumsum_reverse(x dim=1)<assert_stmt>np.abs(res.numpy()-res_np).max()<l>1e-6<block_end> |
# TODO merge naive and weighted loss.
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>..bbox bbox_overlaps<def_stmt>weighted_nll_loss pred label weight avg_factor=<none><block_start><if_stmt>avg_factor<is><none><block_start>avg_factor=max(torch.sum(weight<g>0).float().item() 1.)<block_end>raw=F.nll_loss(pred label reduction='none')<line_sep><return>torch.sum(raw<times>weight)[<none>]/avg_factor<block_end><def_stmt>weighted_cross_entropy pred label weight avg_factor=<none> reduce=<true><block_start><if_stmt>avg_factor<is><none><block_start>avg_factor=max(torch.sum(weight<g>0).float().item() 1.)<block_end>raw=F.cross_entropy(pred label reduction='none')<line_sep><if_stmt>reduce<block_start><return>torch.sum(raw<times>weight)[<none>]/avg_factor<block_end><else_stmt><block_start><return>raw<times>weight/avg_factor<block_end><block_end><def_stmt>fcos_binary_cross_entropy pred label weight<block_start><return>F.binary_cross_entropy_with_logits(pred label.float() weight.float() reduction='mean')[<none>]<block_end><def_stmt>weighted_binary_cross_entropy pred label weight avg_factor=<none><block_start><if_stmt>avg_factor<is><none><block_start>avg_factor=max(torch.sum(weight<g>0).float().item() 1.)<block_end><return>F.binary_cross_entropy_with_logits(pred label.float() weight.float() reduction='sum')[<none>]/avg_factor<block_end><def_stmt>sigmoid_focal_loss pred target weight gamma=2.0 alpha=0.25 reduction='mean'<block_start>pred_sigmoid=pred.sigmoid()<line_sep>target=target.type_as(pred)<line_sep>pt=(1-pred_sigmoid)<times>target+pred_sigmoid<times>(1-target)<line_sep>weight=(alpha<times>target+(1-alpha)<times>(1-target))<times>weight<line_sep>weight=weight<times>pt.pow(gamma)<line_sep>loss=F.binary_cross_entropy_with_logits(pred target reduction='none')<times>weight<line_sep>reduction_enum=F._Reduction.get_enum(reduction)<line_sep># none: 0, mean:1, sum: 2
<if_stmt>reduction_enum<eq>0<block_start><return>loss<block_end><elif_stmt>reduction_enum<eq>1<block_start><return>loss.mean()<block_end><elif_stmt>reduction_enum<eq>2<block_start><return>loss.sum()<block_end><block_end><def_stmt>weighted_sigmoid_focal_loss pred target weight gamma=2.0 alpha=0.25 avg_factor=<none> num_classes=80<block_start><if_stmt>avg_factor<is><none><block_start>avg_factor=torch.sum(weight<g>0).float().item()/num_classes+1e-6<block_end><return>sigmoid_focal_loss(pred target weight gamma=gamma alpha=alpha reduction='sum')[<none>]/avg_factor<block_end><def_stmt>mask_cross_entropy pred target label<block_start>num_rois=pred.size()[0]<line_sep>inds=torch.arange(0 num_rois dtype=torch.long device=pred.device)<line_sep>pred_slice=pred[inds label].squeeze(1)<line_sep><return>F.binary_cross_entropy_with_logits(pred_slice target reduction='mean')[<none>]<block_end><def_stmt>smooth_l1_loss pred target beta=1.0 reduction='mean'<block_start><assert_stmt>beta<g>0<assert_stmt>pred.size()<eq>target.size()<and>target.numel()<g>0<line_sep>diff=torch.abs(pred-target)<line_sep>loss=torch.where(diff<l>beta 0.5<times>diff<times>diff/beta diff-0.5<times>beta)<line_sep>reduction_enum=F._Reduction.get_enum(reduction)<line_sep># none: 0, mean:1, sum: 2
<if_stmt>reduction_enum<eq>0<block_start><return>loss<block_end><elif_stmt>reduction_enum<eq>1<block_start><return>loss.sum()/pred.numel()<block_end><elif_stmt>reduction_enum<eq>2<block_start><return>loss.sum()<block_end><block_end><def_stmt>weighted_iou_loss pred target weight style='naive' beta=0.2 eps=1e-3 avg_factor=<none><block_start><if_stmt>style<not><in>['bounded' 'naive']<block_start><raise>ValueError('Only support bounded iou loss and naive iou loss.')<block_end>inds=torch.nonzero(weight[: 0]<g>0)<if_stmt>avg_factor<is><none><block_start>avg_factor=inds.numel()+1e-6<block_end><if_stmt>inds.numel()<g>0<block_start>inds=inds.squeeze(1)<block_end><else_stmt><block_start><return>(pred<times>weight).sum()[<none>]/avg_factor<block_end><if_stmt>style<eq>'bounded'<block_start>loss=bounded_iou_loss(pred[inds] target[inds] beta=beta eps=eps reduction='sum')<block_end><else_stmt><block_start>loss=iou_loss(pred[inds] target[inds] reduction='sum')<block_end>loss=loss[<none>]/avg_factor<line_sep><return>loss<block_end><def_stmt>iou_loss pred_bboxes target_bboxes reduction='mean'<block_start>ious=bbox_overlaps(pred_bboxes target_bboxes is_aligned=<true>)<line_sep>loss=-ious.log()<line_sep>reduction_enum=F._Reduction.get_enum(reduction)<if_stmt>reduction_enum<eq>0<block_start><return>loss<block_end><elif_stmt>reduction_enum<eq>1<block_start><return>loss.mean()<block_end><elif_stmt>reduction_enum<eq>2<block_start><return>loss.sum()<block_end><block_end><def_stmt>weighted_smoothl1 pred target weight beta=1.0 avg_factor=<none><block_start><if_stmt>avg_factor<is><none><block_start>avg_factor=torch.sum(weight<g>0).float().item()/4+1e-6<block_end>loss=smooth_l1_loss(pred target beta reduction='none')<line_sep><return>torch.sum(loss<times>weight)[<none>]/avg_factor<block_end><def_stmt>accuracy pred target topk=1<block_start><if_stmt>isinstance(topk int)<block_start>topk=(topk )<line_sep>return_single=<true><block_end><else_stmt><block_start>return_single=<false><block_end>maxk=max(topk)<line_sep>_,pred_label=pred.topk(maxk 1 <true> <true>)<line_sep>pred_label=pred_label.t()<line_sep>correct=pred_label.eq(target.view(1 -1).expand_as(pred_label))<line_sep>res=[]<for_stmt>k topk<block_start>correct_k=correct[:k].view(-1).float().sum(0 keepdim=<true>)<line_sep>res.append(correct_k.mul_(100.0/pred.size(0)))<block_end><return>res[0]<if>return_single<else>res<block_end> |
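A toy check of two of the helpers above on random tensors (illustrative only; importing the module itself requires the surrounding detection package because of the relative bbox_overlaps import):
import torch
pred = torch.randn(8, 4)
target = pred + 0.05 * torch.randn(8, 4)
print(smooth_l1_loss(pred, target, beta=1.0))   # small scalar (mean over elements)
logits = torch.randn(8, 5)
labels = torch.randint(0, 5, (8,))
print(accuracy(logits, labels, topk=(1, 5)))    # [top-1, top-5]; top-5 is 100.0 with 5 classes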
<import_stmt>cv2<import_from_stmt>typing List<import_from_stmt>core.element.BaseImg BaseImg<import_from_stmt>core.element.CharImg CharImg<import_from_stmt>PIL Image ImageFont ImageDraw<import_stmt>os<import_stmt>numpy<as>np<import_stmt>json<import_from_stmt>utils time_util<as>tu<import_stmt>math<import_stmt>traceback<import_from_stmt>utils log<line_sep>TYPE_ORIENTATION_HORIZONTAL=0<line_sep>TYPE_ORIENTATION_VERTICAL=1<line_sep>TYPE_ALIGN_MODEL_B=0# text alignment mode: bottom/left aligned
TYPE_ALIGN_MODEL_C=1# text alignment mode: center aligned
TYPE_ALIGN_MODEL_T=2# text alignment mode: top/right aligned
<class_stmt>TextImg(BaseImg)<block_start>"""
Text string image object
"""<def_stmt>__init__ self char_obj_list:List[CharImg] text_img_output_dir text_img_info_output_dir orientation align_mode img:Image.Image=<none> img_path:str=<none> **kwargs<block_start>tmp_list=[]<for_stmt>item char_obj_list<block_start><if_stmt>isinstance(item dict)<block_start>tmp_list.append(CharImg(**item))<block_end><block_end><if_stmt>tmp_list<block_start>char_obj_list=tmp_list<block_end>self.char_obj_list=char_obj_list<line_sep>self.text="".join([char_obj.char<for>char_obj self.char_obj_list])<line_sep>self.text_img_output_dir=text_img_output_dir<line_sep>self.text_img_info_output_dir=text_img_info_output_dir<line_sep>self.orientation=orientation<line_sep>self.align_mode=align_mode<if_stmt>img_path<block_start>self.img_name=img_path.split(os.sep)[-1]<line_sep>self.name=self.img_name.split('.')[0]<line_sep>self.img_path=img_path<line_sep>self.img=load_img(self.img_path)<block_end><else_stmt><block_start>self.name=self._gen_name(align_mode orientation)<line_sep>self.img_name=self.name+".png"<line_sep>self.img_path=os.path.join(text_img_output_dir self.img_name)<line_sep>self.img=img<block_end><block_end><def_stmt>_gen_name self align_mode orientation<block_start>o="v"<if>orientation<eq>TYPE_ORIENTATION_VERTICAL<else>"h"<line_sep>a='b'<if_stmt>align_mode<eq>TYPE_ALIGN_MODEL_T<block_start>a='t'<block_end><elif_stmt>align_mode<eq>TYPE_ALIGN_MODEL_C<block_start>a='c'<block_end><return>tu.timestamp()+"_"+o+"_"+a+"_"+self.text.replace(" " "_")<block_end><def_stmt>__repr__ self<block_start><return>json.dumps(self.__dict__ cls=CharImgEncoder)<block_end><def_stmt>export self<block_start>"""
Export the image and its metadata
:return:
"""<line_sep>self.img.save(self.img_path)<line_sep>json_file_path=os.path.join(self.text_img_info_output_dir self.name+".json")<with_stmt>open(json_file_path 'w')<as>f<block_start>json.dump(self.__dict__ f cls=CharImgEncoder)<block_end><block_end>@staticmethod<def_stmt>load_from_json file_path<block_start>"""
Load an object from a JSON file
:param file_path:
:return:
"""<assert_stmt>os.path.exists(file_path) "json file is not exist,please check: {file_path}".format(file_path=file_path)<with_stmt>open(file_path 'r')<as>f<block_start>j=json.load(f)<line_sep><return>TextImg(**j)<block_end><block_end><def_stmt>show self with_box=<false><block_start>"""
Display the image
:param with_box:
:return:
"""<line_sep>image=self.cv_img()<if_stmt>with_box<block_start><for_stmt>char_obj self.char_obj_list<block_start>pt1=(char_obj.box[0] char_obj.box[1])<line_sep>pt2=(char_obj.box[2] char_obj.box[3])<line_sep>image=cv2.rectangle(image pt1=pt1 pt2=pt2 color=(0 0 255) thickness=1)<block_end><block_end>cv2.imshow(self.text image)<line_sep>cv2.waitKey()<line_sep>cv2.destroyWindow(self.text)<block_end><def_stmt>cv_img self<block_start>"""
Get the OpenCV image object
:return:
"""<line_sep>image=np.array(self.img)<line_sep>image=cv2.cvtColor(image cv2.COLOR_RGBA2BGRA)<line_sep><return>image<block_end><def_stmt>pil_img self<block_start>"""
Get the Pillow image object
:return:
"""<line_sep><return>self.img<block_end><block_end><class_stmt>CharImgEncoder(json.JSONEncoder)<block_start><def_stmt>default self o<block_start><if_stmt><not>isinstance(o Image.Image)<block_start><return>o.__dict__<block_end><block_end><block_end><def_stmt>load_img img_path<block_start>"""
Load an image file from disk
:param img_path:
:return:
"""<assert_stmt>os.path.exists(img_path) "image is not exist, please check. {img_path}".format(img_path=img_path)<line_sep><return>Image.open(img_path)<block_end><def_stmt>calc_bg_size font_path:str orientation:int char_obj_list:List[CharImg] spacing_rate:float padding auto_padding_to_ratio<arrow>tuple<block_start>"""
Compute the background size
:param font_path: path to the font file
:param orientation: layout orientation
:param char_obj_list: list of character objects
:param spacing_rate: spacing (as a ratio of the character size)
:param padding: inner padding
:param auto_padding_to_ratio: automatically pad to the given ratio (w/h for horizontal layout, h/w for vertical layout)
:return:
"""<line_sep>max_char_bg_w=0<line_sep>max_char_bg_h=0<line_sep>bg_w=0<line_sep>bg_h=0<for_stmt>index,char_obj enumerate(char_obj_list)<block_start>font=ImageFont.truetype(font_path size=char_obj.font_size)<line_sep># 获取当前字符的背景尺寸
char_bg_w=0<line_sep>char_bg_h=0<try_stmt><block_start>char_bg_w,char_bg_h=font.getsize(char_obj.char)<line_sep># add the border size
char_bg_w<augadd>char_obj.border_width<times>2<line_sep>char_bg_h<augadd>char_obj.border_width<times>2<block_end><except_stmt>Exception<as>e<block_start>traceback.print_exc()<block_end>char_obj.size=(char_bg_w char_bg_h)<line_sep># track the largest character image width/height in the current line of text
max_char_bg_w=char_bg_w<if>char_bg_w<g>max_char_bg_w<else>max_char_bg_w<line_sep>max_char_bg_h=char_bg_h<if>char_bg_h<g>max_char_bg_h<else>max_char_bg_h<line_sep># check whether this is the last character
is_last=index<eq>len(char_obj_list)-1<line_sep>r=0<if>is_last<else>spacing_rate<if_stmt>orientation<eq>TYPE_ORIENTATION_VERTICAL<block_start>bg_w=max_char_bg_w<line_sep>bg_h<augadd>math.ceil(char_obj.size[1]<times>(1+r))<block_end><else_stmt><block_start>bg_w<augadd>math.ceil(char_obj.size[0]<times>(1+r))<line_sep>bg_h=max_char_bg_h<block_end><block_end><if_stmt>auto_padding_to_ratio<g>0# 自动 padding 到指定尺寸
# if laid out horizontally, add padding on the left and right
# auto_padding_to_ratio = tw / th
<block_start><if_stmt>orientation<eq>TYPE_ORIENTATION_HORIZONTAL<block_start>st_w=auto_padding_to_ratio<times>bg_h<if_stmt>st_w<g>bg_w<block_start>d=round((st_w-bg_w)/2)<line_sep>padding=(d 0 d 0)<block_end><else_stmt><block_start>st_h=bg_w/auto_padding_to_ratio<line_sep>d=round((st_h-bg_h)/2)<line_sep>padding=(0 d 0 d)<block_end><block_end># 如果是竖直排列 则在上下两边加padding
# auto_padding_to_ratio = th / tw
<elif_stmt>orientation<eq>TYPE_ORIENTATION_VERTICAL<block_start>st_h=auto_padding_to_ratio<times>bg_w<if_stmt>st_h<g>bg_h<block_start>d=round((st_h-bg_h)/2)<line_sep>padding=(0 d 0 d)<block_end><else_stmt><block_start>st_w=bg_h/auto_padding_to_ratio<line_sep>d=round((st_w-bg_w)/2)<line_sep>padding=(d 0 d 0)<block_end><block_end><block_end>bg_w=bg_w+padding[0]+padding[2]<line_sep>bg_h=bg_h+padding[1]+padding[3]<line_sep><return>bg_w bg_h padding<block_end><def_stmt>draw_text font_path bg_w bg_h orientation char_obj_list:List[CharImg] spacing_rate align_mode padding<block_start>"""
Draw the text onto the text-image background
:param font_path:
:param bg_w:
:param bg_h:
:param orientation:
:param char_obj_list:
:param spacing_rate:
:param align_mode:
:param padding:
:return:
"""<line_sep>img=Image.new("RGBA" (bg_w bg_h) color=(0 0 0 0))<line_sep>draw=ImageDraw.Draw(img)<line_sep>font_area_w=bg_w-padding[0]-padding[2]<line_sep>font_area_h=bg_h-padding[1]-padding[3]<line_sep>tmp_char=<none><line_sep>l,t=0 0<for_stmt>index,char_obj enumerate(char_obj_list)<block_start>font=ImageFont.truetype(font_path size=char_obj.font_size)<line_sep>cw,ch=char_obj.size<if_stmt>orientation<eq>TYPE_ORIENTATION_VERTICAL<block_start><if_stmt>align_mode<eq>TYPE_ALIGN_MODEL_B<block_start>l=0<block_end><elif_stmt>align_mode<eq>TYPE_ALIGN_MODEL_C<block_start>l=math.ceil((font_area_w-cw)/2)<block_end><elif_stmt>align_mode<eq>TYPE_ALIGN_MODEL_T<block_start>l=font_area_w-cw<block_end><if_stmt>tmp_char<block_start>add_t=math.ceil(tmp_char.size[1]<times>(1+spacing_rate))<line_sep>t<augadd>add_t<block_end><else_stmt><block_start>t=0<block_end>l<augadd>padding[0]<if_stmt>index<eq>0<block_start>t<augadd>padding[1]<block_end>char_obj.box=[l t l+cw t+ch]<block_end><else_stmt><block_start>t=0<if_stmt>align_mode<eq>TYPE_ALIGN_MODEL_B<block_start>t=font_area_h-ch<block_end><elif_stmt>align_mode<eq>TYPE_ALIGN_MODEL_C<block_start>t=math.ceil((font_area_h-ch)/2)<block_end><elif_stmt>align_mode<eq>TYPE_ALIGN_MODEL_T<block_start>t=0<block_end><if_stmt>tmp_char<block_start>add_l=math.ceil(tmp_char.size[0]<times>(1+spacing_rate))<line_sep>l<augadd>add_l<block_end><else_stmt><block_start>l=0<block_end>t<augadd>padding[1]<if_stmt>index<eq>0<block_start>l<augadd>padding[0]<block_end>char_obj.box=[l t l+cw t+ch]<block_end>log.info("draw text >> {text} color: {color} font: {font}".format(text=char_obj.char color=char_obj.color font=font))<line_sep>draw.text((l+char_obj.border_width t+char_obj.border_width) text=char_obj.char fill=char_obj.color font=font)<if_stmt>char_obj.border_width<g>0<block_start>draw.rectangle(xy=tuple(char_obj.box) width=char_obj.border_width outline=char_obj.border_color)<block_end>tmp_char=char_obj<block_end><return>img<block_end><def_stmt>gen_batch_char_obj text color font_size border_width=0 border_color=(0 0 0 0)<arrow>List[CharImg]<block_start>"""
Generate a batch of CharImg objects
:param text:
:param color:
:param font_size:
:param border_width:
:param border_color:
:return:
"""<line_sep>char_obj_list=[]<for_stmt>char text<block_start>char_obj_list.append(CharImg(char font_size=font_size color=color border_width=border_width border_color=border_color))<block_end><return>char_obj_list<block_end><def_stmt>create char_obj_list:List[CharImg] orientation:int=TYPE_ORIENTATION_HORIZONTAL align_mode:int=TYPE_ALIGN_MODEL_B spacing_rate:float=0.08 padding=(0 0 0 0) auto_padding_to_ratio=0 font_path="" text_img_output_dir="" text_img_info_output_dir=""<block_start>"""
Generate a text image
:param char_obj_list: list of character objects
:param orientation: layout orientation
:param align_mode: text alignment mode
:param spacing_rate: spacing (as a ratio of the character size)
:param padding: inner padding
:param auto_padding_to_ratio: automatically pad to the given ratio; <=0 disables auto padding (w/h for horizontal layout, h/w for vertical layout)
:param font_path: path to the font file
:param text_img_output_dir:
:param text_img_info_output_dir:
:return:
"""<line_sep># 生成文本贴图的透明背景区域
bg_w,bg_h,padding=calc_bg_size(font_path orientation char_obj_list spacing_rate padding auto_padding_to_ratio)<line_sep># 绘制文字
img=draw_text(font_path bg_w bg_h orientation char_obj_list spacing_rate align_mode padding)<line_sep><return>TextImg(char_obj_list=char_obj_list text_img_output_dir=text_img_output_dir text_img_info_output_dir=text_img_info_output_dir orientation=orientation align_mode=align_mode img=img)<block_end> |
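A usage sketch for the helpers above; the font path and output directories are placeholders and must exist on disk before export() is called:
chars = gen_batch_char_obj("hello", color=(255, 0, 0, 255), font_size=32)
text_img = create(chars,
                  orientation=TYPE_ORIENTATION_HORIZONTAL,
                  align_mode=TYPE_ALIGN_MODEL_C,
                  font_path="fonts/some_font.ttf",          # placeholder font file
                  text_img_output_dir="out/img",            # placeholder directories
                  text_img_info_output_dir="out/info")
text_img.show(with_box=True)
text_img.export()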
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/05/26
@author: <NAME>
@group : **
@contact: <EMAIL>
"""<def_stmt>ma data n=10 val_name="close"<block_start><import_stmt>numpy<as>np<line_sep>'''
Moving Average
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
moving-average window length; the time unit depends on data
val_name:string
name of the column to compute on, default "close" (closing price)
return
-------
list
moving average values
'''<line_sep>values=[]<line_sep>MA=[]<for_stmt>index,row data.iterrows()<block_start>values.append(row[val_name])<if_stmt>len(values)<eq>n<block_start><del_stmt>values[0]<block_end>MA.append(np.average(values))<block_end><return>np.asarray(MA)<block_end><def_stmt>md data n=10 val_name="close"<block_start><import_stmt>numpy<as>np<line_sep>'''
Moving Standard Deviation
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length; the time unit depends on data
val_name:string
name of the column to compute on, default "close" (closing price)
return
-------
list
moving standard deviation values
'''<line_sep>values=[]<line_sep>MD=[]<for_stmt>index,row data.iterrows()<block_start>values.append(row[val_name])<if_stmt>len(values)<eq>n<block_start><del_stmt>values[0]<block_end>MD.append(np.std(values))<block_end><return>np.asarray(MD)<block_end><def_stmt>_get_day_ema prices n<block_start>a=1-2/(n+1)<line_sep>day_ema=0<for_stmt>index,price enumerate(reversed(prices))<block_start>day_ema<augadd>a<power>index<times>price<block_end><return>day_ema<block_end><def_stmt>ema data n=12 val_name="close"<block_start><import_stmt>numpy<as>np<line_sep>'''
Exponential Moving Average
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length; the time unit depends on data
val_name:string
name of the column to compute on, default "close" (closing price)
return
-------
EMA:numpy.ndarray<numpy.float64>
exponential moving average values
'''<line_sep>prices=[]<line_sep>EMA=[]<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<eq>0<block_start>past_ema=row[val_name]<line_sep>EMA.append(row[val_name])<block_end><else_stmt># Y=[2*X+(N-1)*Y’]/(N+1)
<block_start>today_ema=(2<times>row[val_name]+(n-1)<times>past_ema)/(n+1)<line_sep>past_ema=today_ema<line_sep>EMA.append(today_ema)<block_end><block_end><return>np.asarray(EMA)<block_end><def_stmt>macd data quick_n=12 slow_n=26 dem_n=9 val_name="close"<block_start><import_stmt>numpy<as>np<line_sep>'''
Moving Average Convergence Divergence (MACD)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
quick_n:int
fast EMA period used for the DIFF line
slow_n:int
slow EMA period used for the DIFF line
dem_n:int
EMA period of the DEM (signal) line
val_name:string
name of the column to compute on, default "close" (closing price)
return
-------
OSC:numpy.ndarray<numpy.float64>
MACD bar / OSC histogram, DIFF - DEM
DIFF:numpy.ndarray<numpy.float64>
DIFF line (fast EMA - slow EMA)
DEM:numpy.ndarray<numpy.float64>
DEM (signal) line
'''<line_sep>ema_quick=np.asarray(ema(data quick_n val_name))<line_sep>ema_slow=np.asarray(ema(data slow_n val_name))<line_sep>DIFF=ema_quick-ema_slow<line_sep>data["diff"]=DIFF<line_sep>DEM=ema(data dem_n "diff")<line_sep>OSC=DIFF-DEM<line_sep><return>OSC DIFF DEM<block_end><def_stmt>kdj data<block_start><import_stmt>numpy<as>np<line_sep>'''
KDJ Stochastic Oscillator
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
return
-------
K:numpy.ndarray<numpy.float64>
K values
D:numpy.ndarray<numpy.float64>
D values
J:numpy.ndarray<numpy.float64>
J values
'''<line_sep>K,D,J=[] [] []<line_sep>last_k,last_d=<none> <none><for_stmt>index,row data.iterrows()<block_start><if_stmt>last_k<is><none><or>last_d<is><none><block_start>last_k=50<line_sep>last_d=50<block_end>c,l,h=row["close"] row["low"] row["high"]<line_sep>rsv=(c-l)/(h-l)<times>100<line_sep>k=(2/3)<times>last_k+(1/3)<times>rsv<line_sep>d=(2/3)<times>last_d+(1/3)<times>k<line_sep>j=3<times>k-2<times>d<line_sep>K.append(k)<line_sep>D.append(d)<line_sep>J.append(j)<line_sep>last_k,last_d=k d<block_end><return>np.asarray(K) np.asarray(D) np.asarray(J)<block_end><def_stmt>rsi data n=6 val_name="close"<block_start><import_stmt>numpy<as>np<line_sep>'''
Relative Strength Index (RSI)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length; the time unit depends on data
return
-------
RSI:numpy.ndarray<numpy.float64>
RSI values
'''<line_sep>RSI=[]<line_sep>UP=[]<line_sep>DOWN=[]<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<eq>0<block_start>past_value=row[val_name]<line_sep>RSI.append(0)<block_end><else_stmt><block_start>diff=row[val_name]-past_value<if_stmt>diff<g>0<block_start>UP.append(diff)<line_sep>DOWN.append(0)<block_end><else_stmt><block_start>UP.append(0)<line_sep>DOWN.append(diff)<block_end><if_stmt>len(UP)<eq>n<block_start><del_stmt>UP[0]<block_end><if_stmt>len(DOWN)<eq>n<block_start><del_stmt>DOWN[0]<block_end>past_value=row[val_name]<line_sep>rsi=np.sum(UP)/(-np.sum(DOWN)+np.sum(UP))<times>100<line_sep>RSI.append(rsi)<block_end><block_end><return>np.asarray(RSI)<block_end><def_stmt>boll data n=10 val_name="close" k=2<block_start>'''
Bollinger Bands (BOLL)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length; the time unit depends on data
return
-------
BOLL:numpy.ndarray<numpy.float64>
middle band
UPPER:numpy.ndarray<numpy.float64>
upper band
LOWER:numpy.ndarray<numpy.float64>
lower band
'''<line_sep>BOLL=ma(data n val_name)<line_sep>MD=md(data n val_name)<line_sep>UPPER=BOLL+k<times>MD<line_sep>LOWER=BOLL-k<times>MD<line_sep><return>BOLL UPPER LOWER<block_end><def_stmt>wnr data n=14<block_start>'''
Williams %R (W&R)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length; the time unit depends on data
return
-------
WNR:numpy.ndarray<numpy.float64>
Williams %R values
'''<line_sep>high_prices=[]<line_sep>low_prices=[]<line_sep>WNR=[]<for_stmt>index,row data.iterrows()<block_start>high_prices.append(row["high"])<if_stmt>len(high_prices)<eq>n<block_start><del_stmt>high_prices[0]<block_end>low_prices.append(row["low"])<if_stmt>len(low_prices)<eq>n<block_start><del_stmt>low_prices[0]<block_end>highest=max(high_prices)<line_sep>lowest=min(low_prices)<line_sep>wnr=(highest-row["close"])/(highest-lowest)<times>100<line_sep>WNR.append(wnr)<block_end><return>WNR<block_end><def_stmt>_get_any_ma arr n<block_start><import_stmt>numpy<as>np<line_sep>MA=[]<line_sep>values=[]<for_stmt>val arr<block_start>values.append(val)<if_stmt>len(values)<eq>n<block_start><del_stmt>values[0]<block_end>MA.append(np.average(values))<block_end><return>np.asarray(MA)<block_end><def_stmt>dmi data n=14 m=14 k=6<block_start><import_stmt>numpy<as>np<line_sep>'''
Directional Movement Index (DMI)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
+-DI(n): window length for +DI/-DI, default 14
m:int
ADX(m): smoothing period for ADX, default 14
k:int
ADXR(k): ADXR looks back k periods, default 6
return
-------
P_DI:numpy.ndarray<numpy.float64>
+DI values
M_DI:numpy.ndarray<numpy.float64>
-DI values
ADX:numpy.ndarray<numpy.float64>
ADX values
ADXR:numpy.ndarray<numpy.float64>
ADXR values
ref.
-------
https://www.mk-mode.com/octopress/2012/03/03/03002038/
'''<line_sep># upward directional movement (+DM)
P_DM=[0.]<line_sep># downward directional movement (-DM)
M_DM=[0.]<line_sep># true range (TR)
TR=[0.]<line_sep># directional movement
DX=[0.]<line_sep>P_DI=[0.]<line_sep>M_DI=[0.]<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<eq>0<block_start>past_row=row<block_end><else_stmt><block_start>p_dm=row["high"]-past_row["high"]<line_sep>m_dm=past_row["low"]-row["low"]<if_stmt>(p_dm<l>0<and>m_dm<l>0)<or>(np.isclose(p_dm m_dm))<block_start>p_dm=0<line_sep>m_dm=0<block_end><if_stmt>p_dm<g>m_dm<block_start>m_dm=0<block_end><if_stmt>m_dm<g>p_dm<block_start>p_dm=0<block_end>P_DM.append(p_dm)<line_sep>M_DM.append(m_dm)<line_sep>tr=max(row["high"]-past_row["low"] row["high"]-past_row["close"] past_row["close"]-row["low"])<line_sep>TR.append(tr)<if_stmt>len(P_DM)<eq>n<block_start><del_stmt>P_DM[0]<block_end><if_stmt>len(M_DM)<eq>n<block_start><del_stmt>M_DM[0]<block_end><if_stmt>len(TR)<eq>n<block_start><del_stmt>TR[0]<block_end># 上升方向线(+DI)
p_di=(np.average(P_DM)/np.average(TR))<times>100<line_sep>P_DI.append(p_di)<line_sep># minus directional indicator (-DI)
m_di=(np.average(M_DM)/np.average(TR))<times>100<line_sep>M_DI.append(m_di)<line_sep># single-day +DI and -DI
# p_day_di = (p_dm / tr) * 100
# m_day_di = (m_dm / tr) * 100
# directional movement DX
# dx = (di_dif / di_sum) * 100
# di_dif is the absolute difference between the +DI and -DI values
# di_sum is the sum of the +DI and -DI values
# ADX is the n-period moving average of DX.
<if_stmt>(p_di+m_di)<eq>0<block_start>dx=0<block_end><else_stmt><block_start>dx=(abs(p_di-m_di)/(p_di+m_di))<times>100<block_end>DX.append(dx)<line_sep>past_row=row<block_end><block_end>ADX=_get_any_ma(DX m)<line_sep>#
# # ADXR estimate
ADXR=[]<for_stmt>index,adx enumerate(ADX)<block_start><if_stmt>index<ge>k<block_start>adxr=(adx+ADX[index-k])/2<line_sep>ADXR.append(adxr)<block_end><else_stmt><block_start>ADXR.append(0)<block_end><block_end><return>P_DI M_DI ADX ADXR<block_end><def_stmt>bias data n=5<block_start><import_stmt>numpy<as>np<line_sep>'''
Bias Ratio (BIAS)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length, default 5
return
-------
BIAS:numpy.ndarray<numpy.float64>
bias ratio values
'''<line_sep>MA=ma(data n)<line_sep>CLOSES=data["close"]<line_sep>BIAS=(np.true_divide((CLOSES-MA) MA))<times>(100/100)<line_sep><return>BIAS<block_end><def_stmt>asi data n=5<block_start><import_stmt>numpy<as>np<line_sep>'''
Accumulation Swing Index (ASI)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length, default 5
return
-------
ASI:numpy.ndarray<numpy.float64>
accumulation swing index values
'''<line_sep>SI=[]<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<eq>0<block_start>last_row=row<line_sep>SI.append(0.)<block_end><else_stmt><block_start>a=abs(row["close"]-last_row["close"])<line_sep>b=abs(row["low"]-last_row["close"])<line_sep>c=abs(row["high"]-last_row["close"])<line_sep>d=abs(last_row["close"]-last_row["open"])<if_stmt>b<g>a<and>b<g>c<block_start>r=b+(1/2)<times>a+(1/4)<times>d<block_end><elif_stmt>c<g>a<and>c<g>b<block_start>r=c+(1/4)<times>d<block_end><else_stmt><block_start>r=0<block_end>e=row["close"]-last_row["close"]<line_sep>f=row["close"]-last_row["open"]<line_sep>g=last_row["close"]-last_row["open"]<line_sep>x=e+(1/2)<times>f+g<line_sep>k=max(a b)<line_sep>l=3<if_stmt>np.isclose(r 0)<or>np.isclose(l 0)<block_start>si=0<block_end><else_stmt><block_start>si=50<times>(x/r)<times>(k/l)<block_end>SI.append(si)<block_end><block_end>ASI=_get_any_ma(SI n)<line_sep><return>ASI<block_end><def_stmt>vr data n=26<block_start><import_stmt>numpy<as>np<line_sep>'''
Volatility Volume Ratio (VR)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length, default 26
return
-------
VR:numpy.ndarray<numpy.float64>
volume ratio values
'''<line_sep>VR=[]<line_sep>AV_volumes,BV_volumes,CV_volumes=[] [] []<for_stmt>index,row data.iterrows()<block_start><if_stmt>row["close"]<g>row["open"]<block_start>AV_volumes.append(row["volume"])<block_end><elif_stmt>row["close"]<l>row["open"]<block_start>BV_volumes.append(row["volume"])<block_end><else_stmt><block_start>CV_volumes.append(row["volume"])<block_end><if_stmt>len(AV_volumes)<eq>n<block_start><del_stmt>AV_volumes[0]<block_end><if_stmt>len(BV_volumes)<eq>n<block_start><del_stmt>BV_volumes[0]<block_end><if_stmt>len(CV_volumes)<eq>n<block_start><del_stmt>CV_volumes[0]<block_end>avs=sum(AV_volumes)<line_sep>bvs=sum(BV_volumes)<line_sep>cvs=sum(CV_volumes)<if_stmt>(bvs+(1/2)<times>cvs)<ne>0<block_start>vr=(avs+(1/2)<times>cvs)/(bvs+(1/2)<times>cvs)<block_end><else_stmt><block_start>vr=0<block_end>VR.append(vr)<block_end><return>np.asarray(VR)<block_end><def_stmt>arbr data n=26<block_start><import_stmt>numpy<as>np<line_sep>'''
AR and BR indicators
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length, default 26
return
-------
AR:numpy.ndarray<numpy.float64>
AR values
BR:numpy.ndarray<numpy.float64>
BR values
'''<line_sep>H,L,O,PC=np.array([0]) np.array([0]) np.array([0]) np.array([0])<line_sep>AR,BR=np.array([0]) np.array([0])<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<eq>0<block_start>last_row=row<block_end><else_stmt><block_start>h=row["high"]<line_sep>H=np.append(H [h])<if_stmt>len(H)<eq>n<block_start>H=np.delete(H 0)<block_end>l=row["low"]<line_sep>L=np.append(L [l])<if_stmt>len(L)<eq>n<block_start>L=np.delete(L 0)<block_end>o=row["open"]<line_sep>O=np.append(O [o])<if_stmt>len(O)<eq>n<block_start>O=np.delete(O 0)<block_end>pc=last_row["close"]<line_sep>PC=np.append(PC [pc])<if_stmt>len(PC)<eq>n<block_start>PC=np.delete(PC 0)<block_end>ar=(np.sum(np.asarray(H)-np.asarray(O))/sum(np.asarray(O)-np.asarray(L)))<times>100<line_sep>AR=np.append(AR [ar])<line_sep>br=(np.sum(np.asarray(H)-np.asarray(PC))/sum(np.asarray(PC)-np.asarray(L)))<times>100<line_sep>BR=np.append(BR [br])<line_sep>last_row=row<block_end><block_end><return>np.asarray(AR) np.asarray(BR)<block_end><def_stmt>dpo data n=20 m=6<block_start>'''
Detrended Price Oscillator (DPO)
Parameters
------
data:pandas.DataFrame
stock data returned by get_h_data
n:int
window length, default 20
m:int
parameter M of MADPO, default 6
return
-------
DPO:numpy.ndarray<numpy.float64>
DPO values
MADPO:numpy.ndarray<numpy.float64>
MADPO values
'''<line_sep>CLOSES=data["close"]<line_sep>DPO=CLOSES-ma(data int(n/2+1))<line_sep>MADPO=_get_any_ma(DPO m)<line_sep><return>DPO MADPO<block_end><def_stmt>trix data n=12 m=20<block_start><import_stmt>numpy<as>np<line_sep>'''
Triple Exponentially Smoothed Moving Average (TRIX)
Parameters
------
data:pandas.DataFrame
    stock data retrieved via get_h_data
n:int
    window length, default 12
m:int
    parameter M of TRMA, default 20
return
-------
TRIX:numpy.ndarray<numpy.float64>
    TRIX values
TRMA:numpy.ndarray<numpy.float64>
    TRMA values
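Example (illustrative sketch; ``df`` is assumed to be a get_h_data DataFrame):
    TRIX, TRMA = trix(df, n=12, m=20)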
'''<line_sep>CLOSES=[]<line_sep>TRIX=[]<for_stmt>index,row data.iterrows()<block_start>CLOSES.append(row["close"])<if_stmt>len(CLOSES)<eq>n<block_start><del_stmt>CLOSES[0]<block_end>tr=np.average(CLOSES)<if_stmt>index<eq>0<block_start>past_tr=tr<line_sep>TRIX.append(0)<block_end><else_stmt><block_start>trix=(tr-past_tr)/past_tr<times>100<line_sep>TRIX.append(trix)<block_end>past_tr=tr<block_end>TRMA=_get_any_ma(TRIX m)<line_sep><return>TRIX TRMA<block_end><def_stmt>bbi data<block_start><import_stmt>numpy<as>np<line_sep>'''
Bull and Bear Index (BBI)
Parameters
------
data:pandas.DataFrame
    stock data retrieved via get_h_data
return
-------
BBI:numpy.ndarray<numpy.float64>
    BBI values
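Example (illustrative sketch; ``df`` is assumed to be a get_h_data DataFrame):
    BBI = bbi(df)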
'''<line_sep>CS=[]<line_sep>BBI=[]<for_stmt>index,row data.iterrows()<block_start>CS.append(row["close"])<if_stmt>len(CS)<l>24<block_start>BBI.append(row["close"])<block_end><else_stmt><block_start>bbi=np.average([np.average(CS[-3:]) np.average(CS[-6:]) np.average(CS[-12:]) np.average(CS[-24:])])<line_sep>BBI.append(bbi)<block_end><block_end><return>np.asarray(BBI)<block_end><def_stmt>mtm data n=6<block_start><import_stmt>numpy<as>np<line_sep>'''
Momentum (MTM)
Parameters
------
data:pandas.DataFrame
    stock data retrieved via get_h_data
n:int
    window length, default 6
return
-------
MTM:numpy.ndarray<numpy.float64>
    momentum values
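Example (illustrative sketch; ``df`` is assumed to be a get_h_data DataFrame):
    MTM = mtm(df, n=6)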
'''<line_sep>MTM=[]<line_sep>CN=[]<for_stmt>index,row data.iterrows()<block_start><if_stmt>index<l>n-1<block_start>MTM.append(0.)<block_end><else_stmt><block_start>mtm=row["close"]-CN[index-n]<line_sep>MTM.append(mtm)<block_end>CN.append(row["close"])<block_end><return>np.asarray(MTM)<block_end><def_stmt>obv data<block_start><import_stmt>numpy<as>np<line_sep>'''
On Balance Volume (OBV)
Parameters
------
data:pandas.DataFrame
    stock data retrieved via get_h_data
return
-------
OBV:numpy.ndarray<numpy.float64>
    OBV values
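Example (illustrative sketch; ``df`` is assumed to be a get_h_data DataFrame):
    OBV = obv(df)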
'''<line_sep>tmp=np.true_divide(((data["close"]-data["low"])-(data["high"]-data["close"])) (data["high"]-data["low"]))<line_sep>OBV=tmp<times>data["volume"]<line_sep><return>OBV<block_end><def_stmt>sar data n=4<block_start><raise>Exception("Not implemented yet")<block_end><def_stmt>plot_all data is_show=<true> output=<none><block_start><import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>pylab rcParams<import_stmt>numpy<as>np<line_sep>rcParams['figure.figsize']=18 50<line_sep>plt.figure()<line_sep># Close price
plt.subplot(20 1 1)<line_sep>plt.plot(data["date"] data["close"] label="close")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Moving average (MA)
plt.subplot(20 1 2)<line_sep>MA=ma(data n=10)<line_sep>plt.plot(data["date"] MA label="MA(n=10)")<line_sep>plt.plot(data["date"] data["close"] label="CLOSE PRICE")<line_sep>plt.title("MA")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Moving standard deviation (MD)
n=10<line_sep>plt.subplot(20 1 3)<line_sep>MD=md(data n)<line_sep>plt.plot(data["date"] MD label="MD(n=10)")<line_sep>plt.title("MD")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Exponential moving average (EMA)
plt.subplot(20 1 4)<line_sep>EMA=ema(data n)<line_sep>plt.plot(data["date"] EMA label="EMA(n=12)")<line_sep>plt.title("EMA")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># MACD (Moving Average Convergence Divergence)
plt.subplot(20 1 5)<line_sep>OSC,DIFF,DEM=macd(data n)<line_sep>plt.plot(data["date"] OSC label="OSC")<line_sep>plt.plot(data["date"] DIFF label="DIFF")<line_sep>plt.plot(data["date"] DEM label="DEM")<line_sep>plt.title("MACD")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Stochastic oscillator (KDJ)
plt.subplot(20 1 6)<line_sep>K,D,J=kdj(data)<line_sep>plt.plot(data["date"] K label="K")<line_sep>plt.plot(data["date"] D label="D")<line_sep>plt.plot(data["date"] J label="J")<line_sep>plt.title("KDJ")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Relative Strength Index (RSI)
plt.subplot(20 1 7)<line_sep>RSI6=rsi(data 6)<line_sep>RSI12=rsi(data 12)<line_sep>RSI24=rsi(data 24)<line_sep>plt.plot(data["date"] RSI6 label="RSI(n=6)")<line_sep>plt.plot(data["date"] RSI12 label="RSI(n=12)")<line_sep>plt.plot(data["date"] RSI24 label="RSI(n=24)")<line_sep>plt.title("RSI")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Bollinger Bands (BOLL)
plt.subplot(20 1 8)<line_sep>BOLL,UPPER,LOWER=boll(data)<line_sep>plt.plot(data["date"] BOLL label="BOLL(n=10)")<line_sep>plt.plot(data["date"] UPPER label="UPPER(n=10)")<line_sep>plt.plot(data["date"] LOWER label="LOWER(n=10)")<line_sep>plt.plot(data["date"] data["close"] label="CLOSE PRICE")<line_sep>plt.title("BOLL")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Williams %R (W&R)
plt.subplot(20 1 9)<line_sep>WNR=wnr(data n=14)<line_sep>plt.plot(data["date"] WNR label="WNR(n=14)")<line_sep>plt.title("WNR")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Directional Movement Index (DMI)
plt.subplot(20 1 10)<line_sep>P_DI,M_DI,ADX,ADXR=dmi(data)<line_sep>plt.plot(data["date"] P_DI label="+DI(n=14)")<line_sep>plt.plot(data["date"] M_DI label="-DI(n=14)")<line_sep>plt.plot(data["date"] ADX label="ADX(m=14)")<line_sep>plt.plot(data["date"] ADXR label="ADXR(k=6)")<line_sep>plt.title("DMI")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Bias ratio (BIAS)
plt.subplot(20 1 11)<line_sep>BIAS=bias(data n=5)<line_sep>plt.plot(data["date"] BIAS label="BIAS(n=5)")<line_sep>plt.title("BIAS")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Accumulation Swing Index (ASI)
plt.subplot(20 1 12)<line_sep>ASI=asi(data n=5)<line_sep>plt.plot(data["date"] ASI label="ASI(n=5)")<line_sep>plt.title("ASI")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Volume Ratio (VR)
plt.subplot(20 1 13)<line_sep>VR=vr(data n=26)<line_sep>plt.plot(data["date"] VR label="VR(n=26)")<line_sep>plt.title("VR")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># AR and BR indicators (ARBR)
plt.subplot(20 1 14)<line_sep>AR,BR=arbr(data n=26)<line_sep>plt.plot(data["date"] AR label="AR(n=26)")<line_sep>plt.plot(data["date"] BR label="BR(n=26)")<line_sep>plt.title("ARBR")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Detrended Price Oscillator (DPO)
plt.subplot(20 1 15)<line_sep>DPO,MADPO=dpo(data n=20 m=6)<line_sep>plt.plot(data["date"] DPO label="DPO(n=20)")<line_sep>plt.plot(data["date"] MADPO label="MADPO(m=6)")<line_sep>plt.title("DPO")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Triple exponentially smoothed average (TRIX)
plt.subplot(20 1 16)<line_sep>TRIX,TRMA=trix(data n=12 m=20)<line_sep>plt.plot(data["date"] TRIX label="TRIX(n=12)")<line_sep>plt.plot(data["date"] TRMA label="TRMA(m=20)")<line_sep>plt.title("TRIX")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Bull and Bear Index (BBI)
plt.subplot(20 1 17)<line_sep>BBI=bbi(data)<line_sep>plt.plot(data["date"] BBI label="BBI(3,6,12,24)")<line_sep>plt.title("BBI")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># Momentum (MTM)
plt.subplot(20 1 18)<line_sep>MTM=mtm(data n=6)<line_sep>plt.plot(data["date"] MTM label="MTM(n=6)")<line_sep>plt.title("MTM")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep># On Balance Volume (OBV)
plt.subplot(20 1 19)<line_sep>OBV=obv(data)<line_sep>plt.plot(data["date"] OBV label="OBV")<line_sep>plt.title("OBV")<line_sep>plt.xlabel('date')<line_sep>plt.ylabel('value')<line_sep>plt.legend()<line_sep>plt.xticks(rotation=90)<line_sep>plt.tight_layout()<if_stmt>is_show<block_start>plt.show()<block_end><if_stmt>output<is><not><none><block_start>plt.savefig(output)<block_end><block_end> |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/NowPlayingClient.proto
"""Generated protocol buffer code."""<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='pyatv/protocols/mrp/protobuf/NowPlayingClient.proto' package='' syntax='proto2' serialized_options=<none> create_key=_descriptor._internal_create_key serialized_pb=b'\n3pyatv/protocols/mrp/protobuf/NowPlayingClient.proto\"\xe8\x01\n\x10NowPlayingClient\x12\x19\n\x11processIdentifier\x18\x01 \x01(\x05\x12\x18\n\x10\x62undleIdentifier\x18\x02 \x01(\t\x12)\n!parentApplicationBundleIdentifier\x18\x03 \x01(\t\x12\x1d\n\x15processUserIdentifier\x18\x04 \x01(\x05\x12\x1c\n\x14nowPlayingVisibility\x18\x05 \x01(\x05\x12\x13\n\x0b\x64isplayName\x18\x07 \x01(\t\x12\"\n\x1a\x62undleIdentifierHierarchys\x18\x08 \x03(\t')<line_sep>_NOWPLAYINGCLIENT=_descriptor.Descriptor(name='NowPlayingClient' full_name='NowPlayingClient' filename=<none> file=DESCRIPTOR containing_type=<none> create_key=_descriptor._internal_create_key fields=[_descriptor.FieldDescriptor(name='processIdentifier' full_name='NowPlayingClient.processIdentifier' index=0 number=1 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='bundleIdentifier' full_name='NowPlayingClient.bundleIdentifier' index=1 number=2 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=b"".decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='parentApplicationBundleIdentifier' full_name='NowPlayingClient.parentApplicationBundleIdentifier' index=2 number=3 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=b"".decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='processUserIdentifier' full_name='NowPlayingClient.processUserIdentifier' index=3 number=4 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='nowPlayingVisibility' full_name='NowPlayingClient.nowPlayingVisibility' index=4 number=5 type=5 cpp_type=1 label=1 has_default_value=<false> default_value=0 message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='displayName' full_name='NowPlayingClient.displayName' index=5 number=7 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=b"".decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) _descriptor.FieldDescriptor(name='bundleIdentifierHierarchys' full_name='NowPlayingClient.bundleIdentifierHierarchys' index=6 number=8 type=9 cpp_type=9 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> 
extension_scope=<none> serialized_options=<none> file=DESCRIPTOR create_key=_descriptor._internal_create_key) ] extensions=[] nested_types=[] enum_types=[] serialized_options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=56 serialized_end=288 )<line_sep>DESCRIPTOR.message_types_by_name['NowPlayingClient']=_NOWPLAYINGCLIENT<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>NowPlayingClient=_reflection.GeneratedProtocolMessageType('NowPlayingClient' (_message.Message ) {'DESCRIPTOR':_NOWPLAYINGCLIENT '__module__':'pyatv.protocols.mrp.protobuf.NowPlayingClient_pb2'# @@protoc_insertion_point(class_scope:NowPlayingClient)
})<line_sep>_sym_db.RegisterMessage(NowPlayingClient)<line_sep># @@protoc_insertion_point(module_scope)
|
<import_from_stmt>.Candle Candle<import_from_stmt>.CompletedTrade CompletedTrade<import_from_stmt>.Exchange Exchange<import_from_stmt>.FuturesExchange FuturesExchange<import_from_stmt>.Order Order<import_from_stmt>.Position Position<import_from_stmt>.Route Route<import_from_stmt>.SpotExchange SpotExchange<import_from_stmt>.Ticker Ticker<import_from_stmt>.utils store_candle_into_db store_ticker_into_db store_trade_into_db store_orderbook_into_db<line_sep> |
<import_stmt>os<import_stmt>sys<import_from_stmt>unittest2.loader defaultTestLoader<def_stmt>collector # import __main__ triggers code re-execution
<block_start>__main__=sys.modules['__main__']<line_sep>setupDir=os.path.abspath(os.path.dirname(__main__.__file__))<line_sep><return>defaultTestLoader.discover(setupDir)<block_end> |
<import_stmt>os<import_stmt>hashlib<import_stmt>time<import_from_stmt>sys exit<line_sep>dirName=os.path.dirname(os.path.realpath(__file__))<line_sep>yesAnswers=['yes' 'Yes' 'YES' 'Y' 'y' '']<line_sep>noAnswers=['No' 'no' 'n' 'N' 'NO']<line_sep>ENILfilePath=''<def_stmt>getListOfFiles dirName# create a list of file and sub directories
# names in the given directory
<block_start>listOfFile=os.listdir(dirName)<line_sep>allFiles=list()<line_sep># Iterate over all the entries
<for_stmt>entry listOfFile# Create full path
<block_start>fullPath=os.path.join(dirName entry)<line_sep># If entry is a directory then get the list of files in this directory
<if_stmt>os.path.isdir(fullPath)<block_start>allFiles=allFiles+getListOfFiles(fullPath)<block_end><else_stmt><block_start>allFiles.append(fullPath)<block_end><block_end>#remove hashchecker.enil from allFiles as the hash for that will never match cached hash
<for_stmt>entry allFiles<block_start><if_stmt>entry[-5:]<eq>".enil"<block_start>proxy=allFiles.index(entry)<line_sep>ENILfilePath=allFiles.pop(proxy)<block_end><block_end><return>allFiles<block_end><def_stmt>encrypterString unencryptedString#takes any string and converts it into an encrypted string based on cipherSource
<block_start>CSVstring=''<line_sep>cipherSource="abCDlm:nfcde)istuxyzv-UVWjkBghGYoEFpq+rw*1(2H89\\0.~53K LIMQ_T467RSNOP=/AZ;"<line_sep>length=len(unencryptedString)<line_sep>proxy=""<for_stmt>char range(0 length)<block_start>indexNum=cipherSource.index(str(unencryptedString[char]))+1<line_sep>proxy=proxy+cipherSource[indexNum]<block_end>CSVstring=proxy+","<line_sep>correctedCSVstring=CSVstring[0:-1]<line_sep><return>correctedCSVstring<block_end>allFiles=getListOfFiles(dirName)<def_stmt>encrypterList unencryptedList#Takes a list of strings and returns a comma separated string of encrypted strings from the list
<block_start>outputCSVstring=''<for_stmt>file_name unencryptedList<block_start>proxy=encrypterString(file_name)<line_sep>outputCSVstring=outputCSVstring+','+proxy<block_end>correctedOutputCSVstring=outputCSVstring[1:]<line_sep><return>(correctedOutputCSVstring)<block_end><def_stmt>decrypterString CSVstring#same as encrypter string but decrypts
<block_start>outputString=''<line_sep>cipherSource="abCDlm:nfcde)istuxyzv-UVWjkBghGYoEFpq+rw*1(2H89\\0.~53K LIMQ_T467RSNOP=/AZ;"<line_sep>length=len(CSVstring)<line_sep>proxy=""<for_stmt>char range(0 length)<block_start><if_stmt>CSVstring[char]<eq>","<block_start>proxy=proxy+","<block_end><else_stmt><block_start>indexNum=cipherSource.index(str(CSVstring[char]))-1<line_sep>proxy=proxy+cipherSource[indexNum]<block_end><block_end>outputString=proxy+","<line_sep><return>outputString<block_end><def_stmt>decrypterList encryptedList#same as encrypterList but decrypts
<block_start>outputString=''<for_stmt>encrypted_item encryptedList<block_start>proxy=decrypterString(encrypted_item)<line_sep>outputString=outputString+','+proxy<block_end>correctedOutputCSVstring=outputString[1:]<line_sep><return>(correctedOutputCSVstring)<block_end><def_stmt>storeCreator hashesDict masterHash#creating the text for the enil file
<block_start>tempFiles=list(hashesDict.keys())<line_sep>tempHashes=list(hashesDict.values())<line_sep>#preparing the CSV string of files in a ciphered way
files=""<line_sep>files=encrypterList(tempFiles)<line_sep>files=files+"\n"<line_sep>#preparing CSV string of hashes in a ciphered way
hashes=""<line_sep>hashes=encrypterList(tempHashes)<line_sep>hashes=hashes+"\n"<line_sep>#preparing masterHash in a ciphered way
masterHash=encrypterString(masterHash)<line_sep><return>(files hashes masterHash)<block_end><def_stmt>dictcreator allFiles#creates a dictionary of filePath:hash of file and a hash of the string made of the sum of all hashes(called the masterHash)
<block_start>hashesDict={}<line_sep>masterHash=""<line_sep>sha256_hash=hashlib.sha256()<for_stmt>filename allFiles<block_start><with_stmt>open(filename "rb")<as>f# Read and update hash string value in blocks of 4K
<block_start><for_stmt>byte_block iter(<lambda>:f.read(4096) b"")<block_start>sha256_hash.update(byte_block)<line_sep>g=sha256_hash.hexdigest()<block_end>hashesDict[filename]=g<line_sep>masterHash=masterHash+g<line_sep>hash_object=hashlib.sha256(masterHash.encode())<line_sep>masterHash=hash_object.hexdigest()<block_end><block_end><return>(hashesDict masterHash)<line_sep>#returns (files,hashes,masterHash) all of which are strings
<block_end><def_stmt>noENILfile #should run when there is no ENIL file.
#creates an ENIL file with the following encrypted entries: 1) all file paths, 2) all file hashes, 3) the master hash
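# Illustrative layout of hashstore.enil (hypothetical, shown only to clarify the
# scheme described above; the real lines are produced by encrypterList/encrypterString):
#   line 1: encrypted comma-separated file paths
#   line 2: encrypted comma-separated SHA-256 hashes, one per file, in the same order
#   line 3: encrypted master hash computed over the concatenated file hashes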
<block_start>allFiles=getListOfFiles(dirName)<line_sep>hashesDict,masterHash=dictcreator(allFiles)<line_sep>files,hashes,masterHash=storeCreator(hashesDict masterHash)<with_stmt>open("hashstore.enil" "w")<as>f<block_start>f.write(files)<line_sep>f.write(hashes)<line_sep>f.write(masterHash)<line_sep>f.close()<block_end>print("Hash checker 'ENIL' file did not exist previously so one has been created")<line_sep>time.sleep(2)<line_sep>exit()<block_end><def_stmt>ENILfileFound #should run when an ENIL file is found
#reads the enil file and decrypts and returns files,hashes and masterHash
<block_start><with_stmt>open('hashstore.enil' 'r')<as>f<block_start>sums=f.readlines()<line_sep>sums=[x.strip()<for>x sums]<line_sep>f.close()<line_sep>files=sums[0]<line_sep>hashes=sums[1]<line_sep>masterHash=sums[2]<line_sep>files=str(decrypterString(files))[0:-1]<line_sep>files=files.split(',')<line_sep>hashes=str(decrypterString(hashes))[0:-1]<line_sep>hashes=hashes.split(',')<line_sep>masterHashFromENIL=decrypterString(masterHash)<line_sep>hashesDictFromENIL={}<if_stmt>len(files)<eq>len(hashes)<block_start><for_stmt>n range(len(files))<block_start>hashesDictFromENIL[files[n]]=hashes[n]<block_end><block_end><return>(hashesDictFromENIL masterHashFromENIL)<block_end><block_end><def_stmt>ENILfileUpdate #should run only after checking with the user
#overwrites the previous ENIL file with updated values
<block_start>aF=getListOfFiles(dirName)<line_sep>has,mas=dictcreator(aF)<line_sep>fil,hashes,maste=storeCreator(has mas)<with_stmt>open("hashstore.enil" "w")<as>f<block_start>f.write(fil)<line_sep>f.write(hashes)<line_sep>f.write(maste)<line_sep>f.close()<block_end>print("Hash checker 'ENIL' file has been updated")<block_end><def_stmt>checkForDeletions CurrentFiles CacheFiles#chceks the 2 files list to see if any file from old list was deleted ie not there in current list
<block_start>deletedFiles=[]<for_stmt>file CacheFiles<block_start><if_stmt>file<in>CurrentFiles<block_start>nono=0<block_end><else_stmt><block_start>deletedFiles.append(file)<block_end><block_end><return>(deletedFiles)<block_end><def_stmt>checkForAdditions CurrentFiles CacheFiles#checks to see if any file was added ie file was not there in old list but is there in new list
<block_start>addedFiles=[]<for_stmt>file CurrentFiles<block_start><if_stmt>file<in>CacheFiles<block_start><continue><block_end><else_stmt><block_start>addedFiles.append(file)<block_end><block_end><return>(addedFiles)<block_end><def_stmt>deleteFiles addedFiles#allows user to manually delete files he/she hasnt added or modified directly through command line
<block_start>filePath=input('Copy the path of the file you want to delete from the list above and paste it here(one at a time):')<if_stmt>filePath<in>addedFiles<block_start>os.remove(filePath)<block_end><else_stmt><block_start>print(filePath+' isnt a file path that was recently added. ' end="")<line_sep>time.sleep(4)<line_sep>retryResponse=input('Would you like to try again?(y/n)')<if_stmt>retryResponse<in>yesAnswers<block_start>deleteFiles(addedFiles)<block_end><block_end>anotherOne=input('Would you like to add another file?(y/n)')<if_stmt>anotherOne<in>yesAnswers<block_start>deleteFiles(addedFiles)<line_sep><return><block_end><block_end><def_stmt>UserCheckAdded addedFiles#allows user to go through each addedFile to see if theres a file that they didnt add
<block_start>confirmation=input('Were some of the added files not added by you? Would you like to remove one or more of the added files?(y/n)\n')<if_stmt>confirmation<in>yesAnswers<block_start>print('The following is a list of all files that have been added since last run time:')<line_sep>time.sleep(2)<for_stmt>file addedFiles<block_start>print(file)<line_sep>time.sleep(1.5)<block_end>print("If any of these files was not added by you and you suspect the file of being malicious you should delete the file immediately.")<line_sep>time.sleep(2.5)<line_sep>maliciousFileChecker=input('Would you like to delete files not added by you?(y/n)')<if_stmt>maliciousFileChecker<in>yesAnswers<block_start>deleteFiles(addedFiles)<block_end><else_stmt><block_start><return>(0)<block_end><block_end><block_end><def_stmt>antiModChecker hashesDictFromENIL masterHashFromENIL<block_start>allFiles=getListOfFiles(dirName)<line_sep>hashesDict,masterHash=dictcreator(allFiles)<line_sep>masterHashFromENIL=masterHashFromENIL[:-1]<line_sep>#check that masterHash is same
<if_stmt>masterHash<eq>masterHashFromENIL<block_start>print('Files have not been modified.')<line_sep>time.sleep(1)<line_sep>print('Integrity of all files is maintained. Virus checker will now close')<line_sep>time.sleep(4)<line_sep>#exits program
exit()<block_end><else_stmt><block_start>CurrentFiles=list(hashesDict.keys())<line_sep>CacheFiles=list(hashesDictFromENIL.keys())<line_sep>#check for file additions and deletions
addedFiles=checkForAdditions(CurrentFiles CacheFiles)<line_sep>deletedFiles=checkForDeletions(CurrentFiles CacheFiles)<if_stmt>len(addedFiles)<eq>0<and>len(deletedFiles)<eq>0<block_start>print("No files have been added or deleted")<line_sep>time.sleep(3)<block_end><else_stmt><block_start><if_stmt>len(deletedFiles)<ne>0<block_start>print("The following files have been deleted:")<line_sep>time.sleep(2)<for_stmt>file deletedFiles<block_start>print(file)<line_sep>time.sleep(0.5)<block_end>garbage=input('press enter to continue')<block_end><if_stmt>len(addedFiles)<ne>0<block_start>print("\nThe following files have been added:")<line_sep>time.sleep(2)<for_stmt>file addedFiles<block_start>print(file)<line_sep>time.sleep(0.5)<block_end>garbage=input('press enter to continue')<block_end>#Make sure that the added files were added by the user:
UserCheckAdded(addedFiles)<block_end>#check the hashes
#only need to check hash of files that are currently in folders
verified=[]<line_sep>modifie=[]<line_sep>print('\nVerifying file integrity of old files' end='')<line_sep>time.sleep(1.5)<line_sep>print('.' end='')<line_sep>time.sleep(1.5)<line_sep>print('.' end='')<line_sep>time.sleep(1.5)<line_sep>print('.')<line_sep>time.sleep(2)<for_stmt>file CurrentFiles<block_start><if_stmt>file<in>addedFiles<block_start>print(file)<block_end><else_stmt><block_start><if_stmt>hashesDict[file]<eq>hashesDictFromENIL[file]<block_start>verified.append(file)<line_sep><continue><block_end><else_stmt><block_start>modifie.append(file)<block_end><block_end><block_end>print('\nFollowing files have not been modified and their integrity is guranteed:')<line_sep>time.sleep(1)<for_stmt>file verified<block_start>print(file)<block_end>garbage=input('press enter to continue')<line_sep>print('\nFollowing files have been modified so they may have been infected with a virus:')<line_sep>time.sleep(1)<for_stmt>file modifie<block_start>print(file)<block_end>garbage=input('press enter to continue')<line_sep>print('Note: Only update the cache if you are confident that integrity of all files are intact.')<line_sep>time.sleep(4)<line_sep>updateConfirmation=input('Would you like to update the cache of file integrity keys?(y/n)')<if_stmt>updateConfirmation<in>yesAnswers<block_start>print('Virus check will close soon after updating' end='')<line_sep>time.sleep(0.5)<line_sep>print('.' end='')<line_sep>time.sleep(0.5)<line_sep>print('.' end='')<line_sep>time.sleep(0.5)<line_sep>print('.' end='')<line_sep>time.sleep(2.5)<line_sep>ENILfileUpdate()<line_sep>time.sleep(4)<line_sep>exit()<block_end><else_stmt><block_start>print('not updating the cache file means the integrity of recently added or modified files cannot not be verified the next time you run this file.')<line_sep>time.sleep(2)<line_sep>reconfirmation=input('Are you sure you dont want to update files?')<if_stmt>reconfirmation<in>yesAnswers<block_start>print('Virus checker will now close')<line_sep>time.sleep(2)<line_sep>exit()<block_end><block_end><block_end><block_end>###Logics Start here
ENILtester=os.listdir(dirName)<line_sep>ENILpresent=<false><for_stmt>entry ENILtester<block_start><if_stmt>entry[-5:]<eq>".enil"<block_start>ENILpresent=<true><block_end><block_end><if_stmt>ENILpresent<block_start>hashesDictFromENIL,masterHashFromENIL=ENILfileFound()<line_sep>antiModChecker(hashesDictFromENIL masterHashFromENIL)<block_end><else_stmt><block_start>noENILfile()<block_end> |
<import_stmt>re<import_stmt>ast<import_stmt>types<import_stmt>random<import_stmt>inspect<import_from_stmt>datetime datetime timedelta<import_from_stmt>aioredis.errors RedisError<import_from_stmt>html.parser HTMLParser<import_from_stmt>sqlalchemy Column SmallInteger String Integer Boolean DateTime<import_from_stmt>sqlalchemy.sql func<import_from_stmt>.base Base BaseModel ModelMeta<import_from_stmt>.mc cache clear_mc<import_from_stmt>.user User<import_from_stmt>.utils trunc_utf8<import_from_stmt>.comment CommentMixin<import_from_stmt>.react ReactMixin<import_from_stmt>.markdown markdown toc toc_md MLStripper<import_from_stmt>. schemas<import_stmt>config<line_sep>MC_KEY_TAGS_BY_POST_ID='post:%s:tags'<line_sep>MC_KEY_RELATED='post:related_posts:%s:limit:%s'<line_sep>MC_KEY_POST_BY_SLUG='post:%s:slug'<line_sep>MC_KEY_ALL_POSTS='core:posts:%s:v2'<line_sep>MC_KEY_FEED='core:feed'<line_sep>MC_KEY_SITEMAP='core:sitemap'<line_sep>MC_KEY_SEARCH='core:search.json'<line_sep>MC_KEY_ARCHIVES='core:archives'<line_sep>MC_KEY_ARCHIVE='core:archive:%s'<line_sep>MC_KEY_TAGS='core:tags'<line_sep>MC_KEY_TAG='core:tag:%s'<line_sep>MC_KEY_SPECIAL_ITEMS='special:%s:items'<line_sep>MC_KEY_SPECIAL_POST_ITEMS='special:%s:post_items'<line_sep>MC_KEY_SPECIAL_BY_PID='special:by_pid:%s'<line_sep>MC_KEY_SPECIAL_BY_SLUG='special:%s:slug'<line_sep>MC_KEY_ALL_SPECIAL_TOPICS='special:topics'<line_sep>RK_PAGEVIEW='frodo:pageview:{}:v2'<line_sep>RK_ALL_POST_IDS='frodo:all_post_ids'<line_sep>RK_VISITED_POST_IDS='frodo:visited_post_ids'<line_sep>BQ_REGEX=re.compile(r'<blockquote>.*?</blockquote>')<line_sep>PAGEVIEW_FIELD='pv'<class_stmt>Post(BaseModel CommentMixin ReactMixin)<block_start>STATUSES=(STATUS_UNPUBLISHED STATUS_ONLINE)=range(2)<line_sep>status=Column(SmallInteger() default=STATUS_UNPUBLISHED)<line_sep>(TYPE_ARTICLE TYPE_PAGE)=range(2)<line_sep>created_at=Column(DateTime server_default=func.now() nullable=<false>)<line_sep>title=Column(String(100) unique=<true>)<line_sep>author_id=Column(Integer())<line_sep>slug=Column(String(100))<line_sep>summary=Column(String(255))<line_sep>can_comment=Column(Boolean() default=<true>)<line_sep>type=Column(Integer() default=TYPE_ARTICLE)<line_sep>pageview=Column(Integer() default=0)<line_sep>kind=config.K_POST<line_sep>@cache(MC_KEY_RELATED%('{self.id}' '{limit}'))<async_keyword><def_stmt>get_related self limit:int=4<block_start>tag_ids=[tag.id<for>tag <await>self.tags]<if_stmt><not>tag_ids<block_start><return>[]<block_end>post_ids=set([item['post_id']<for>item <await>PostTag.async_in('tag_id' tag_ids)])<line_sep>post_ids<augsub>set([self.id])<if_stmt><not>post_ids<block_start><return>[]<block_end>related_posts=[Post(**p)<for>p <await>Post.async_in('id' post_ids)]<line_sep><return>related_posts[:limit]<if>len(related_posts)<ge>limit<else>related_posts<block_end>@classmethod<async_keyword><def_stmt>acreate cls **kwargs<block_start>tags=kwargs.pop('tags' [])<line_sep>content=kwargs.pop('content')<line_sep>obj_id=<await>super().acreate(**kwargs)<line_sep>kwargs['id']=obj_id<if_stmt>tags<block_start><try_stmt><block_start><await>PostTag.update_multi(obj_id tags)<block_end><except_stmt><block_start><await>Post.adelete(id=obj_id)<line_sep><return><block_end><block_end>obj=cls(**(<await>cls.async_first(id=obj_id)))<line_sep><await>obj.set_content(content)<line_sep><return>obj<block_end><async_keyword><def_stmt>update_tags self tagnames<block_start><if_stmt>tagnames<block_start><await>PostTag.update_multi(self.id 
tagnames)<block_end><return><true><block_end>@property@cache(MC_KEY_TAGS_BY_POST_ID%('{self.id}'))<async_keyword><def_stmt>tags self<block_start>pts=<await>PostTag.async_filter(post_id=self.id)<if_stmt><not>pts<block_start><return>[]<block_end>ids=[item['tag_id']<for>item pts]<line_sep>tags=<await>Tag.async_in('id' ids)<line_sep>tags=[Tag(**t)<for>t tags]<line_sep><return>tags<block_end>@property<async_keyword><def_stmt>author self<block_start>print('user_id' self.author_id)<line_sep>rv=<await>User.cache(id=self.author_id)<line_sep><return>{'name':rv['name'] 'id':self.author_id 'avatar':rv['avatar']}<block_end>@property<def_stmt>is_page self<block_start><return>self.type<eq>self.TYPE_PAGE<block_end>@property<def_stmt>preview_url self<block_start><return>f'/{self.__class__.__name__.lower()}/{self.id}/preview'<block_end><async_keyword><def_stmt>set_content self content<block_start><return><await>self.set_props_by_key('content' content)<block_end><async_keyword><def_stmt>asave self *args **kwargs<block_start>content=kwargs.pop('content' <none>)<if_stmt>content<is><not><none><block_start><await>self.set_content('content' content)<block_end><return><await>super().asave(*args **kwargs)<block_end>@property<async_keyword><def_stmt>content self<block_start>rv=<await>self.get_props_by_key('content')<if_stmt>rv<block_start><return>rv.decode('utf-8')<block_end><block_end>@classmethod@cache(MC_KEY_POST_BY_SLUG%'{slug}')<async_keyword><def_stmt>get_by_slug cls slug<block_start><return><await>cls.async_first(slug=slug)<block_end>@classmethod@cache(MC_KEY_ALL_POSTS%'{with_page}')<async_keyword><def_stmt>get_all cls with_page=<true><block_start><if_stmt>with_page<block_start>posts=<await>Post.async_filter(status=Post.STATUS_ONLINE)<block_end><else_stmt><block_start>posts=<await>Post.async_filter(status=Post.STATUS_ONLINE type=Post.TYPE_ARTICLE)<block_end><return>sorted(posts key=<lambda>p:p['created_at'] reverse=<true>)<block_end>@property<def_stmt>url self<block_start><if_stmt>self.is_page<block_start><return>f'/page/{self.slug}'<block_end><return>f'/post/{getattr(self config.PERMALINK_TYPE)<or>self.id}/'<block_end>@property<async_keyword><def_stmt>html_content self<block_start>content=<await>self.content<if_stmt><not>content<block_start><return>''<block_end><return>markdown(content)<block_end>@property<async_keyword><def_stmt>excerpt self<block_start><if_stmt>self.summary<block_start><return>self.summary<block_end>s=MLStripper()<line_sep>s.feed(<await>self.html_content)<line_sep><return>trunc_utf8(BQ_REGEX.sub('' s.get_data()).replace('\n' '') 100)<block_end>@property<async_keyword><def_stmt>toc self<block_start>content=<await>self.content<if_stmt><not>content<block_start><return>''<block_end>toc.reset_toc()<line_sep>toc_md.parse(content)<line_sep><return>toc.render_toc(level=4)<block_end>@classmethod<async_keyword><def_stmt>cache cls ident<block_start><if_stmt>str(ident).isdigit()<block_start><return><await>super().cache(id=ident)<block_end><return><await>cls.get_by_slug(ident)<block_end><async_keyword><def_stmt>clear_mc self<block_start>print('Clear POst MC' self.created_at)<try_stmt><block_start>keys=[MC_KEY_FEED MC_KEY_SITEMAP MC_KEY_SEARCH MC_KEY_ARCHIVES MC_KEY_TAGS MC_KEY_RELATED%(self.id 4) MC_KEY_POST_BY_SLUG%self.slug MC_KEY_ARCHIVE%self.created_at.year]<block_end><except_stmt><block_start><import_stmt>traceback<line_sep>traceback.print_exc()<block_end><for_stmt>i [<true> <false>]<block_start>keys.append(MC_KEY_ALL_POSTS%i)<block_end><for_stmt>tag 
<await>self.tags<block_start>keys.append(MC_KEY_TAG%tag.id)<block_end><await>clear_mc(*keys)<block_end><async_keyword><def_stmt>incr_pageview self increment=1<block_start>redis=<await>self.redis<try_stmt><block_start><await>redis.sadd(RK_ALL_POST_IDS self.id)<line_sep><await>redis.sadd(RK_VISITED_POST_IDS self.id)<line_sep><return><await>redis.hincrby(RK_PAGEVIEW.format(self.id) PAGEVIEW_FIELD increment)<block_end><except_stmt><block_start><return>self.pageview<block_end><block_end>@property<async_keyword><def_stmt>pageview_ self<block_start><try_stmt><block_start><return>int(<await>(<await>self.redis).hget(RK_PAGEVIEW.format(self.id) PAGEVIEW_FIELD)<or>0)<block_end><except_stmt>RedisError<block_start><return>self.pageview<block_end><block_end><block_end><class_stmt>Tag(BaseModel)<block_start>name=Column(String(100) unique=<true>)<line_sep>@classmethod<def_stmt>create cls **kwargs<block_start>name=kwargs.pop('name')<line_sep>kwargs['name']=name.lower()<line_sep><return>super().acreate(**kwargs)<block_end>@classmethod<async_keyword><def_stmt>get_by_name cls name<block_start><return><await>cls.async_filter(name=name)<block_end><block_end><class_stmt>PostTag(BaseModel)<block_start>post_id=Column(Integer())<line_sep>tag_id=Column(Integer())<line_sep>updated_at=Column(DateTime server_default=func.now() nullable=<false>)<line_sep>@classmethod<async_keyword><def_stmt>update_multi cls post_id tags:list<block_start>origin_tags_id=[t['tag_id']<for>t (<await>PostTag.async_filter(post_id=post_id))]<line_sep>origin_tags_name=set([t['name']<for>t <await>Tag.async_in('id' origin_tags_id)])<line_sep>need_add=set(tags)-origin_tags_name<line_sep>need_del=origin_tags_name-set(tags)<line_sep>need_add_tags_id=[]<line_sep>need_del_tags_id=set()<for_stmt>tag_name need_add<block_start>rv=<await>Tag.get_or_create(name=tag_name)<if_stmt>isinstance(rv int)<block_start>need_add_tags_id.append(rv)<block_end><else_stmt><block_start>need_add_tags_id.append(rv['id'])<block_end><block_end><for_stmt>tag_name need_del<block_start>rv=<await>Tag.get_or_create(name=tag_name)<if_stmt>isinstance(rv int)<block_start>need_del_tags_id.append(rv)<block_end><else_stmt><block_start>need_del_tags_id.add(rv['id'])<block_end><block_end><if_stmt>need_del_tags_id<block_start><for_stmt>id list(need_del_tags_id)<block_start><await>cls.adelete(post_id=post_id tag_id=id)<block_end><block_end><for_stmt>tag_id need_add_tags_id<block_start><await>cls.get_or_create(post_id=post_id tag_id=tag_id)<block_end><await>clear_mc(MC_KEY_TAGS_BY_POST_ID%post_id)<block_end><block_end> |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<import_from_stmt>typing Pattern<import_from_stmt>recognizers_text RegExpUtility<import_from_stmt>...resources.chinese_date_time ChineseDateTime<import_from_stmt>..base_set SetExtractorConfiguration<import_from_stmt>.duration_extractor ChineseDurationExtractor<import_from_stmt>.time_extractor ChineseTimeExtractor<import_from_stmt>.date_extractor ChineseDateExtractor<import_from_stmt>.datetime_extractor ChineseDateTimeExtractor<class_stmt>ChineseSetExtractorConfiguration(SetExtractorConfiguration)<block_start>@property<def_stmt>last_regex self<arrow>Pattern<block_start><return>self._last_regex<block_end>@property<def_stmt>each_prefix_regex self<arrow>Pattern<block_start><return>self._each_prefix_regex<block_end>@property<def_stmt>periodic_regex self<arrow>any<block_start><return><none><block_end>@property<def_stmt>each_unit_regex self<arrow>Pattern<block_start><return>self._each_unit_regex<block_end>@property<def_stmt>each_day_regex self<arrow>Pattern<block_start><return>self._each_day_regex<block_end>@property<def_stmt>before_each_day_regex self<arrow>Pattern<block_start><return>self._before_each_day_regex<block_end>@property<def_stmt>set_week_day_regex self<arrow>any<block_start><return><none><block_end>@property<def_stmt>set_each_regex self<arrow>any<block_start><return><none><block_end>@property<def_stmt>duration_extractor self<arrow>ChineseDurationExtractor<block_start><return>self._duration_extractor<block_end>@property<def_stmt>time_extractor self<arrow>ChineseTimeExtractor<block_start><return>self._time_extractor<block_end>@property<def_stmt>date_extractor self<arrow>ChineseDateExtractor<block_start><return>self._date_extractor<block_end>@property<def_stmt>date_time_extractor self<arrow>ChineseDateTimeExtractor<block_start><return>self._date_time_extractor<block_end>@property<def_stmt>date_period_extractor self<arrow>any<block_start><return><none><block_end>@property<def_stmt>time_period_extractor self<arrow>any<block_start><return><none><block_end>@property<def_stmt>date_time_period_extractor self<arrow>any<block_start><return><none><block_end><def_stmt>__init__ self<block_start>self._last_regex=RegExpUtility.get_safe_reg_exp(ChineseDateTime.SetLastRegex)<line_sep>self._each_prefix_regex=RegExpUtility.get_safe_reg_exp(ChineseDateTime.SetEachPrefixRegex)<line_sep>self._each_unit_regex=RegExpUtility.get_safe_reg_exp(ChineseDateTime.SetEachUnitRegex)<line_sep>self._each_day_regex=RegExpUtility.get_safe_reg_exp(ChineseDateTime.SetEachDayRegex)<line_sep>self._before_each_day_regex=RegExpUtility.get_safe_reg_exp(ChineseDateTime.SetEachDayRegex)<line_sep>self._duration_extractor=ChineseDurationExtractor()<line_sep>self._time_extractor=ChineseTimeExtractor()<line_sep>self._date_extractor=ChineseDateExtractor()<line_sep>self._date_time_extractor=ChineseDateTimeExtractor()<block_end><block_end> |
<import_stmt>argparse<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_from_stmt>typing List<import_from_stmt>rasa model<import_from_stmt>rasa.cli.default_arguments add_model_param<import_from_stmt>rasa.cli.utils get_validated_path<import_from_stmt>rasa.constants DEFAULT_ACTIONS_PATH DEFAULT_CREDENTIALS_PATH DEFAULT_ENDPOINTS_PATH DEFAULT_MODELS_PATH <import_from_stmt>rasa.model get_latest_model<line_sep>logger=logging.getLogger(__name__)<line_sep># noinspection PyProtectedMember
<def_stmt>add_subparser subparsers:argparse._SubParsersAction parents:List[argparse.ArgumentParser]<block_start>run_parser=subparsers.add_parser("run" parents=parents conflict_handler="resolve" formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Start a Rasa server which loads a trained model")<line_sep>add_run_arguments(run_parser)<line_sep>run_parser.set_defaults(func=run)<line_sep>run_subparsers=run_parser.add_subparsers()<line_sep>run_core_parser=run_subparsers.add_parser("core" parents=parents conflict_handler="resolve" formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Run a trained Core model")<line_sep>add_run_arguments(run_core_parser)<line_sep>run_core_parser.set_defaults(func=run)<line_sep>nlu_subparser=run_subparsers.add_parser("nlu" parents=parents conflict_handler="resolve" formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Run a trained NLU model")<line_sep>_add_nlu_arguments(nlu_subparser)<line_sep>nlu_subparser.set_defaults(func=run_nlu)<line_sep>sdk_subparser=run_subparsers.add_parser("actions" parents=parents conflict_handler="resolve" formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Run the action server")<line_sep>_adk_sdk_arguments(sdk_subparser)<line_sep>sdk_subparser.set_defaults(func=run_actions)<block_end><def_stmt>add_run_arguments parser:argparse.ArgumentParser<block_start><import_from_stmt>rasa.core.cli.run add_run_arguments<line_sep>add_run_arguments(parser)<line_sep>add_model_param(parser)<line_sep>parser.add_argument("--credentials" type=str default="credentials.yml" help="Authentication credentials for the connector as a yml file")<block_end><def_stmt>_add_nlu_arguments parser:argparse.ArgumentParser<block_start><import_from_stmt>rasa_nlu.cli.server add_server_arguments<line_sep>add_server_arguments(parser)<line_sep>parser.add_argument('--path' default=DEFAULT_MODELS_PATH type=str help="Working directory of the server. Models are"<concat>"loaded from this directory and trained models "<concat>"will be saved here")<line_sep>add_model_param(parser "NLU")<block_end><def_stmt>_adk_sdk_arguments parser:argparse.ArgumentParser<block_start><import_stmt>rasa_core_sdk.cli.arguments<as>sdk<line_sep>sdk.add_endpoint_arguments(parser)<line_sep>parser.add_argument('--actions' type=str default="actions" help="name of action package to be loaded")<block_end><def_stmt>run_nlu args:argparse.Namespace<block_start><import_stmt>rasa_nlu.server<import_stmt>tempfile<line_sep>args.model=get_validated_path(args.path "path" DEFAULT_MODELS_PATH)<line_sep>model_archive=get_latest_model(args.model)<line_sep>working_directory=tempfile.mkdtemp()<line_sep>unpacked_model=model.unpack_model(model_archive working_directory)<line_sep>args.path=os.path.dirname(unpacked_model)<line_sep>rasa_nlu.server.main(args)<line_sep>shutil.rmtree(unpacked_model)<block_end><def_stmt>run_actions args:argparse.Namespace<block_start><import_stmt>rasa_core_sdk.endpoint<as>sdk<import_stmt>sys<line_sep>args.actions=args.actions<or>DEFAULT_ACTIONS_PATH<line_sep># insert current path in syspath so module is found
sys.path.insert(1 os.getcwd())<line_sep>path=args.actions.replace('.' os.sep)+".py"<line_sep>_=get_validated_path(path "action" DEFAULT_ACTIONS_PATH)<line_sep>sdk.main(args)<block_end><def_stmt>run args:argparse.Namespace<block_start><import_stmt>rasa.run<line_sep>args.model=get_validated_path(args.model "model" DEFAULT_MODELS_PATH)<line_sep>args.endpoints=get_validated_path(args.endpoints "endpoints" DEFAULT_ENDPOINTS_PATH <true>)<line_sep>args.credentials=get_validated_path(args.credentials "credentials" DEFAULT_CREDENTIALS_PATH <true>)<line_sep>rasa.run(**vars(args))<block_end> |
"""
Global fixtures and functions for pytest
pytest can only share fixtures between modules if they are declared here.
"""<import_stmt>logging<import_stmt>os<import_stmt>pytest<import_from_stmt>loguru logger<import_stmt>genomepy.providers<import_from_stmt>genomepy.providers.base BaseProvider<import_from_stmt>genomepy.providers.ensembl EnsemblProvider<import_from_stmt>genomepy.providers.gencode GencodeProvider<import_from_stmt>genomepy.providers.local LocalProvider<import_from_stmt>genomepy.providers.ncbi NcbiProvider<import_from_stmt>genomepy.providers.ucsc UcscProvider<import_from_stmt>genomepy.providers.url UrlProvider<line_sep>@pytest.fixture(scope="function")<def_stmt>caplog caplog<block_start>"""Fixture is necessary to be able to check loguru log messages"""<class_stmt>PropogateHandler(logging.Handler)<block_start><def_stmt>emit self record<block_start>logging.getLogger(record.name).handle(record)<block_end><block_end>handler_id=logger.add(PropogateHandler() format="{message} {extra}")<line_sep><yield>caplog<line_sep>logger.remove(handler_id)<block_end><def_stmt>teardown gprefix skip=<none><block_start><for_stmt>ext ["fa.fai" "fa.sizes" "gaps.bed" "fa.gz.fai" "fa.gz.sizes" "annotation.gtf" "annotation.bed" ]<block_start><if_stmt>skip<and>ext<in>skip<block_start><continue><block_end>file=gprefix+ext<if_stmt>os.path.exists(file)<block_start>os.remove(file)<block_end><block_end>gdir=os.path.dirname(gprefix)<line_sep>readme=os.path.join(gdir "README.txt")<if_stmt>os.path.exists(readme)<block_start>os.remove(readme)<block_end><block_end>@pytest.fixture(scope="function")<def_stmt>small_genome <block_start><yield>genomepy.Genome("tests/data/small_genome.fa.gz")<line_sep>teardown("tests/data/small_genome.")<block_end>@pytest.fixture(scope="function")<def_stmt>gap_genome <block_start><yield>genomepy.Genome("tests/data/gap.fa")<line_sep>teardown("tests/data/gap.")<block_end>@pytest.fixture(scope="function")<def_stmt>annot <block_start>genome_file="tests/data/regexp/regexp.fa"<line_sep>gtf_file="tests/data/regexp/regexp.annotation.gtf"<line_sep>bed_file="tests/data/regexp/regexp.annotation.bed"<line_sep>genomepy.Genome(genome_file)<with_stmt>open(gtf_file "w")<as>f<block_start>f.write("# skip this line\n")<line_sep>f.write("""chrM\tvanHeeringen-lab\tNP_059343.1\t15307\t16448\t42\t+\t.\tattributes""")<block_end><with_stmt>open(bed_file "w")<as>f<block_start>f.write("""chrM\t15307\t16448\tNP_059343.1\t42\t+\t15307\t16448\t0\t1\t1141,\t0,""")<block_end><yield>genomepy.Annotation("regexp" genomes_dir="tests/data")<line_sep>teardown("tests/data/regexp/regexp.")<block_end><def_stmt>validate_annot fname ftype<block_start>"""fname = path, ftype = 'bed' or 'gtf'."""<assert_stmt>os.path.exists(fname)<line_sep>columns=12<if>ftype<eq>"bed"<else>9<line_sep>start,end=(3 4)<if>ftype<eq>"gtf"<else>(1 2)<with_stmt>open(fname "r")<as>f<block_start><for_stmt>line f<block_start><if_stmt>line.startswith("#")<block_start><continue><block_end>vals=line.split("\t")<assert_stmt>columns<eq>len(vals)<line_sep>int(vals[start]) int(vals[end])<line_sep><break><block_end><block_end><block_end>@pytest.fixture(scope="function")<def_stmt>base <block_start><return>BaseProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>ensembl <block_start><return>EnsemblProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>ucsc <block_start><return>UcscProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>gencode <block_start><return>GencodeProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>ncbi <block_start><return>NcbiProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>local 
<block_start><return>LocalProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>url <block_start><return>UrlProvider()<block_end>@pytest.fixture(scope="function")<def_stmt>provider <block_start><return>genomepy.Provider()<block_end> |
"""
Utilities for NumPy arrays and matrices that contain numbers with
uncertainties.
This package contains:
1) utilities that help with the creation and manipulation of NumPy
arrays and matrices of numbers with uncertainties;
2) generalizations of multiple NumPy functions so that they also work
with arrays that contain numbers with uncertainties.
- Arrays of numbers with uncertainties can be built as follows:
arr = unumpy.uarray([1, 2], [0.01, 0.002]) # (values, uncertainties)
NumPy arrays of numbers with uncertainties can also be built directly
through NumPy, thanks to NumPy's support of arrays of arbitrary objects:
arr = numpy.array([uncertainties.ufloat(1, 0.1),...])
- Matrices of numbers with uncertainties are best created in one of
two ways:
mat = unumpy.umatrix([1, 2], [0.01, 0.002])  # (values, uncertainties)
Matrices can also be built by converting arrays of numbers with
uncertainties, through the unumpy.matrix class:
mat = unumpy.matrix(arr)
unumpy.matrix objects behave like numpy.matrix objects of numbers with
uncertainties, but with better support for some operations (such as
matrix inversion):
# The inverse or pseudo-inverse of a unumpy.matrix can be calculated:
print mat.I # Would not work with numpy.matrix([[ufloat(...),...]]).I
- Nominal values and uncertainties of arrays can be directly accessed:
print unumpy.nominal_values(arr) # [ 1. 2.]
print unumpy.std_devs(mat) # [ 0.01 0.002]
- This module defines uncertainty-aware mathematical functions that
generalize those from uncertainties.umath so that they work on NumPy
arrays of numbers with uncertainties instead of just scalars:
print unumpy.cos(arr) # Array with the cosine of each element
NumPy's function names are used, and not those of the math module (for
instance, unumpy.arccos is defined, like in NumPy, and is not named
acos like in the standard math module).
The definitions of the mathematical quantities calculated by these
functions are available in the documentation of uncertainties.umath.
- The unumpy.ulinalg module contains more uncertainty-aware functions
for arrays that contain numbers with uncertainties (see the
documentation for this module).
This module requires the NumPy package.
(c) 2009-2016 by <NAME> (EOL) <<EMAIL>>.
Please send feature requests, bug reports, or feedback to this address.
This software is released under a dual license. (1) The BSD license.
(2) Any other license, as long as it is obtained from the original
author."""<line_sep># Local modules:
<import_from_stmt>.core *<import_from_stmt>. ulinalg# Local sub-module
# __all__ is set so that pydoc shows all important functions:
__all__=core.__all__<line_sep># "import numpy" makes numpy.linalg available. This behavior is
# copied here, for maximum compatibility:
__all__.append('ulinalg')<line_sep> |
<import_from_stmt>typing List Optional Union<import_from_stmt>aiocqhttp Event<as>CQEvent<import_from_stmt>aiocqhttp.bus EventBus<import_from_stmt>. NoneBot<import_from_stmt>.log logger<import_from_stmt>.exceptions CQHttpError<import_from_stmt>.session BaseSession<import_from_stmt>.typing NoticeHandler_T RequestHandler_T<class_stmt>EventHandler<block_start>"""INTERNAL API"""<line_sep>__slots__=('events' 'func')<def_stmt>__init__ self events:List[str] func:Union[NoticeHandler_T RequestHandler_T]<block_start>self.events=events<line_sep>self.func=func<block_end><block_end><class_stmt>EventManager<block_start>"""INTERNAL API"""<line_sep>bus=EventBus()<line_sep>@classmethod<def_stmt>add_event_handler cls handler:EventHandler<arrow><none><block_start><for_stmt>event handler.events<block_start>cls.bus.subscribe(event handler.func)<block_end><block_end>@classmethod<def_stmt>remove_event_handler cls handler:EventHandler<arrow><none><block_start><for_stmt>event handler.events<block_start>cls.bus.unsubscribe(event handler.func)<block_end><block_end>@classmethod<def_stmt>switch_event_handler_global cls handler:EventHandler state:Optional[bool]=<none><arrow><none><block_start><for_stmt>event handler.events<block_start><if_stmt>handler.func<in>cls.bus._subscribers[event]<and><not>state<block_start>cls.bus.unsubscribe(event handler.func)<block_end><elif_stmt>handler.func<not><in>cls.bus._subscribers[event]<and>state<is><not><false><block_start>cls.bus.subscribe(event handler.func)<block_end><block_end><block_end><block_end><class_stmt>NoticeSession(BaseSession)<block_start>__slots__=()<def_stmt>__init__ self bot:NoneBot event:CQEvent<block_start>super().__init__(bot event)<block_end><block_end><class_stmt>RequestSession(BaseSession)<block_start>__slots__=()<def_stmt>__init__ self bot:NoneBot event:CQEvent<block_start>super().__init__(bot event)<block_end><async_keyword><def_stmt>approve self remark:str=''<arrow><none><block_start>"""
Approve the request.
:param remark: remark to attach to the new friend (only effective for friend requests)
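Usage (illustrative sketch inside a request event handler):
    await session.approve(remark='added via bot')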
"""<try_stmt><block_start><await>self.bot.call_action(action='.handle_quick_operation_async' self_id=self.event.self_id context=self.event operation={'approve':<true> 'remark':remark})<block_end><except_stmt>CQHttpError<block_start><pass><block_end><block_end><async_keyword><def_stmt>reject self reason:str=''<arrow><none><block_start>"""
Reject the request.
:param reason: reason for the rejection (only effective for group requests)
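Usage (illustrative sketch inside a request event handler):
    await session.reject(reason='the group is invite-only')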
"""<try_stmt><block_start><await>self.bot.call_action(action='.handle_quick_operation_async' self_id=self.event.self_id context=self.event operation={'approve':<false> 'reason':reason})<block_end><except_stmt>CQHttpError<block_start><pass><block_end><block_end><block_end><async_keyword><def_stmt>handle_notice_or_request bot:NoneBot event:CQEvent<arrow><none><block_start>"""INTERNAL API"""<if_stmt>event.type<eq>'notice'<block_start>_log_notice(event)<line_sep>session=NoticeSession(bot event)<block_end><else_stmt># must be 'request'
<block_start>_log_request(event)<line_sep>session=RequestSession(bot event)<block_end>ev_name=event.name<line_sep>logger.debug(f'Emitting event: {ev_name}')<try_stmt><block_start><await>EventManager.bus.emit(ev_name session)<block_end><except_stmt>Exception<as>e<block_start>logger.error(f'An exception occurred while handling event {ev_name}:')<line_sep>logger.exception(e)<block_end><block_end><def_stmt>_log_notice event:CQEvent<arrow><none><block_start>logger.info(f'Notice: {event}')<block_end><def_stmt>_log_request event:CQEvent<arrow><none><block_start>logger.info(f'Request: {event}')<block_end>__all__=['NoticeSession' 'RequestSession' ]<line_sep> |
# This example code was modified from https://github.com/ericjang/maml-jax .
#
# The original code comes with the following license:
# https://github.com/ericjang/maml-jax/blob/master/LICENSE
# Copyright <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
<import_from_stmt>functorch grad vmap<import_stmt>matplotlib.pyplot<as>plt<import_stmt>math<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>torch.nn functional<as>F<import_stmt>matplotlib<as>mpl<line_sep>mpl.use('Agg')<def_stmt>net params x<block_start>x=F.linear(x params[0] params[1])<line_sep>x=F.relu(x)<line_sep>x=F.linear(x params[2] params[3])<line_sep>x=F.relu(x)<line_sep>x=F.linear(x params[4] params[5])<line_sep><return>x<block_end>params=[torch.Tensor(40 1).uniform_(-1. 1.).requires_grad_() torch.Tensor(40).zero_().requires_grad_() torch.Tensor(40 40).uniform_(-1./math.sqrt(40) 1./math.sqrt(40)).requires_grad_() torch.Tensor(40).zero_().requires_grad_() torch.Tensor(1 40).uniform_(-1./math.sqrt(40) 1./math.sqrt(40)).requires_grad_() torch.Tensor(1).zero_().requires_grad_() ]<line_sep># The prototype doesn't like F.mse_loss.
<def_stmt>mse_loss x y<block_start><return>torch.mean((x-y)<power>2)<block_end>opt=torch.optim.Adam(params lr=1e-3)<line_sep>alpha=0.1<line_sep>K=20<line_sep>losses=[]<line_sep>num_tasks=4<def_stmt>sample_tasks outer_batch_size inner_batch_size# Select amplitude and phase for the task
<block_start>As=[]<line_sep>phases=[]<for_stmt>_ range(outer_batch_size)<block_start>As.append(np.random.uniform(low=0.1 high=.5))<line_sep>phases.append(np.random.uniform(low=0. high=np.pi))<block_end><def_stmt>get_batch <block_start>xs,ys=[] []<for_stmt>A,phase zip(As phases)<block_start>x=np.random.uniform(low=-5. high=5. size=(inner_batch_size 1))<line_sep>y=A<times>np.sin(x+phase)<line_sep>xs.append(x)<line_sep>ys.append(y)<block_end><return>torch.tensor(xs dtype=torch.float) torch.tensor(ys dtype=torch.float)<block_end>x1,y1=get_batch()<line_sep>x2,y2=get_batch()<line_sep><return>x1 y1 x2 y2<block_end><for_stmt>it range(20000)<block_start>loss2=0.0<line_sep>opt.zero_grad()<def_stmt>get_loss_for_task x1 y1 x2 y2<block_start><def_stmt>inner_loss params x1 y1<block_start>f=net(params x1)<line_sep>loss=mse_loss(f y1)<line_sep><return>loss<block_end>grads=grad(inner_loss)(tuple(params) x1 y1)<line_sep>new_params=[(params[i]-alpha<times>grads[i])<for>i range(len(params))]<line_sep>v_f=net(new_params x2)<line_sep><return>mse_loss(v_f y2)<block_end>task=sample_tasks(num_tasks K)<line_sep>inner_losses=vmap(get_loss_for_task)(task[0] task[1] task[2] task[3])<line_sep>loss2=sum(inner_losses)/len(inner_losses)<line_sep>loss2.backward()<line_sep>opt.step()<if_stmt>it%100<eq>0<block_start>print('Iteration %d -- Outer Loss: %.4f'%(it loss2))<block_end>losses.append(loss2)<block_end>t_A=torch.tensor(0.0).uniform_(0.1 0.5)<line_sep>t_b=torch.tensor(0.0).uniform_(0.0 math.pi)<line_sep>t_x=torch.empty(4 1).uniform_(-5 5)<line_sep>t_y=t_A<times>torch.sin(t_x+t_b)<line_sep>opt.zero_grad()<line_sep>t_params=params<for_stmt>k range(5)<block_start>t_f=net(t_x t_params)<line_sep>t_loss=F.l1_loss(t_f t_y)<line_sep>grads=torch.autograd.grad(t_loss t_params create_graph=<true>)<line_sep>t_params=[(t_params[i]-alpha<times>grads[i])<for>i range(len(params))]<block_end>test_x=torch.arange(-2<times>math.pi 2<times>math.pi step=0.01).unsqueeze(1)<line_sep>test_y=t_A<times>torch.sin(test_x+t_b)<line_sep>test_f=net(test_x t_params)<line_sep>plt.plot(test_x.data.numpy() test_y.data.numpy() label='sin(x)')<line_sep>plt.plot(test_x.data.numpy() test_f.data.numpy() label='net(x)')<line_sep>plt.plot(t_x.data.numpy() t_y.data.numpy() 'o' label='Examples')<line_sep>plt.legend()<line_sep>plt.savefig('maml-sine.png')<line_sep>plt.figure()<line_sep>plt.plot(np.convolve(losses [.05]<times>20))<line_sep>plt.savefig('losses.png')<line_sep> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conv1DTranspose streaming aware layer."""<import_from_stmt>kws_streaming.layers modes<import_from_stmt>kws_streaming.layers.compat tf<class_stmt>Conv1DTranspose(tf.keras.layers.Conv1DTranspose)<block_start>"""streaming aware Conv1DTranspose layer.
Attributes:
mode: Training or inference modes: non streaming, streaming.
inference_batch_size: batch size in inference mode
state_shape: shape of remainder state
crop_output: if True output will be cropped: aligned by stride
**kwargs: additional layer arguments
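Example (an illustrative sketch, not taken from the original code; the
`filters`, `kernel_size` and `strides` arguments are simply forwarded to
tf.keras.layers.Conv1DTranspose through **kwargs):
  layer = Conv1DTranspose(
      mode=modes.Modes.TRAINING,
      pad_time_dim='causal',
      crop_output=True,
      filters=32, kernel_size=3, strides=2)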
"""<def_stmt>__init__ self mode=modes.Modes.TRAINING inference_batch_size=1 pad_time_dim='causal' state_shape=<none> crop_output=<true> **kwargs<block_start>super(Conv1DTranspose self).__init__(**kwargs)<if_stmt>(kwargs.get('activation')<not><in>[<none> 'linear'])<and>self.use_bias<block_start><raise>ValueError('activation should be disabled because we need to '<concat>'subtract bias from remainder state, in streaming mode' kwargs.get('activation'))<block_end>self.mode=mode<line_sep>self.inference_batch_size=inference_batch_size<line_sep>self.pad_time_dim=pad_time_dim<line_sep>self.state_shape=state_shape<line_sep>self.crop_output=crop_output<line_sep>self.overlap=self.kernel_size[0]-self.strides[0]<line_sep>self.overlap=max(self.overlap 0)<if_stmt>pad_time_dim<not><in>['same' 'causal']<block_start><raise>ValueError('pad_time_dim (\'%s\') must be either \'same\' or \'causal\''%pad_time_dim)<block_end><if_stmt>'padding'<in>kwargs<and>kwargs['padding']<ne>'valid'<block_start><raise>ValueError('padding (\'%s\') must be \'valid\'. Use pad_time_dim to make the '<concat>'layer causal (\'causal\') or with lookahead (\'same\')'%kwargs['padding'])<block_end><block_end><def_stmt>build self input_shape<block_start>super(Conv1DTranspose self).build(input_shape)<if_stmt>input_shape.rank<l>2<block_start><raise>ValueError('input_shape.rank:%d must at least 2'%input_shape.rank)<block_end><if_stmt>self.mode<in>[modes.Modes.STREAM_INTERNAL_STATE_INFERENCE modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE]<block_start><if_stmt>input_shape.as_list()[1]<is><none><block_start><raise>ValueError('in streaming mode time dimension of input packet '<concat>'should not be dynamic: TFLite limitation')<block_end>self.output_time_dim=input_shape.as_list()[1]<times>self.strides[0]<if_stmt>self.overlap<g>0<block_start>self.state_shape=[self.inference_batch_size self.overlap self.filters]<if_stmt>self.mode<eq>modes.Modes.STREAM_INTERNAL_STATE_INFERENCE<block_start>self.states=self.add_weight(name='states' shape=self.state_shape trainable=<false> initializer=tf.zeros_initializer)<block_end><elif_stmt>self.mode<eq>modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE# For streaming inference with extrnal states,
# the states are passed in as input.
<block_start>self.input_state=tf.keras.layers.Input(shape=self.state_shape[1:] batch_size=self.inference_batch_size name=self.name+'/input_state_remainder')<line_sep>self.output_state=<none><block_end><block_end><block_end><block_end><def_stmt>call self inputs<block_start><if_stmt>self.mode<eq>modes.Modes.STREAM_INTERNAL_STATE_INFERENCE<block_start><return>self._streaming_internal_state(inputs)<block_end><elif_stmt>self.mode<eq>modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE# in streaming inference mode with external state
# in addition to the output we return the output state.
<block_start>output,self.output_state=self._streaming_external_state(inputs self.input_state)<line_sep><return>output<block_end><elif_stmt>self.mode<in>(modes.Modes.TRAINING modes.Modes.NON_STREAM_INFERENCE)# run non streamable training or non streamable inference
<block_start><return>self._non_streaming(inputs)<block_end><else_stmt><block_start><raise>ValueError(f'Encountered unexpected mode `{self.mode}`.')<block_end><block_end><def_stmt>get_config self<block_start>config=super(Conv1DTranspose self).get_config()<line_sep># only variables which are listed in constructor can be updated here
# because they will be used to construct the class from config
config.update({'mode':self.mode 'inference_batch_size':self.inference_batch_size 'pad_time_dim':self.pad_time_dim 'state_shape':self.state_shape 'crop_output':self.crop_output })<line_sep><return>config<block_end><def_stmt>_streaming_internal_state self inputs<block_start>outputs=super(Conv1DTranspose self).call(inputs)<if_stmt>self.overlap<eq>0<block_start><if_stmt>self.crop_output<block_start><return>tf.identity(outputs[: 0:self.output_time_dim :])<block_end><else_stmt><block_start><return>tf.identity(outputs)<block_end><block_end>output_shape=outputs.shape.as_list()<line_sep># need to add remainder state to a specific region of output as below:
# outputs[:,0:self.overlap,:] = outputs[:,0:self.overlap,:] + self.states
# but 'Tensor' object does not support item assignment,
# so doing it through full summation below
output_shape[1]<augsub>self.state_shape[1]<line_sep>padded_remainder=tf.concat([self.states tf.zeros(output_shape tf.float32)] 1)<line_sep>outputs=outputs+padded_remainder<line_sep># extract remainder state and subtract bias if it is used:
# bias will be added in the next iteration again and remainder
# should have only convolution part, so that bias is not added twice
<if_stmt>self.use_bias<block_start>new_state=outputs[: -self.overlap: :]-self.bias<block_end><else_stmt><block_start>new_state=outputs[: -self.overlap: :]<block_end>assign_states=self.states.assign(new_state)<with_stmt>tf.control_dependencies([assign_states])<block_start><if_stmt>self.crop_output<block_start><return>tf.identity(outputs[: 0:self.output_time_dim :])<block_end><else_stmt><block_start><return>tf.identity(outputs)<block_end><block_end><block_end><def_stmt>_streaming_external_state self inputs states<block_start>outputs=super(Conv1DTranspose self).call(inputs)<if_stmt>self.overlap<eq>0<block_start><if_stmt>self.crop_output<block_start><return>outputs[: 0:self.output_time_dim :] []<block_end><else_stmt><block_start><return>outputs []<block_end><block_end>output_shape=outputs.shape.as_list()<line_sep>output_shape[1]<augsub>self.state_shape[1]<line_sep>padded_remainder=tf.concat([states tf.zeros(output_shape tf.float32)] 1)<line_sep>outputs=outputs+padded_remainder<if_stmt>self.use_bias<block_start>new_state=outputs[: -self.overlap: :]-self.bias<block_end><else_stmt><block_start>new_state=outputs[: -self.overlap: :]<block_end><if_stmt>self.crop_output<block_start><return>outputs[: 0:self.output_time_dim :] new_state<block_end><else_stmt><block_start><return>outputs new_state<block_end><block_end><def_stmt>_non_streaming self inputs<block_start>outputs=super(Conv1DTranspose self).call(inputs)<line_sep># during training or non streaming inference, input shape can be dynamic
output_time_dim=tf.shape(inputs)[1]<times>self.strides[0]<if_stmt>self.crop_output<block_start><if_stmt>self.pad_time_dim<eq>'same'<block_start>crop_left=self.overlap<floordiv>2<line_sep><return>outputs[: crop_left:crop_left+output_time_dim :]<block_end><else_stmt><block_start><return>outputs[: 0:output_time_dim :]<block_end><block_end><else_stmt><block_start><return>outputs<block_end><block_end><def_stmt>get_input_state self# input state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
<block_start><if_stmt>self.mode<eq>modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE<block_start><return>[self.input_state]<block_end><else_stmt><block_start><raise>ValueError('Expected the layer to be in external streaming mode, '<concat>f'not `{self.mode}`.')<block_end><block_end><def_stmt>get_output_state self# output state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
<block_start><if_stmt>self.mode<eq>modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE<block_start><return>[self.output_state]<block_end><else_stmt><block_start><raise>ValueError('Expected the layer to be in external streaming mode, '<concat>f'not `{self.mode}`.')<block_end><block_end><block_end> |
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
<import_stmt>logging<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>horizon forms<line_sep>LOG=logging.getLogger(__name__)<class_stmt>AddOpenstackEndpointForm(forms.SelfHandlingForm)<block_start>failure_url='horizon:vsm:openstackconnect:index'<line_sep>os_tenant_name=forms.CharField(label=_("Tenant Name") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_username=forms.CharField(label=_("UserName") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_password=forms.CharField(label=_("Password") widget=forms.PasswordInput(render_value=<false>) max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_auth_url=forms.CharField(label=_("Auth Url") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_region_name=forms.CharField(label=_("Region Name") max_length=255 min_length=0 required=<false>)<line_sep>ssh_user=forms.CharField(label=_("SSH User Name") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<def_stmt>handle self request data<block_start><pass><line_sep># TODO deliver a cluster id in data
# data['cluster_id'] = 1
# try:
# LOG.info("CEPH_LOG in ADD ip, %s" % str(data))
# os_tenant_name = data['os_tenant_name']
# os_username = data['os_username']
# os_password = data['<PASSWORD>']
# os_auth_url = data['os_auth_url']
# ip = os_auth_url.split(":")[1][2:]
# appnodes = vsm_api.appnode_list(request)
# for appnode in appnodes:
# old_os_auth_url = appnode.os_auth_url
# old_ip = old_os_auth_url.split(":")[1][2:]
# if ip == old_ip:
# messages.error(request, "duplicate ip address")
# return False
# body = {
# 'appnodes': {
# 'os_tenant_name': os_tenant_name,
# 'os_username': os_username,
# 'os_password': <PASSWORD>,
# 'os_auth_url': os_auth_url
# }
# }
# LOG.info("CEPH_LOG in handle body %s" % str(body))
# ret = vsm_api.add_appnodes(request, body['appnodes'])
#
# messages.success(request,
# _('Successfully add openstack: %s')
# % data['os_auth_url'])
# return ret
# except:
# redirect = reverse("horizon:vsm:openstackconnect:index")
# exceptions.handle(request,
# _('Unable to create appnode.'),
# redirect=redirect)
<block_end><block_end><class_stmt>UpdateOpenstackEndpointForm(forms.SelfHandlingForm)<block_start>id=forms.CharField(label=_("ID") widget=forms.HiddenInput)<line_sep>os_tenant_name=forms.CharField(label=_("Tenant Name") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_username=forms.CharField(label=_("UserName") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_password=forms.CharField(label=_("Password") widget=forms.PasswordInput(render_value=<false>) max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_auth_url=forms.CharField(label=_("Auth Url") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<line_sep>os_region_name=forms.CharField(label=_("Region Name") max_length=255 min_length=0 required=<false>)<line_sep>ssh_user=forms.CharField(label=_("SSH User Name") max_length=255 min_length=1 error_messages={'required':_('This field is required.')})<def_stmt>handle self request data<block_start><pass><line_sep># failed, succeeded = [], []
# id = data.pop('id')
# # ip = data.pop('ip')
# os_tenant_name = data.pop('os_tenant_name')
# os_username = data.pop('os_username')
# os_password = data.pop('os_password')
# os_auth_url = data.pop('os_auth_url')
# vsm_api.update_appnode(request, id,
# os_tenant_name=os_tenant_name,
# os_username=os_username,
# os_password=<PASSWORD>,
# os_auth_url=os_auth_url,
# ssh_status="",
# log_info="")
#
# messages.success(request, _('OpenStack auth has been updated successfully.'))
# return True
#
# if failed:
# failed = map(force_unicode, failed)
# messages.error(request,
# _('Unable to update %(attributes)s for the user.')
# % {"attributes": ", ".join(failed)})
# return True
<block_end><block_end> |
# Add support for multiple event queues
<def_stmt>upgrader cpt<block_start>cpt.set('Globals' 'numMainEventQueues' '1')<block_end>legacy_version=12<line_sep> |
"For loading module"<line_sep> |
# -*- coding: utf-8 -*-
<import_stmt>re<import_stmt>pytest<import_from_stmt>mimesis Hardware<import_from_stmt>mimesis.data CPU CPU_CODENAMES CPU_MODEL_CODES GENERATION GRAPHICS HDD_SSD MANUFACTURERS PHONE_MODELS RAM_SIZES RAM_TYPES RESOLUTIONS SCREEN_SIZES <import_from_stmt>. patterns<class_stmt>TestHardware(object)<block_start>@pytest.fixture<def_stmt>hard self<block_start><return>Hardware()<block_end><def_stmt>test_str self hard<block_start><assert_stmt>re.match(patterns.PROVIDER_STR_REGEX str(hard))<block_end><def_stmt>test_resolution self hard<block_start>result=hard.resolution()<assert_stmt>result<in>RESOLUTIONS<block_end><def_stmt>test_screen_size self hard<block_start>result=hard.screen_size()<assert_stmt>result<in>SCREEN_SIZES<block_end><def_stmt>test_generation self hard<block_start>result=hard.generation()<assert_stmt>result<in>GENERATION<assert_stmt>isinstance(result str)<block_end><def_stmt>test_cpu_model_code self hard<block_start>result=hard.cpu_model_code()<assert_stmt>result<in>CPU_MODEL_CODES<assert_stmt>isinstance(result str)<block_end><def_stmt>test_cpu_frequency self hard<block_start>result=hard.cpu_frequency().split("G")[0]<assert_stmt>float(result)<l>4.4<block_end><def_stmt>test_cpu self hard<block_start>result=hard.cpu()<assert_stmt>result<in>CPU<block_end><def_stmt>test_cpu_codename self hard<block_start>result=hard.cpu_codename()<assert_stmt>result<in>CPU_CODENAMES<block_end><def_stmt>test_ram_type self hard<block_start>result=hard.ram_type()<assert_stmt>result<in>RAM_TYPES<block_end><def_stmt>test_ram_size self hard<block_start>result=hard.ram_size()<assert_stmt>result<in>RAM_SIZES<block_end><def_stmt>test_ssd_or_hdd self hard<block_start>result=hard.ssd_or_hdd()<assert_stmt>result<in>HDD_SSD<block_end><def_stmt>test_graphics self hard<block_start>result=hard.graphics()<assert_stmt>result<in>GRAPHICS<block_end><def_stmt>test_manufacturer self hard<block_start>result=hard.manufacturer()<assert_stmt>result<in>MANUFACTURERS<block_end><def_stmt>test_phone_model self hard<block_start>result=hard.phone_model()<assert_stmt>result<in>PHONE_MODELS<block_end><block_end><class_stmt>TestSeededHardware(object)<block_start>@pytest.fixture<def_stmt>h1 self seed<block_start><return>Hardware(seed=seed)<block_end>@pytest.fixture<def_stmt>h2 self seed<block_start><return>Hardware(seed=seed)<block_end><def_stmt>test_resolution self h1 h2<block_start><assert_stmt>h1.resolution()<eq>h2.resolution()<block_end><def_stmt>test_screen_size self h1 h2<block_start><assert_stmt>h1.screen_size()<eq>h2.screen_size()<block_end><def_stmt>test_generation self h1 h2<block_start><assert_stmt>h1.generation()<eq>h2.generation()<block_end><def_stmt>test_cpu_model_code self h1 h2<block_start><assert_stmt>h1.cpu_model_code()<eq>h2.cpu_model_code()<block_end><def_stmt>test_cpu_frequency self h1 h2<block_start><assert_stmt>h1.cpu_frequency()<eq>h2.cpu_frequency()<block_end><def_stmt>test_cpu self h1 h2<block_start><assert_stmt>h1.cpu()<eq>h2.cpu()<block_end><def_stmt>test_cpu_codename self h1 h2<block_start><assert_stmt>h1.cpu_codename()<eq>h2.cpu_codename()<block_end><def_stmt>test_ram_type self h1 h2<block_start><assert_stmt>h1.ram_type()<eq>h2.ram_type()<block_end><def_stmt>test_ram_size self h1 h2<block_start><assert_stmt>h1.ram_size()<eq>h2.ram_size()<block_end><def_stmt>test_ssd_or_hdd self h1 h2<block_start><assert_stmt>h1.ssd_or_hdd()<eq>h2.ssd_or_hdd()<block_end><def_stmt>test_graphics self h1 h2<block_start><assert_stmt>h1.graphics()<eq>h2.graphics()<block_end><def_stmt>test_manufacturer self h1 
h2<block_start><assert_stmt>h1.manufacturer()<eq>h2.manufacturer()<block_end><def_stmt>test_phone_model self h1 h2<block_start><assert_stmt>h1.phone_model()<eq>h2.phone_model()<block_end><block_end> |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 Server action implementations"""<import_stmt>importlib<import_from_stmt>osc_lib.command command<import_from_stmt>osc_lib exceptions<import_from_stmt>osc_lib utils<import_from_stmt>openstackclient.i18n _<class_stmt>CreateServerBackup(command.ShowOne)<block_start>_description=_("Create a server backup image")<line_sep>IMAGE_API_VERSIONS={"1":"openstackclient.image.v1.image" "2":"openstackclient.image.v2.image" }<def_stmt>get_parser self prog_name<block_start>parser=super(CreateServerBackup self).get_parser(prog_name)<line_sep>parser.add_argument('server' metavar='<server>' help=_('Server to back up (name or ID)') )<line_sep>parser.add_argument('--name' metavar='<image-name>' help=_('Name of the backup image (default: server name)') )<line_sep>parser.add_argument('--type' metavar='<backup-type>' help=_('Used to populate the backup_type property of the backup '<concat>'image (default: empty)') )<line_sep>parser.add_argument('--rotate' metavar='<count>' type=int help=_('Number of backups to keep (default: 1)') )<line_sep>parser.add_argument('--wait' action='store_true' help=_('Wait for backup image create to complete') )<line_sep><return>parser<block_end><def_stmt>take_action self parsed_args<block_start><def_stmt>_show_progress progress<block_start><if_stmt>progress<block_start>self.app.stderr.write('\rProgress: %s'%progress)<line_sep>self.app.stderr.flush()<block_end><block_end>compute_client=self.app.client_manager.sdk_connection.compute<line_sep>server=compute_client.find_server(parsed_args.server)<line_sep># Set sane defaults as this API wants all mouths to be fed
<if_stmt>parsed_args.name<is><none><block_start>backup_name=server.name<block_end><else_stmt><block_start>backup_name=parsed_args.name<block_end><if_stmt>parsed_args.type<is><none><block_start>backup_type=""<block_end><else_stmt><block_start>backup_type=parsed_args.type<block_end><if_stmt>parsed_args.rotate<is><none><block_start>backup_rotation=1<block_end><else_stmt><block_start>backup_rotation=parsed_args.rotate<block_end>compute_client.backup_server(server.id backup_name backup_type backup_rotation )<line_sep>image_client=self.app.client_manager.image<line_sep>image=image_client.find_image(backup_name ignore_missing=<false>)<if_stmt>parsed_args.wait<block_start><if_stmt>utils.wait_for_status(image_client.get_image image.id callback=_show_progress )<block_start>self.app.stdout.write('\n')<block_end><else_stmt><block_start>msg=_('Error creating server backup: %s')%parsed_args.name<line_sep><raise>exceptions.CommandError(msg)<block_end><block_end><if_stmt>self.app.client_manager._api_version['image']<eq>'1'<block_start>info={}<line_sep>info.update(image._info)<line_sep>info['properties']=utils.format_dict(info.get('properties' {}))<block_end><else_stmt># Get the right image module to format the output
<block_start>image_module=importlib.import_module(self.IMAGE_API_VERSIONS[self.app.client_manager._api_version['image']])<line_sep>info=image_module._format_image(image)<block_end><return>zip(*sorted(info.items()))<block_end><block_end> |
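# Hedged usage sketch (assuming this plugin is exposed as the usual
# "openstack server backup create" command; the server name and option
# values below are hypothetical, chosen only to illustrate the parser
# arguments defined above):
#
#   openstack server backup create --name nightly-web01 --type daily \
#       --rotate 2 --wait web01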
<import_from_stmt>sklearn.decomposition FactorAnalysis<import_from_stmt>sklearn.decomposition FastICA<import_from_stmt>sklearn.decomposition LatentDirichletAllocation<import_from_stmt>sklearn.decomposition TruncatedSVD<import_from_stmt>sklearn.decomposition NMF<import_from_stmt>sklearn.manifold Isomap<import_from_stmt>sklearn.manifold MDS<import_from_stmt>sklearn.manifold LocallyLinearEmbedding<import_from_stmt>sklearn.manifold SpectralEmbedding<import_from_stmt>sklearn.manifold TSNE<import_from_stmt>umap UMAP<import_from_stmt>sklearn.discriminant_analysis LinearDiscriminantAnalysis<import_from_stmt>ml.analysis.pca PCA<import_stmt>keras<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<class_stmt>Autoencoder<block_start><def_stmt>__init__ self n_components n_layers=1 **kwargs<block_start>self.n_components=n_components<line_sep>self.n_layers=n_layers<line_sep>self.kwargs=kwargs<block_end><def_stmt>fit self X y=<none><block_start>input_=keras.layers.Input(shape=(X.shape[1]))<line_sep>encoded=keras.layers.Dense(self.n_components activation='relu')(input_)<line_sep>decoded=keras.layers.Dense(X.shape[1] activation='relu')(encoded)<line_sep>self.autoencoder=keras.Model(input_ decoded)<line_sep>self.encoder=keras.Model(input_ encoded)<line_sep>self.autoencoder.compile(loss=keras.losses.MeanSquaredError())<line_sep>print(X.shape[1])<line_sep>self.autoencoder.fit(X X epochs=100 batch_size=64 shuffle=<true>)<block_end><def_stmt>transform self X y=<none><block_start><return>self.encoder.predict(X)<block_end><def_stmt>fit_transform self X y=<none><block_start>self.fit(X)<line_sep><return>self.encoder.predict(X)<block_end><block_end><class_stmt>DimensionalityReducer<block_start><def_stmt>__init__ self reducer **kwargs<block_start>"""
Constructor
Parameters
----------
reducer : str
name of the dimensionality reduction algorithm to be applied
**kwargs :
optional and positional arguments of the chosen algorithm (reducer)
Returns
-------
DimensionalityReducer
Examples
---------
factor analysis: d = DimensionalityReducer('factor_analysis', n_components=2) #Instantiating
#n_components and any other keyword arguments are forwarded to the chosen algorithm
d.fit(X) #fitting
X = d.transform(X) #transforming
umap: d = DimensionalityReducer('umap', n_components=2) #Instantiating
d.fit(X) #fitting
X = d.transform(X) #transforming
tsne: d = DimensionalityReducer('tsne', n_components=2) #Instantiating
X = d.fit_transform(X) #fitting and transforming in one step
#manifold methods such as 'tsne' and 'mds' only implement fit_transform,
#so use fit_transform instead of fit followed by transform for them
to better understand the optional arguments of each algorithm see:
https://scikit-learn.org/stable/modules/decomposition.html and https://scikit-learn.org/stable/modules/manifold.html
"""<line_sep>self.reducer=reducer<line_sep>self.reducers={'factor_analysis':FactorAnalysis 'pca':PCA 'ica':FastICA 'isomap':Isomap 'locally_linear_embedding':LocallyLinearEmbedding 'spectral_embedding':SpectralEmbedding 'tsne':TSNE 'mds':MDS 'umap':UMAP 'latent_dirichlet':LatentDirichletAllocation 'truncated_svd':TruncatedSVD 'nmf':NMF 'linear_discriminant':LinearDiscriminantAnalysis 'autoencoder':Autoencoder}<line_sep>self.kwargs=kwargs<line_sep>self.fitted=<false><line_sep>self.reduction=self.reducers[self.reducer](**self.kwargs)<block_end><def_stmt>fit self X:pd.DataFrame y=<none><block_start>"""
Fit the chosen dimensionality reduction algorithm on the data.
Parameters
----------
X : pd.DataFrame
features to be reduced
y : pd.DataFrame
target values
Returns
-------
None
"""<line_sep>self.columns=X.columns<line_sep>self.reduction.fit(X y)<line_sep>self.fitted=<true><block_end><def_stmt>transform self df:pd.DataFrame y=<none><block_start>"""
Reduce the dimensionality of the data using the fitted reducer
Parameters
----------
pd.DataFrame
dataframe with features to be reduced
Returns
-------
df : pd.DataFrame
data with reduced dimensionality
"""<if_stmt><not>self.fitted<block_start><raise>Exception("Not yet trained.")<block_end><return>self.reduction.transform(df)<block_end><def_stmt>fit_transform self df:pd.DataFrame y=<none><block_start>"""
Fit the reducer and reduce the dimensionality of the data in one step
Parameters
----------
pd.DataFrame
dataframe with features to be reduced
Returns
-------
df : pd.DataFrame
data with reduced dimensionality
"""<line_sep><return>self.reduction.fit_transform(df y)<block_end><def_stmt>inverse_transform self df:pd.DataFrame<block_start>"""
Apply the inverse_transform of the fitted reducer
to map reduced data back to the original feature space
Parameters
----------
df : pd.DataFrame
dataframe with reduced columns to be mapped back to the original space
Returns
-------
pd.DataFrame
"""<if_stmt><not>self.fitted<block_start><raise>Exception("Not yet trained.")<block_end><return>self.reduction.inverse_transform(df)<block_end><block_end> |
# IMPORTATION STANDARD
<import_stmt>requests<import_stmt>logging<import_from_stmt>typing Optional<line_sep># IMPORTATION THIRD PARTY
# IMPORTATION INTERNAL
<import_stmt>degiro_connector.core.constants.urls<as>urls<import_from_stmt>degiro_connector.quotecast.models.quotecast_pb2 Quotecast <import_from_stmt>degiro_connector.core.abstracts.abstract_action AbstractAction<class_stmt>ActionSubscribe(AbstractAction)<block_start>@staticmethod<def_stmt>quotecast_request_to_api request:Quotecast.Request<arrow>str<block_start>payload='{"controlData":"'<for_stmt>vwd_id request.subscriptions<block_start><for_stmt>metric_name request.subscriptions[vwd_id]<block_start>payload<augadd>"a_req("+vwd_id+"."+metric_name+");"<block_end><block_end><for_stmt>vwd_id request.unsubscriptions<block_start><for_stmt>metric_name request.unsubscriptions[vwd_id]<block_start>payload<augadd>"a_rel("+vwd_id+"."+metric_name+");"<block_end><block_end>payload<augadd>'"}'<line_sep><return>payload<block_end>@classmethod<def_stmt>subscribe cls request:Quotecast.Request session_id:str session:requests.Session=<none> logger:logging.Logger=<none> <arrow>Optional[bool]<block_start>"""Adds/removes metric from the data-stream.
Args:
request (QuotecastAPI.Request):
List of subscriptions & unsubscriptions to do.
Example :
request = Quotecast.Request()
request.subscriptions['360015751'].extend([
'LastPrice',
'LastVolume',
])
request.subscriptions['AAPL.BATS,E'].extend([
'LastPrice',
'LastVolume',
])
request.unsubscriptions['360015751'].extend([
'LastPrice',
'LastVolume',
])
session_id (str):
API's session id.
session (requests.Session, optional):
This object will be generated if None.
Defaults to None.
logger (logging.Logger, optional):
This object will be generated if None.
Defaults to None.
Raises:
BrokenPipeError:
A new "session_id" is required.
Returns:
bool:
Whether or not the subscription succeeded.
"""<if_stmt>logger<is><none><block_start>logger=cls.build_logger()<block_end><if_stmt>session<is><none><block_start>session=cls.build_session()<block_end>url=urls.QUOTECAST<line_sep>url=f"{url}/{session_id}"<line_sep>data=cls.quotecast_request_to_api(request=request)<line_sep>logger.info("subscribe:data %s" data[:100])<line_sep>session_request=requests.Request(method="POST" url=url data=data)<line_sep>prepped=session.prepare_request(request=session_request)<line_sep>response=<false><try_stmt><block_start>raw_response=session.send(request=prepped verify=<false>)<if_stmt>raw_response.text<eq>'[{"m":"sr"}]'<block_start><raise>BrokenPipeError('A new "session_id" is required.')<block_end><else_stmt><block_start>response=<true><block_end><block_end><except_stmt>Exception<as>e<block_start>logger.fatal(e)<line_sep><return><none><block_end><return>response<block_end><def_stmt>call self request:Quotecast.Request<arrow>Optional[bool]<block_start>session_id=self.connection_storage.session_id<line_sep>session=self.session_storage.session<line_sep>logger=self.logger<line_sep><return>self.subscribe(request=request session_id=session_id session=session logger=logger )<block_end><block_end> |
<import_stmt>logging<import_from_stmt>seahub.group.utils is_group_admin<import_from_stmt>seahub.constants PERMISSION_ADMIN PERMISSION_READ_WRITE CUSTOM_PERMISSION_PREFIX<import_from_stmt>seahub.share.models ExtraSharePermission ExtraGroupsSharePermission CustomSharePermissions<import_from_stmt>seahub.utils is_valid_org_id<import_stmt>seaserv<import_from_stmt>seaserv seafile_api<line_sep>logger=logging.getLogger(__name__)<def_stmt>normalize_custom_permission_name permission<block_start><try_stmt><block_start><if_stmt>CUSTOM_PERMISSION_PREFIX<in>permission<block_start>permission=permission.split('-')[1]<block_end>CustomSharePermissions.objects.get(id=int(permission))<block_end><except_stmt>Exception<as>e<block_start>logger.warning(e)<line_sep><return><none><block_end><return>CUSTOM_PERMISSION_PREFIX+'-'+str(permission)<block_end><def_stmt>is_repo_admin username repo_id# repo is shared to user with admin permission
<block_start><try_stmt><block_start>user_share_permission=ExtraSharePermission.objects.get_user_permission(repo_id username)<if_stmt>user_share_permission<eq>PERMISSION_ADMIN<block_start><return><true><block_end># get all groups that repo is shared to with admin permission
group_ids=ExtraGroupsSharePermission.objects.get_admin_groups_by_repo(repo_id)<for_stmt>group_id group_ids<block_start><if_stmt>is_group_admin(group_id username)<block_start><return><true><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<line_sep><return><false><block_end>repo_owner=seafile_api.get_repo_owner(repo_id)<or>seafile_api.get_org_repo_owner(repo_id)<if_stmt><not>repo_owner<block_start>logger.error('repo %s owner is None'%repo_id)<line_sep><return><false><block_end># repo owner
<if_stmt>username<eq>repo_owner<block_start><return><true><block_end># user is department admin
<if_stmt>'@seafile_group'<in>repo_owner# is group owned repo
<block_start>group_id=int(repo_owner.split('@')[0])<if_stmt>is_group_admin(group_id username)<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>share_dir_to_user repo path owner share_from share_to permission org_id=<none># Share repo or subdir to user with permission(r, rw, admin).
<block_start>extra_share_permission=''<if_stmt>permission<eq>PERMISSION_ADMIN<block_start>extra_share_permission=permission<line_sep>permission=PERMISSION_READ_WRITE<block_end><if_stmt>is_valid_org_id(org_id)<block_start><if_stmt>path<eq>'/'<block_start>seaserv.seafserv_threaded_rpc.org_add_share(org_id repo.repo_id owner share_to permission)<block_end><else_stmt><block_start>seafile_api.org_share_subdir_to_user(org_id repo.repo_id path owner share_to permission)<block_end><block_end><else_stmt><block_start><if_stmt>path<eq>'/'<block_start>seafile_api.share_repo(repo.repo_id owner share_to permission)<block_end><else_stmt><block_start>seafile_api.share_subdir_to_user(repo.repo_id path owner share_to permission)<block_end><block_end><if_stmt>path<eq>'/'<and>extra_share_permission<eq>PERMISSION_ADMIN<block_start>ExtraSharePermission.objects.create_share_permission(repo.repo_id share_to extra_share_permission)<block_end><block_end><def_stmt>share_dir_to_group repo path owner share_from gid permission org_id=<none># Share repo or subdir to group with permission(r, rw, admin).
<block_start>extra_share_permission=''<if_stmt>permission<eq>PERMISSION_ADMIN<block_start>extra_share_permission=permission<line_sep>permission=PERMISSION_READ_WRITE<block_end><if_stmt>is_valid_org_id(org_id)<block_start><if_stmt>path<eq>'/'<block_start>seafile_api.add_org_group_repo(repo.repo_id org_id gid owner permission)<block_end><else_stmt><block_start>seafile_api.org_share_subdir_to_group(org_id repo.repo_id path owner gid permission)<block_end><block_end><else_stmt><block_start><if_stmt>path<eq>'/'<block_start>seafile_api.set_group_repo(repo.repo_id gid owner permission)<block_end><else_stmt><block_start>seafile_api.share_subdir_to_group(repo.repo_id path owner gid permission)<block_end><block_end># add share permission if between is admin and is extra permission.
<if_stmt>path<eq>'/'<and>extra_share_permission<eq>PERMISSION_ADMIN<block_start>ExtraGroupsSharePermission.objects.create_share_permission(repo.repo_id gid extra_share_permission)<block_end><block_end><def_stmt>update_user_dir_permission repo_id path owner share_to permission org_id=<none># Update the user's permission(r, rw, admin) in the repo or subdir.
<block_start>extra_share_permission=''<if_stmt>permission<eq>PERMISSION_ADMIN<block_start>extra_share_permission=permission<line_sep>permission=PERMISSION_READ_WRITE<block_end><if_stmt>is_valid_org_id(org_id)<block_start><if_stmt>path<eq>'/'<block_start>seafile_api.org_set_share_permission(org_id repo_id owner share_to permission)<block_end><else_stmt><block_start>seafile_api.org_update_share_subdir_perm_for_user(org_id repo_id path owner share_to permission)<block_end><block_end><else_stmt><block_start><if_stmt>path<eq>'/'<block_start>seafile_api.set_share_permission(repo_id owner share_to permission)<block_end><else_stmt><block_start>seafile_api.update_share_subdir_perm_for_user(repo_id path owner share_to permission)<block_end><block_end><if_stmt>path<eq>'/'<block_start>ExtraSharePermission.objects.update_share_permission(repo_id share_to extra_share_permission)<block_end><block_end><def_stmt>update_group_dir_permission repo_id path owner gid permission org_id=<none># Update the group's permission(r, rw, admin) in the repo or subdir.
<block_start>extra_share_permission=''<if_stmt>permission<eq>PERMISSION_ADMIN<block_start>extra_share_permission=permission<line_sep>permission=PERMISSION_READ_WRITE<block_end><if_stmt>is_valid_org_id(org_id)<block_start><if_stmt>path<eq>'/'<block_start>seaserv.seafserv_threaded_rpc.set_org_group_repo_permission(org_id gid repo_id permission)<block_end><else_stmt><block_start>seafile_api.org_update_share_subdir_perm_for_group(org_id repo_id path owner gid permission)<block_end><block_end><else_stmt><block_start><if_stmt>path<eq>'/'<block_start>seafile_api.set_group_repo_permission(gid repo_id permission)<block_end><else_stmt><block_start>seafile_api.update_share_subdir_perm_for_group(repo_id path owner gid permission)<block_end><block_end># update extra share permission if updated is repo
<if_stmt>path<eq>'/'<block_start>ExtraGroupsSharePermission.objects.update_share_permission(repo_id gid extra_share_permission)<block_end><block_end><def_stmt>check_user_share_out_permission repo_id path share_to is_org=<false># Return the permission you share to others.
<block_start>path=<none><if>path<eq>'/'<else>path<line_sep>repo=seafile_api.get_shared_repo_by_path(repo_id path share_to is_org)<if_stmt><not>repo<block_start><return><none><block_end>permission=repo.permission<if_stmt>path<is><none><block_start>extra_permission=ExtraSharePermission.objects.get_user_permission(repo_id share_to)<line_sep>permission=extra_permission<if>extra_permission<else>repo.permission<block_end><return>permission<block_end><def_stmt>check_user_share_in_permission repo_id share_to is_org=<false># Return the permission to share to you.
<block_start>repo=seafile_api.get_shared_repo_by_path(repo_id <none> share_to is_org)<if_stmt><not>repo<block_start><return><none><block_end>extra_permission=ExtraSharePermission.objects.get_user_permission(repo_id share_to)<line_sep><return>extra_permission<if>extra_permission<else>repo.permission<block_end><def_stmt>check_group_share_out_permission repo_id path group_id is_org=<false># Return the permission that share to other's group.
<block_start>path=<none><if>path<eq>'/'<else>path<line_sep>repo=seafile_api.get_group_shared_repo_by_path(repo_id path group_id is_org)<if_stmt><not>repo<block_start><return><none><block_end>permission=repo.permission<if_stmt>path<is><none><block_start>extra_permission=ExtraGroupsSharePermission.objects.get_group_permission(repo_id group_id)<line_sep>permission=extra_permission<if>extra_permission<else>repo.permission<block_end><return>permission<block_end><def_stmt>check_group_share_in_permission repo_id group_id is_org=<false># Returns the permission to share the group you joined.
<block_start>repo=seafile_api.get_group_shared_repo_by_path(repo_id <none> group_id is_org)<if_stmt><not>repo<block_start><return><none><block_end>extra_permission=ExtraGroupsSharePermission.objects.get_group_permission(repo_id group_id)<line_sep><return>extra_permission<if>extra_permission<else>repo.permission<block_end><def_stmt>has_shared_to_user repo_id path username org_id=<none><block_start><if_stmt>is_valid_org_id(org_id)# when calling seafile API to share authority related functions, change the uesrname to repo owner.
<block_start>repo_owner=seafile_api.get_org_repo_owner(repo_id)<if_stmt>path<eq>'/'<block_start>share_items=seafile_api.list_org_repo_shared_to(org_id repo_owner repo_id)<block_end><else_stmt><block_start>share_items=seafile_api.get_org_shared_users_for_subdir(org_id repo_id path repo_owner)<block_end><block_end><else_stmt><block_start>repo_owner=seafile_api.get_repo_owner(repo_id)<if_stmt>path<eq>'/'<block_start>share_items=seafile_api.list_repo_shared_to(repo_owner repo_id)<block_end><else_stmt><block_start>share_items=seafile_api.get_shared_users_for_subdir(repo_id path repo_owner)<block_end><block_end><return>username<in>[item.user<for>item share_items]<block_end><def_stmt>has_shared_to_group repo_id path gid org_id=<none><block_start><if_stmt>is_valid_org_id(org_id)# when calling seafile API to share authority related functions, change the uesrname to repo owner.
<block_start>repo_owner=seafile_api.get_org_repo_owner(repo_id)<if_stmt>path<eq>'/'<block_start>share_items=seafile_api.list_org_repo_shared_group(org_id repo_owner repo_id)<block_end><else_stmt><block_start>share_items=seafile_api.get_org_shared_groups_for_subdir(org_id repo_id path repo_owner)<block_end><block_end><else_stmt><block_start>repo_owner=seafile_api.get_repo_owner(repo_id)<if_stmt>path<eq>'/'<block_start>share_items=seafile_api.list_repo_shared_group_by_user(repo_owner repo_id)<block_end><else_stmt><block_start>share_items=seafile_api.get_shared_groups_for_subdir(repo_id path repo_owner)<block_end><block_end><return>gid<in>[item.group_id<for>item share_items]<block_end> |
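# Hedged usage sketch of the helpers above (the repo id and username are
# hypothetical placeholders, not values from the original module):
#
#   if is_repo_admin('user@example.com', 'repo-id-123'):
#       perm = check_user_share_in_permission('repo-id-123', 'user@example.com')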
<import_stmt>_sk_fail<line_sep>_sk_fail._("pyclbr")<line_sep> |
"""
Linear regression objects for panel data
"""<line_sep># pylint: disable-msg=W0231
# pylint: disable-msg=E1101,E1103
<import_from_future_stmt> division<import_from_stmt>pandas.compat range<import_from_stmt>pandas compat<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>pandas.core.panel Panel<import_from_stmt>pandas.core.frame DataFrame<import_from_stmt>pandas.core.reshape get_dummies<import_from_stmt>pandas.core.series Series<import_from_stmt>pandas.core.sparse SparsePanel<import_from_stmt>pandas.stats.ols OLS MovingOLS<import_stmt>pandas.stats.common<as>com<import_stmt>pandas.stats.math<as>math<import_from_stmt>pandas.util.decorators cache_readonly<class_stmt>PanelOLS(OLS)<block_start>"""Implements panel OLS.
See ols function docs
"""<line_sep>_panel_model=<true><def_stmt>__init__ self y x weights=<none> intercept=<true> nw_lags=<none> entity_effects=<false> time_effects=<false> x_effects=<none> cluster=<none> dropped_dummies=<none> verbose=<false> nw_overlap=<false><block_start>self._x_orig=x<line_sep>self._y_orig=y<line_sep>self._weights=weights<line_sep>self._intercept=intercept<line_sep>self._nw_lags=nw_lags<line_sep>self._nw_overlap=nw_overlap<line_sep>self._entity_effects=entity_effects<line_sep>self._time_effects=time_effects<line_sep>self._x_effects=x_effects<line_sep>self._dropped_dummies=dropped_dummies<or>{}<line_sep>self._cluster=com._get_cluster_type(cluster)<line_sep>self._verbose=verbose<line_sep>(self._x self._x_trans self._x_filtered self._y self._y_trans)=self._prepare_data()<line_sep>self._index=self._x.index.levels[0]<line_sep>self._T=len(self._index)<block_end><def_stmt>log self msg<block_start><if_stmt>self._verbose# pragma: no cover
<block_start>print(msg)<block_end><block_end><def_stmt>_prepare_data self<block_start>"""Cleans and stacks input data into DataFrame objects
If time effects is True, then we turn off intercepts and omit an item
from every (entity and x) fixed effect.
Otherwise:
- If we have an intercept, we omit an item from every fixed effect.
- Else, we omit an item from every fixed effect except one of them.
The categorical variables will get dropped from x.
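With time effects, y and every x column are also demeaned cross-sectionally
before the regression, i.e. for each time period t the entity mean is
subtracted (x_it - mean_i(x_it)); this is what the
x.sub(x.mean(level=0), level=0) call below implements.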
"""<line_sep>(x x_filtered y weights cat_mapping)=self._filter_data()<line_sep>self.log('Adding dummies to X variables')<line_sep>x=self._add_dummies(x cat_mapping)<line_sep>self.log('Adding dummies to filtered X variables')<line_sep>x_filtered=self._add_dummies(x_filtered cat_mapping)<if_stmt>self._x_effects<block_start>x=x.drop(self._x_effects axis=1)<line_sep>x_filtered=x_filtered.drop(self._x_effects axis=1)<block_end><if_stmt>self._time_effects<block_start>x_regressor=x.sub(x.mean(level=0) level=0)<line_sep>unstacked_y=y.unstack()<line_sep>y_regressor=unstacked_y.sub(unstacked_y.mean(1) axis=0).stack()<line_sep>y_regressor.index=y.index<block_end><elif_stmt>self._intercept# only add intercept when no time effects
<block_start>self.log('Adding intercept')<line_sep>x=x_regressor=add_intercept(x)<line_sep>x_filtered=add_intercept(x_filtered)<line_sep>y_regressor=y<block_end><else_stmt><block_start>self.log('No intercept added')<line_sep>x_regressor=x<line_sep>y_regressor=y<block_end><if_stmt>weights<is><not><none><block_start><if_stmt><not>y_regressor.index.equals(weights.index)<block_start><raise>AssertionError("y_regressor and weights must have the "<concat>"same index")<block_end><if_stmt><not>x_regressor.index.equals(weights.index)<block_start><raise>AssertionError("x_regressor and weights must have the "<concat>"same index")<block_end>rt_weights=np.sqrt(weights)<line_sep>y_regressor=y_regressor<times>rt_weights<line_sep>x_regressor=x_regressor.mul(rt_weights axis=0)<block_end><return>x x_regressor x_filtered y y_regressor<block_end><def_stmt>_filter_data self<block_start>"""
"""<line_sep>data=self._x_orig<line_sep>cat_mapping={}<if_stmt>isinstance(data DataFrame)<block_start>data=data.to_panel()<block_end><else_stmt><block_start><if_stmt>isinstance(data Panel)<block_start>data=data.copy()<block_end><if_stmt><not>isinstance(data SparsePanel)<block_start>data,cat_mapping=self._convert_x(data)<block_end><if_stmt><not>isinstance(data Panel)<block_start>data=Panel.from_dict(data intersect=<true>)<block_end><block_end>x_names=data.items<if_stmt>self._weights<is><not><none><block_start>data['__weights__']=self._weights<block_end># Filter x's without y (so we can make a prediction)
filtered=data.to_frame()<line_sep># Filter all data together using to_frame
# convert to DataFrame
y=self._y_orig<if_stmt>isinstance(y Series)<block_start>y=y.unstack()<block_end>data['__y__']=y<line_sep>data_long=data.to_frame()<line_sep>x_filt=filtered.filter(x_names)<line_sep>x=data_long.filter(x_names)<line_sep>y=data_long['__y__']<if_stmt>self._weights<is><not><none><and><not>self._weights.empty<block_start>weights=data_long['__weights__']<block_end><else_stmt><block_start>weights=<none><block_end><return>x x_filt y weights cat_mapping<block_end><def_stmt>_convert_x self x# Converts non-numeric data in x to floats. x_converted is the
# DataFrame with converted values, and x_conversion is a dict that
# provides the reverse mapping. For example, if 'A' was converted to 0
# for x named 'variety', then x_conversion['variety'][0] is 'A'.
<block_start>x_converted={}<line_sep>cat_mapping={}<line_sep># x can be either a dict or a Panel, but in Python 3, dicts don't have
# .iteritems
iteritems=getattr(x 'iteritems' x.items)<for_stmt>key,df iteritems()<block_start><if_stmt><not>isinstance(df DataFrame)<block_start><raise>AssertionError("all input items must be DataFrames, "<concat>"at least one is of "<concat>"type {0}".format(type(df)))<block_end><if_stmt>_is_numeric(df)<block_start>x_converted[key]=df<block_end><else_stmt><block_start><try_stmt><block_start>df=df.astype(float)<block_end><except_stmt>(TypeError ValueError)<block_start>values=df.values<line_sep>distinct_values=sorted(set(values.flat))<line_sep>cat_mapping[key]=dict(enumerate(distinct_values))<line_sep>new_values=np.searchsorted(distinct_values values)<line_sep>x_converted[key]=DataFrame(new_values index=df.index columns=df.columns)<block_end><block_end><block_end><if_stmt>len(cat_mapping)<eq>0<block_start>x_converted=x<block_end><return>x_converted cat_mapping<block_end><def_stmt>_add_dummies self panel mapping<block_start>"""
Add entity and / or categorical dummies to input X DataFrame
Returns
-------
DataFrame
"""<line_sep>panel=self._add_entity_effects(panel)<line_sep>panel=self._add_categorical_dummies(panel mapping)<line_sep><return>panel<block_end><def_stmt>_add_entity_effects self panel<block_start>"""
Add entity dummies to panel
Returns
-------
DataFrame
"""<import_from_stmt>pandas.core.reshape make_axis_dummies<if_stmt><not>self._entity_effects<block_start><return>panel<block_end>self.log('-- Adding entity fixed effect dummies')<line_sep>dummies=make_axis_dummies(panel 'minor')<if_stmt><not>self._use_all_dummies<block_start><if_stmt>'entity'<in>self._dropped_dummies<block_start>to_exclude=str(self._dropped_dummies.get('entity'))<block_end><else_stmt><block_start>to_exclude=dummies.columns[0]<block_end><if_stmt>to_exclude<not><in>dummies.columns<block_start><raise>Exception('%s not in %s'%(to_exclude dummies.columns))<block_end>self.log('-- Excluding dummy for entity: %s'%to_exclude)<line_sep>dummies=dummies.filter(dummies.columns.difference([to_exclude]))<block_end>dummies=dummies.add_prefix('FE_')<line_sep>panel=panel.join(dummies)<line_sep><return>panel<block_end><def_stmt>_add_categorical_dummies self panel cat_mappings<block_start>"""
Add categorical dummies to panel
Returns
-------
DataFrame
"""<if_stmt><not>self._x_effects<block_start><return>panel<block_end>dropped_dummy=(self._entity_effects<and><not>self._use_all_dummies)<for_stmt>effect self._x_effects<block_start>self.log('-- Adding fixed effect dummies for %s'%effect)<line_sep>dummies=get_dummies(panel[effect])<line_sep>val_map=cat_mappings.get(effect)<if_stmt>val_map<block_start>val_map=dict((v k)<for>k,v compat.iteritems(val_map))<block_end><if_stmt>dropped_dummy<or><not>self._use_all_dummies<block_start><if_stmt>effect<in>self._dropped_dummies<block_start>to_exclude=mapped_name=self._dropped_dummies.get(effect)<if_stmt>val_map<block_start>mapped_name=val_map[to_exclude]<block_end><block_end><else_stmt><block_start>to_exclude=mapped_name=dummies.columns[0]<block_end><if_stmt>mapped_name<not><in>dummies.columns# pragma: no cover
<block_start><raise>Exception('%s not in %s'%(to_exclude dummies.columns))<block_end>self.log('-- Excluding dummy for %s: %s'%(effect to_exclude))<line_sep>dummies=dummies.filter(dummies.columns.difference([mapped_name]))<line_sep>dropped_dummy=<true><block_end>dummies=_convertDummies(dummies cat_mappings.get(effect))<line_sep>dummies=dummies.add_prefix('%s_'%effect)<line_sep>panel=panel.join(dummies)<block_end><return>panel<block_end>@property<def_stmt>_use_all_dummies self<block_start>"""
In the case of using an intercept or including time fixed
effects, completely partitioning the sample would make the X
not full rank.
"""<line_sep><return>(<not>self._intercept<and><not>self._time_effects)<block_end>@cache_readonly<def_stmt>_beta_raw self<block_start>"""Runs the regression and returns the beta."""<line_sep>X=self._x_trans.values<line_sep>Y=self._y_trans.values.squeeze()<line_sep>beta,_,_,_=np.linalg.lstsq(X Y)<line_sep><return>beta<block_end>@cache_readonly<def_stmt>beta self<block_start><return>Series(self._beta_raw index=self._x.columns)<block_end>@cache_readonly<def_stmt>_df_model_raw self<block_start>"""Returns the raw model degrees of freedom."""<line_sep><return>self._df_raw-1<block_end>@cache_readonly<def_stmt>_df_resid_raw self<block_start>"""Returns the raw residual degrees of freedom."""<line_sep><return>self._nobs-self._df_raw<block_end>@cache_readonly<def_stmt>_df_raw self<block_start>"""Returns the degrees of freedom."""<line_sep>df=math.rank(self._x_trans.values)<if_stmt>self._time_effects<block_start>df<augadd>self._total_times<block_end><return>df<block_end>@cache_readonly<def_stmt>_r2_raw self<block_start>Y=self._y_trans.values.squeeze()<line_sep>X=self._x_trans.values<line_sep>resid=Y-np.dot(X self._beta_raw)<line_sep>SSE=(resid<power>2).sum()<if_stmt>self._use_centered_tss<block_start>SST=((Y-np.mean(Y))<power>2).sum()<block_end><else_stmt><block_start>SST=(Y<power>2).sum()<block_end><return>1-SSE/SST<block_end>@property<def_stmt>_use_centered_tss self# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
<block_start><return>self._intercept<or>self._entity_effects<or>self._time_effects<block_end>@cache_readonly<def_stmt>_r2_adj_raw self<block_start>"""Returns the raw r-squared adjusted values."""<line_sep>nobs=self._nobs<line_sep>factors=(nobs-1)/(nobs-self._df_raw)<line_sep><return>1-(1-self._r2_raw)<times>factors<block_end>@cache_readonly<def_stmt>_resid_raw self<block_start>Y=self._y.values.squeeze()<line_sep>X=self._x.values<line_sep><return>Y-np.dot(X self._beta_raw)<block_end>@cache_readonly<def_stmt>resid self<block_start><return>self._unstack_vector(self._resid_raw)<block_end>@cache_readonly<def_stmt>_rmse_raw self<block_start>"""Returns the raw rmse values."""<line_sep># X = self._x.values
# Y = self._y.values.squeeze()
X=self._x_trans.values<line_sep>Y=self._y_trans.values.squeeze()<line_sep>resid=Y-np.dot(X self._beta_raw)<line_sep>ss=(resid<power>2).sum()<line_sep><return>np.sqrt(ss/(self._nobs-self._df_raw))<block_end>@cache_readonly<def_stmt>_var_beta_raw self<block_start>cluster_axis=<none><if_stmt>self._cluster<eq>'time'<block_start>cluster_axis=0<block_end><elif_stmt>self._cluster<eq>'entity'<block_start>cluster_axis=1<block_end>x=self._x<line_sep>y=self._y<if_stmt>self._time_effects<block_start>xx=_xx_time_effects(x y)<block_end><else_stmt><block_start>xx=np.dot(x.values.T x.values)<block_end><return>_var_beta_panel(y x self._beta_raw xx self._rmse_raw cluster_axis self._nw_lags self._nobs self._df_raw self._nw_overlap)<block_end>@cache_readonly<def_stmt>_y_fitted_raw self<block_start>"""Returns the raw fitted y values."""<line_sep><return>np.dot(self._x.values self._beta_raw)<block_end>@cache_readonly<def_stmt>y_fitted self<block_start><return>self._unstack_vector(self._y_fitted_raw index=self._x.index)<block_end><def_stmt>_unstack_vector self vec index=<none><block_start><if_stmt>index<is><none><block_start>index=self._y_trans.index<block_end>panel=DataFrame(vec index=index columns=['dummy'])<line_sep><return>panel.to_panel()['dummy']<block_end><def_stmt>_unstack_y self vec<block_start>unstacked=self._unstack_vector(vec)<line_sep><return>unstacked.reindex(self.beta.index)<block_end>@cache_readonly<def_stmt>_time_obs_count self<block_start><return>self._y_trans.count(level=0).values<block_end>@cache_readonly<def_stmt>_time_has_obs self<block_start><return>self._time_obs_count<g>0<block_end>@property<def_stmt>_nobs self<block_start><return>len(self._y)<block_end><block_end><def_stmt>_convertDummies dummies mapping# cleans up the names of the generated dummies
<block_start>new_items=[]<for_stmt>item dummies.columns<block_start><if_stmt><not>mapping<block_start>var=str(item)<if_stmt>isinstance(item float)<block_start>var='%g'%item<block_end>new_items.append(var)<block_end><else_stmt># renames the dummies if a conversion dict is provided
<block_start>new_items.append(mapping[int(item)])<block_end><block_end>dummies=DataFrame(dummies.values index=dummies.index columns=new_items)<line_sep><return>dummies<block_end><def_stmt>_is_numeric df<block_start><for_stmt>col df<block_start><if_stmt>df[col].dtype.name<eq>'object'<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>add_intercept panel name='intercept'<block_start>"""
Add column of ones to input panel
Parameters
----------
panel: Panel / DataFrame
name: string, default 'intercept'
Returns
-------
New object (same type as input)
"""<line_sep>panel=panel.copy()<line_sep>panel[name]=1.<line_sep><return>panel.consolidate()<block_end><class_stmt>MovingPanelOLS(MovingOLS PanelOLS)<block_start>"""Implements rolling/expanding panel OLS.
See ols function docs
"""<line_sep>_panel_model=<true><def_stmt>__init__ self y x weights=<none> window_type='expanding' window=<none> min_periods=<none> min_obs=<none> intercept=<true> nw_lags=<none> nw_overlap=<false> entity_effects=<false> time_effects=<false> x_effects=<none> cluster=<none> dropped_dummies=<none> verbose=<false><block_start>self._args=dict(intercept=intercept nw_lags=nw_lags nw_overlap=nw_overlap entity_effects=entity_effects time_effects=time_effects x_effects=x_effects cluster=cluster dropped_dummies=dropped_dummies verbose=verbose)<line_sep>PanelOLS.__init__(self y=y x=x weights=weights **self._args)<line_sep>self._set_window(window_type window min_periods)<if_stmt>min_obs<is><none><block_start>min_obs=len(self._x.columns)+1<block_end>self._min_obs=min_obs<block_end>@cache_readonly<def_stmt>resid self<block_start><return>self._unstack_y(self._resid_raw)<block_end>@cache_readonly<def_stmt>y_fitted self<block_start><return>self._unstack_y(self._y_fitted_raw)<block_end>@cache_readonly<def_stmt>y_predict self<block_start>"""Returns the predicted y values."""<line_sep><return>self._unstack_y(self._y_predict_raw)<block_end><def_stmt>lagged_y_predict self lag=1<block_start>"""
Compute forecast Y value lagging coefficient by input number
of time periods
Parameters
----------
lag : int
Returns
-------
DataFrame
"""<line_sep>x=self._x.values<line_sep>betas=self._beta_matrix(lag=lag)<line_sep><return>self._unstack_y((betas<times>x).sum(1))<block_end>@cache_readonly<def_stmt>_rolling_ols_call self<block_start><return>self._calc_betas(self._x_trans self._y_trans)<block_end>@cache_readonly<def_stmt>_df_raw self<block_start>"""Returns the degrees of freedom."""<line_sep>df=self._rolling_rank()<if_stmt>self._time_effects<block_start>df<augadd>self._window_time_obs<block_end><return>df[self._valid_indices]<block_end>@cache_readonly<def_stmt>_var_beta_raw self<block_start>"""Returns the raw covariance of beta."""<line_sep>x=self._x<line_sep>y=self._y<line_sep>dates=x.index.levels[0]<line_sep>cluster_axis=<none><if_stmt>self._cluster<eq>'time'<block_start>cluster_axis=0<block_end><elif_stmt>self._cluster<eq>'entity'<block_start>cluster_axis=1<block_end>nobs=self._nobs<line_sep>rmse=self._rmse_raw<line_sep>beta=self._beta_raw<line_sep>df=self._df_raw<line_sep>window=self._window<if_stmt><not>self._time_effects# Non-transformed X
<block_start>cum_xx=self._cum_xx(x)<block_end>results=[]<for_stmt>n,i enumerate(self._valid_indices)<block_start><if_stmt>self._is_rolling<and>i<ge>window<block_start>prior_date=dates[i-window+1]<block_end><else_stmt><block_start>prior_date=dates[0]<block_end>date=dates[i]<line_sep>x_slice=x.truncate(prior_date date)<line_sep>y_slice=y.truncate(prior_date date)<if_stmt>self._time_effects<block_start>xx=_xx_time_effects(x_slice y_slice)<block_end><else_stmt><block_start>xx=cum_xx[i]<if_stmt>self._is_rolling<and>i<ge>window<block_start>xx=xx-cum_xx[i-window]<block_end><block_end>result=_var_beta_panel(y_slice x_slice beta[n] xx rmse[n] cluster_axis self._nw_lags nobs[n] df[n] self._nw_overlap)<line_sep>results.append(result)<block_end><return>np.array(results)<block_end>@cache_readonly<def_stmt>_resid_raw self<block_start>beta_matrix=self._beta_matrix(lag=0)<line_sep>Y=self._y.values.squeeze()<line_sep>X=self._x.values<line_sep>resid=Y-(X<times>beta_matrix).sum(1)<line_sep><return>resid<block_end>@cache_readonly<def_stmt>_y_fitted_raw self<block_start>x=self._x.values<line_sep>betas=self._beta_matrix(lag=0)<line_sep><return>(betas<times>x).sum(1)<block_end>@cache_readonly<def_stmt>_y_predict_raw self<block_start>"""Returns the raw predicted y values."""<line_sep>x=self._x.values<line_sep>betas=self._beta_matrix(lag=1)<line_sep><return>(betas<times>x).sum(1)<block_end><def_stmt>_beta_matrix self lag=0<block_start><if_stmt>lag<l>0<block_start><raise>AssertionError("'lag' must be greater than or equal to 0, "<concat>"input was {0}".format(lag))<block_end>index=self._y_trans.index<line_sep>major_labels=index.labels[0]<line_sep>labels=major_labels-lag<line_sep>indexer=self._valid_indices.searchsorted(labels side='left')<line_sep>beta_matrix=self._beta_raw[indexer]<line_sep>beta_matrix[labels<l>self._valid_indices[0]]=np.NaN<line_sep><return>beta_matrix<block_end>@cache_readonly<def_stmt>_enough_obs self# XXX: what's the best way to determine where to start?
# TODO: write unit tests for this
<block_start>rank_threshold=len(self._x.columns)+1<if_stmt>self._min_obs<l>rank_threshold# pragma: no cover
<block_start>warnings.warn('min_obs is smaller than rank of X matrix')<block_end>enough_observations=self._nobs_raw<ge>self._min_obs<line_sep>enough_time_periods=self._window_time_obs<ge>self._min_periods<line_sep><return>enough_time_periods&enough_observations<block_end><block_end><def_stmt>create_ols_dict attr<block_start><def_stmt>attr_getter self<block_start>d={}<for_stmt>k,v compat.iteritems(self.results)<block_start>result=getattr(v attr)<line_sep>d[k]=result<block_end><return>d<block_end><return>attr_getter<block_end><def_stmt>create_ols_attr attr<block_start><return>property(create_ols_dict(attr))<block_end><class_stmt>NonPooledPanelOLS(object)<block_start>"""Implements non-pooled panel OLS.
Parameters
----------
y : DataFrame
x : Series, DataFrame, or dict of Series
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
window_type : {'full_sample', 'rolling', 'expanding'}
'full_sample' by default
window : int
size of window (for rolling/expanding OLS)
"""<line_sep>ATTRIBUTES=['beta' 'df' 'df_model' 'df_resid' 'f_stat' 'p_value' 'r2' 'r2_adj' 'resid' 'rmse' 'std_err' 'summary_as_matrix' 't_stat' 'var_beta' 'x' 'y' 'y_fitted' 'y_predict']<def_stmt>__init__ self y x window_type='full_sample' window=<none> min_periods=<none> intercept=<true> nw_lags=<none> nw_overlap=<false><block_start><for_stmt>attr self.ATTRIBUTES<block_start>setattr(self.__class__ attr create_ols_attr(attr))<block_end>results={}<for_stmt>entity y<block_start>entity_y=y[entity]<line_sep>entity_x={}<for_stmt>x_var x<block_start>entity_x[x_var]=x[x_var][entity]<block_end><import_from_stmt>pandas.stats.interface ols<line_sep>results[entity]=ols(y=entity_y x=entity_x window_type=window_type window=window min_periods=min_periods intercept=intercept nw_lags=nw_lags nw_overlap=nw_overlap)<block_end>self.results=results<block_end><block_end><def_stmt>_var_beta_panel y x beta xx rmse cluster_axis nw_lags nobs df nw_overlap<block_start>xx_inv=math.inv(xx)<line_sep>yv=y.values<if_stmt>cluster_axis<is><none><block_start><if_stmt>nw_lags<is><none><block_start><return>xx_inv<times>(rmse<power>2)<block_end><else_stmt><block_start>resid=yv-np.dot(x.values beta)<line_sep>m=(x.values.T<times>resid).T<line_sep>xeps=math.newey_west(m nw_lags nobs df nw_overlap)<line_sep><return>np.dot(xx_inv np.dot(xeps xx_inv))<block_end><block_end><else_stmt><block_start>Xb=np.dot(x.values beta).reshape((len(x.values) 1))<line_sep>resid=DataFrame(yv[: <none>]-Xb index=y.index columns=['resid'])<if_stmt>cluster_axis<eq>1<block_start>x=x.swaplevel(0 1).sortlevel(0)<line_sep>resid=resid.swaplevel(0 1).sortlevel(0)<block_end>m=_group_agg(x.values<times>resid.values x.index._bounds <lambda>x:np.sum(x axis=0))<if_stmt>nw_lags<is><none><block_start>nw_lags=0<block_end>xox=0<for_stmt>i range(len(x.index.levels[0]))<block_start>xox<augadd>math.newey_west(m[i:i+1] nw_lags nobs df nw_overlap)<block_end><return>np.dot(xx_inv np.dot(xox xx_inv))<block_end><block_end><def_stmt>_group_agg values bounds f<block_start>"""
R-style aggregator
Parameters
----------
values : N-length or N x K ndarray
bounds : B-length ndarray
f : ndarray aggregation function
Returns
-------
ndarray with same length as bounds array
"""<if_stmt>values.ndim<eq>1<block_start>N=len(values)<line_sep>result=np.empty(len(bounds) dtype=float)<block_end><elif_stmt>values.ndim<eq>2<block_start>N,K=values.shape<line_sep>result=np.empty((len(bounds) K) dtype=float)<block_end>testagg=f(values[:min(1 len(values))])<if_stmt>isinstance(testagg np.ndarray)<and>testagg.ndim<eq>2<block_start><raise>AssertionError('Function must reduce')<block_end><for_stmt>i,left_bound enumerate(bounds)<block_start><if_stmt>i<eq>len(bounds)-1<block_start>right_bound=N<block_end><else_stmt><block_start>right_bound=bounds[i+1]<block_end>result[i]=f(values[left_bound:right_bound])<block_end><return>result<block_end><def_stmt>_xx_time_effects x y<block_start>"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
"""<line_sep># X'X
xx=np.dot(x.values.T x.values)<line_sep>xt=x.sum(level=0).values<line_sep>count=y.unstack().count(1).values<line_sep>selector=count<g>0<line_sep># X'X - (X'T) (T'T)^-1 (T'X)
xt=xt[selector]<line_sep>count=count[selector]<line_sep><return>xx-np.dot(xt.T/count xt)<block_end> |
<import_from_future_stmt> print_function<import_from_stmt>drawille Canvas<import_stmt>math<line_sep>s=Canvas()<for_stmt>x range(1800)<block_start>s.set(x/10 math.sin(math.radians(x))<times>10)<block_end>print(s.frame())<line_sep>s.clear()<for_stmt>x range(0 1800 10)<block_start>s.set(x/10 10+math.sin(math.radians(x))<times>10)<line_sep>s.set(x/10 10+math.cos(math.radians(x))<times>10)<block_end>print(s.frame())<line_sep>s.clear()<for_stmt>x range(0 3600 20)<block_start>s.set(x/20 4+math.sin(math.radians(x))<times>4)<block_end>print(s.frame())<line_sep>s.clear()<for_stmt>x range(0 360 4)<block_start>s.set(x/4 30+math.sin(math.radians(x))<times>30)<block_end><for_stmt>x range(30)<block_start><for_stmt>y range(30)<block_start>s.set(x y)<line_sep>s.toggle(x+30 y+30)<line_sep>s.toggle(x+60 y)<block_end><block_end>print(s.frame())<line_sep> |
"""The tests for the Async Media player helper functions."""<import_stmt>pytest<import_stmt>homeassistant.components.media_player<as>mp<import_from_stmt>homeassistant.const STATE_IDLE STATE_OFF STATE_ON STATE_PAUSED STATE_PLAYING <class_stmt>ExtendedMediaPlayer(mp.MediaPlayerEntity)<block_start>"""Media player test class."""<def_stmt>__init__ self hass<block_start>"""Initialize the test media player."""<line_sep>self.hass=hass<line_sep>self._volume=0<line_sep>self._state=STATE_OFF<block_end>@property<def_stmt>state self<block_start>"""State of the player."""<line_sep><return>self._state<block_end>@property<def_stmt>volume_level self<block_start>"""Volume level of the media player (0..1)."""<line_sep><return>self._volume<block_end>@property<def_stmt>supported_features self<block_start>"""Flag media player features that are supported."""<line_sep><return>(mp.const.MediaPlayerEntityFeature.VOLUME_SET|mp.const.MediaPlayerEntityFeature.VOLUME_STEP|mp.const.MediaPlayerEntityFeature.PLAY|mp.const.MediaPlayerEntityFeature.PAUSE|mp.const.MediaPlayerEntityFeature.TURN_OFF|mp.const.MediaPlayerEntityFeature.TURN_ON)<block_end><def_stmt>set_volume_level self volume<block_start>"""Set volume level, range 0..1."""<line_sep>self._volume=volume<block_end><def_stmt>volume_up self<block_start>"""Turn volume up for media player."""<if_stmt>self.volume_level<l>1<block_start>self.set_volume_level(min(1 self.volume_level+0.1))<block_end><block_end><def_stmt>volume_down self<block_start>"""Turn volume down for media player."""<if_stmt>self.volume_level<g>0<block_start>self.set_volume_level(max(0 self.volume_level-0.1))<block_end><block_end><def_stmt>media_play self<block_start>"""Play the media player."""<line_sep>self._state=STATE_PLAYING<block_end><def_stmt>media_pause self<block_start>"""Plause the media player."""<line_sep>self._state=STATE_PAUSED<block_end><def_stmt>media_play_pause self<block_start>"""Play or pause the media player."""<if_stmt>self._state<eq>STATE_PLAYING<block_start>self._state=STATE_PAUSED<block_end><else_stmt><block_start>self._state=STATE_PLAYING<block_end><block_end><def_stmt>turn_on self<block_start>"""Turn on state."""<line_sep>self._state=STATE_ON<block_end><def_stmt>turn_off self<block_start>"""Turn off state."""<line_sep>self._state=STATE_OFF<block_end><def_stmt>toggle self<block_start>"""Toggle the power on the media player."""<if_stmt>self._state<in>[STATE_OFF STATE_IDLE]<block_start>self._state=STATE_ON<block_end><else_stmt><block_start>self._state=STATE_OFF<block_end><block_end><block_end><class_stmt>SimpleMediaPlayer(mp.MediaPlayerEntity)<block_start>"""Media player test class."""<def_stmt>__init__ self hass<block_start>"""Initialize the test media player."""<line_sep>self.hass=hass<line_sep>self._volume=0<line_sep>self._state=STATE_OFF<block_end>@property<def_stmt>state self<block_start>"""State of the player."""<line_sep><return>self._state<block_end>@property<def_stmt>volume_level self<block_start>"""Volume level of the media player (0..1)."""<line_sep><return>self._volume<block_end>@property<def_stmt>supported_features self<block_start>"""Flag media player features that are supported."""<line_sep><return>(mp.const.MediaPlayerEntityFeature.VOLUME_SET|mp.const.MediaPlayerEntityFeature.VOLUME_STEP|mp.const.MediaPlayerEntityFeature.PLAY|mp.const.MediaPlayerEntityFeature.PAUSE|mp.const.MediaPlayerEntityFeature.TURN_OFF|mp.const.MediaPlayerEntityFeature.TURN_ON)<block_end><def_stmt>set_volume_level self volume<block_start>"""Set volume level, range 
0..1."""<line_sep>self._volume=volume<block_end><def_stmt>media_play self<block_start>"""Play the media player."""<line_sep>self._state=STATE_PLAYING<block_end><def_stmt>media_pause self<block_start>"""Plause the media player."""<line_sep>self._state=STATE_PAUSED<block_end><def_stmt>turn_on self<block_start>"""Turn on state."""<line_sep>self._state=STATE_ON<block_end><def_stmt>turn_off self<block_start>"""Turn off state."""<line_sep>self._state=STATE_OFF<block_end><block_end>@pytest.fixture(params=[ExtendedMediaPlayer SimpleMediaPlayer])<def_stmt>player hass request<block_start>"""Return a media player."""<line_sep><return>request.param(hass)<block_end><async_keyword><def_stmt>test_volume_up player<block_start>"""Test the volume_up and set volume methods."""<assert_stmt>player.volume_level<eq>0<line_sep><await>player.async_set_volume_level(0.5)<assert_stmt>player.volume_level<eq>0.5<line_sep><await>player.async_volume_up()<assert_stmt>player.volume_level<eq>0.6<block_end><async_keyword><def_stmt>test_volume_down player<block_start>"""Test the volume_down and set volume methods."""<assert_stmt>player.volume_level<eq>0<line_sep><await>player.async_set_volume_level(0.5)<assert_stmt>player.volume_level<eq>0.5<line_sep><await>player.async_volume_down()<assert_stmt>player.volume_level<eq>0.4<block_end><async_keyword><def_stmt>test_media_play_pause player<block_start>"""Test the media_play_pause method."""<assert_stmt>player.state<eq>STATE_OFF<line_sep><await>player.async_media_play_pause()<assert_stmt>player.state<eq>STATE_PLAYING<line_sep><await>player.async_media_play_pause()<assert_stmt>player.state<eq>STATE_PAUSED<block_end><async_keyword><def_stmt>test_turn_on_off player<block_start>"""Test the turn on and turn off methods."""<assert_stmt>player.state<eq>STATE_OFF<line_sep><await>player.async_turn_on()<assert_stmt>player.state<eq>STATE_ON<line_sep><await>player.async_turn_off()<assert_stmt>player.state<eq>STATE_OFF<block_end><async_keyword><def_stmt>test_toggle player<block_start>"""Test the toggle method."""<assert_stmt>player.state<eq>STATE_OFF<line_sep><await>player.async_toggle()<assert_stmt>player.state<eq>STATE_ON<line_sep><await>player.async_toggle()<assert_stmt>player.state<eq>STATE_OFF<block_end> |
# Unless explicitly stated otherwise all files in this repository are licensed under the BSD-3-Clause License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2015-Present Datadog, Inc
<import_from_stmt>datadog.util.format force_to_epoch_seconds<import_from_stmt>datadog.api.resources GetableAPIResource CreateableAPIResource UpdatableAPIResource ListableAPIResource DeletableAPIResource ActionAPIResource <class_stmt>ServiceLevelObjective(GetableAPIResource CreateableAPIResource UpdatableAPIResource ListableAPIResource DeletableAPIResource ActionAPIResource )<block_start>"""
A wrapper around the Service Level Objective HTTP API.
"""<line_sep>_resource_name="slo"<line_sep>@classmethod<def_stmt>create cls attach_host_name=<false> method="POST" id=<none> params=<none> **body<block_start>"""
Create a SLO
:returns: created SLO details
"""<line_sep><return>super(ServiceLevelObjective cls).create(attach_host_name=<false> method="POST" id=<none> params=params **body)<block_end>@classmethod<def_stmt>get cls id **params<block_start>"""
Get the details of a specific SLO.
:param id: SLO id to get details for
:type id: str
:returns: SLO details
"""<line_sep><return>super(ServiceLevelObjective cls).get(id **params)<block_end>@classmethod<def_stmt>get_all cls query=<none> ids=<none> offset=0 limit=100 **params<block_start>"""
Get all SLO details.
:param query: optional search query, using the same syntax as the UI and online documentation
:type query: str
:param ids: optional list of SLO ids to get many specific SLOs at once.
:type ids: list(str)
:param offset: offset of results to use (default 0)
:type offset: int
:param limit: limit of results to return (default: 100)
:type limit: int
:returns: SLOs matching the query
"""<line_sep>search_terms={}<if_stmt>query<block_start>search_terms["query"]=query<block_end><if_stmt>ids<block_start>search_terms["ids"]=ids<block_end>search_terms["offset"]=offset<line_sep>search_terms["limit"]=limit<line_sep><return>super(ServiceLevelObjective cls).get_all(**search_terms)<block_end>@classmethod<def_stmt>update cls id params=<none> **body<block_start>"""
Update the details of a specific SLO.
:param id: SLO id to update details for
:type id: str
:returns: SLO details
"""<line_sep><return>super(ServiceLevelObjective cls).update(id params **body)<block_end>@classmethod<def_stmt>delete cls id **params<block_start>"""
Delete a specific SLO.
:param id: SLO id to delete
:type id: str
:returns: SLO ids removed
"""<line_sep><return>super(ServiceLevelObjective cls).delete(id **params)<block_end>@classmethod<def_stmt>bulk_delete cls ops **params<block_start>"""
Bulk Delete Timeframes from multiple SLOs.
:param ops: a dictionary mapping of SLO ID to timeframes to remove.
:type ops: dict(str, list(str))
:returns: Dictionary representing the API's JSON response
`errors` - errors with operation
`data` - updates and deletions
"""<line_sep><return>super(ServiceLevelObjective cls)._trigger_class_action("POST" "bulk_delete" body=ops params=params suppress_response_errors_on_codes=[200] )<block_end>@classmethod<def_stmt>delete_many cls ids **params<block_start>"""
Delete Multiple SLOs
:param ids: a list of SLO IDs to remove
:type ids: list(str)
:returns: Dictionary representing the API's JSON response; `data` contains the deleted SLO ids and `errors` any errors encountered
"""<line_sep><return>super(ServiceLevelObjective cls)._trigger_class_action("DELETE" "" params=params body={"ids":ids} suppress_response_errors_on_codes=[200] )<block_end>@classmethod<def_stmt>can_delete cls ids **params<block_start>"""
Check if the following SLOs can be safely deleted.
This is used to check whether an SLO has any references to it.
:param ids: a list of SLO IDs to check
:type ids: list(str)
:returns: Dictionary representing the API's JSON response
"data.ok" represents a list of SLO ids that have no known references.
"errors" contains a dictionary of SLO ID to known reference(s).
"""<line_sep>params["ids"]=ids<line_sep><return>super(ServiceLevelObjective cls)._trigger_class_action("GET" "can_delete" params=params body=<none> suppress_response_errors_on_codes=[200] )<block_end>@classmethod<def_stmt>history cls id from_ts to_ts **params<block_start>"""
Get the SLO's history from the given time range.
:param id: SLO ID to query
:type id: str
:param from_ts: `from` timestamp in epoch seconds to query
:type from_ts: int|datetime.datetime
:param to_ts: `to` timestamp in epoch seconds to query, must be > `from_ts`
:type to_ts: int|datetime.datetime
:returns: Dictionary representing the API's JSON response
"data.ok" represents a list of SLO ids that have no known references.
"errors" contains a dictionary of SLO ID to known reference(s).
"""<line_sep>params["id"]=id<line_sep>params["from_ts"]=force_to_epoch_seconds(from_ts)<line_sep>params["to_ts"]=force_to_epoch_seconds(to_ts)<line_sep><return>super(ServiceLevelObjective cls)._trigger_class_action("GET" "history" id=id params=params body=<none> suppress_response_errors_on_codes=[200] )<block_end><block_end> |
"""Dataset class for CleverHans
"""<line_sep># pylint: disable=missing-docstring
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>array<import_stmt>functools<import_stmt>gzip<import_stmt>operator<import_stmt>os<import_stmt>struct<import_stmt>tempfile<import_stmt>sys<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<try_stmt><block_start><import_from_stmt>tensorflow.python.keras.utils np_utils<import_from_stmt>tensorflow.keras.datasets cifar10<block_end><except_stmt>ImportError# In tf 1.8, np_utils doesn't seem to be publicly exposed.
# In later tf versions, it is, and in pre-tf keras it was too.
<block_start><import_from_stmt>tensorflow.python.keras _impl<line_sep>np_utils=_impl.keras.utils.np_utils<line_sep># In tf 1.8, "from tensorflow.keras.datasets import cifar10" doesn't work even though the module exists
cifar10=tf.keras.datasets.cifar10<line_sep>warnings.warn("Support for TensorFlow versions prior to 1.12 is deprecated."<concat>" CleverHans using earlier versions may quit working on or after 2019-07-07.")<block_end><import_from_stmt>cleverhans utils<class_stmt>Dataset(object)<block_start>"""Abstract base class representing a dataset."""<line_sep># The number of classes in the dataset. Should be specified by subclasses.
NB_CLASSES=<none><def_stmt>__init__ self kwargs=<none><block_start><if_stmt>kwargs<is><none><block_start>kwargs={}<block_end><if_stmt>"self"<in>kwargs<block_start><del_stmt>kwargs["self"]<block_end>self.kwargs=kwargs<block_end><def_stmt>get_factory self<block_start>"""Returns a picklable callable that recreates the dataset."""<line_sep><return>Factory(type(self) self.kwargs)<block_end><def_stmt>get_set self which_set<block_start>"""Returns the training set or test set as an (x_data, y_data) tuple.
:param which_set: 'train' or 'test'
"""<line_sep><return>(getattr(self "x_"+which_set) getattr(self "y_"+which_set))<block_end><def_stmt>to_tensorflow self<block_start><raise>NotImplementedError()<block_end>@classmethod<def_stmt>in_memory_dataset cls x y shuffle=<none> repeat=<true><block_start><assert_stmt>x.shape[0]<eq>y.shape[0]<line_sep>d=tf.data.Dataset.range(x.shape[0])<if_stmt>repeat<block_start>d=d.repeat()<block_end><if_stmt>shuffle<block_start>d=d.shuffle(shuffle)<block_end><def_stmt>lookup p<block_start><return>x[p] y[p]<block_end>d=d.map(<lambda>i:tf.py_func(lookup [i] [tf.float32]<times>2))<line_sep><return>d<block_end><block_end><class_stmt>MNIST(Dataset)<block_start>"""The MNIST dataset"""<line_sep>NB_CLASSES=10<def_stmt>__init__ self train_start=0 train_end=60000 test_start=0 test_end=10000 center=<false> max_val=1.0 <block_start>kwargs=locals()<if_stmt>"__class__"<in>kwargs<block_start><del_stmt>kwargs["__class__"]<block_end>super(MNIST self).__init__(kwargs)<line_sep>x_train,y_train,x_test,y_test=data_mnist(train_start=train_start train_end=train_end test_start=test_start test_end=test_end )<if_stmt>center<block_start>x_train=x_train<times>2.0-1.0<line_sep>x_test=x_test<times>2.0-1.0<block_end>x_train<augmul>max_val<line_sep>x_test<augmul>max_val<line_sep>self.x_train=x_train.astype("float32")<line_sep>self.y_train=y_train.astype("float32")<line_sep>self.x_test=x_test.astype("float32")<line_sep>self.y_test=y_test.astype("float32")<block_end><def_stmt>to_tensorflow self shuffle=4096<block_start><return>(self.in_memory_dataset(self.x_train self.y_train shuffle) self.in_memory_dataset(self.x_test self.y_test repeat=<false>) )<block_end><block_end><class_stmt>CIFAR10(Dataset)<block_start>"""The CIFAR-10 dataset"""<line_sep>NB_CLASSES=10<line_sep>LABEL_NAMES=["airplane" "automobile" "bird" "cat" "deer" "dog" "frog" "horse" "ship" "truck" ]<def_stmt>__init__ self train_start=0 train_end=60000 test_start=0 test_end=10000 center=<false> max_val=1.0 <block_start>kwargs=locals()<if_stmt>"__class__"<in>kwargs<block_start><del_stmt>kwargs["__class__"]<block_end>super(CIFAR10 self).__init__(kwargs)<line_sep>packed=data_cifar10(train_start=train_start train_end=train_end test_start=test_start test_end=test_end )<line_sep>x_train,y_train,x_test,y_test=packed<if_stmt>center<block_start>x_train=x_train<times>2.0-1.0<line_sep>x_test=x_test<times>2.0-1.0<block_end>x_train<augmul>max_val<line_sep>x_test<augmul>max_val<line_sep>self.x_train=x_train<line_sep>self.y_train=y_train<line_sep>self.x_test=x_test<line_sep>self.y_test=y_test<line_sep>self.max_val=max_val<block_end><def_stmt>to_tensorflow self shuffle=4096# This is much more efficient with data augmentation, see tutorials.
<block_start><return>(self.in_memory_dataset(self.x_train self.y_train shuffle) self.in_memory_dataset(self.x_test self.y_test repeat=<false>) )<block_end><block_end><class_stmt>Factory(object)<block_start>"""
A callable that creates an object of the specified type and configuration.
"""<def_stmt>__init__ self cls kwargs<block_start>self.cls=cls<line_sep>self.kwargs=kwargs<block_end><def_stmt>__call__ self<block_start>"""Returns the created object."""<line_sep><return>self.cls(**self.kwargs)<block_end><block_end><def_stmt>maybe_download_file url datadir=<none> force=<false><block_start><try_stmt><block_start><import_from_stmt>urllib.request urlretrieve<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib urlretrieve<block_end><if_stmt><not>datadir<block_start>datadir=tempfile.gettempdir()<block_end>file_name=url[url.rfind("/")+1:]<line_sep>dest_file=os.path.join(datadir file_name)<line_sep>isfile=os.path.isfile(dest_file)<if_stmt>force<or><not>isfile<block_start>urlretrieve(url dest_file)<block_end><return>dest_file<block_end><def_stmt>download_and_parse_mnist_file file_name datadir=<none> force=<false><block_start>url=os.path.join('https://storage.googleapis.com/cvdf-datasets/mnist/' file_name)<line_sep>file_name=maybe_download_file(url datadir=datadir force=force)<line_sep># Open the file and unzip it if necessary
<if_stmt>os.path.splitext(file_name)[1]<eq>".gz"<block_start>open_fn=gzip.open<block_end><else_stmt><block_start>open_fn=open<block_end># Parse the file
<with_stmt>open_fn(file_name "rb")<as>file_descriptor<block_start>header=file_descriptor.read(4)<assert_stmt>len(header)<eq>4<line_sep>zeros,data_type,n_dims=struct.unpack(">HBB" header)<assert_stmt>zeros<eq>0<line_sep>hex_to_data_type={0x08:"B" 0x09:"b" 0x0B:"h" 0x0C:"i" 0x0D:"f" 0x0E:"d" }<line_sep>data_type=hex_to_data_type[data_type]<line_sep># data_type unicode to ascii conversion (Python2 fix)
<if_stmt>sys.version_info[0]<l>3<block_start>data_type=data_type.encode("ascii" "ignore")<block_end>dim_sizes=struct.unpack(">"+"I"<times>n_dims file_descriptor.read(4<times>n_dims))<line_sep>data=array.array(data_type file_descriptor.read())<line_sep>data.byteswap()<line_sep>desired_items=functools.reduce(operator.mul dim_sizes)<assert_stmt>len(data)<eq>desired_items<line_sep><return>np.array(data).reshape(dim_sizes)<block_end><block_end><def_stmt>data_mnist datadir=tempfile.gettempdir() train_start=0 train_end=60000 test_start=0 test_end=10000 <block_start>"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""<assert_stmt>isinstance(train_start int)<assert_stmt>isinstance(train_end int)<assert_stmt>isinstance(test_start int)<assert_stmt>isinstance(test_end int)<line_sep>X_train=(download_and_parse_mnist_file("train-images-idx3-ubyte.gz" datadir=datadir)/255.0)<line_sep>Y_train=download_and_parse_mnist_file("train-labels-idx1-ubyte.gz" datadir=datadir)<line_sep>X_test=(download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz" datadir=datadir)/255.0)<line_sep>Y_test=download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz" datadir=datadir)<line_sep>X_train=np.expand_dims(X_train -1)<line_sep>X_test=np.expand_dims(X_test -1)<line_sep>X_train=X_train[train_start:train_end]<line_sep>Y_train=Y_train[train_start:train_end]<line_sep>X_test=X_test[test_start:test_end]<line_sep>Y_test=Y_test[test_start:test_end]<line_sep>Y_train=utils.to_categorical(Y_train nb_classes=10)<line_sep>Y_test=utils.to_categorical(Y_test nb_classes=10)<line_sep><return>X_train Y_train X_test Y_test<block_end><def_stmt>data_cifar10 train_start=0 train_end=50000 test_start=0 test_end=10000<block_start>"""
Preprocess the CIFAR10 dataset
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""<line_sep># These values are specific to CIFAR10
img_rows=32<line_sep>img_cols=32<line_sep>nb_classes=10<line_sep># the data, shuffled and split between train and test sets
(x_train y_train),(x_test y_test)=cifar10.load_data()<if_stmt>tf.keras.backend.image_data_format()<eq>"channels_first"<block_start>x_train=x_train.reshape(x_train.shape[0] 3 img_rows img_cols)<line_sep>x_test=x_test.reshape(x_test.shape[0] 3 img_rows img_cols)<block_end><else_stmt><block_start>x_train=x_train.reshape(x_train.shape[0] img_rows img_cols 3)<line_sep>x_test=x_test.reshape(x_test.shape[0] img_rows img_cols 3)<block_end>x_train=x_train.astype("float32")<line_sep>x_test=x_test.astype("float32")<line_sep>x_train<augdiv>255<line_sep>x_test<augdiv>255<line_sep>print("x_train shape:" x_train.shape)<line_sep>print(x_train.shape[0] "train samples")<line_sep>print(x_test.shape[0] "test samples")<line_sep># convert class vectors to binary class matrices
y_train=np_utils.to_categorical(y_train nb_classes)<line_sep>y_test=np_utils.to_categorical(y_test nb_classes)<line_sep>x_train=x_train[train_start:train_end : : :]<line_sep>y_train=y_train[train_start:train_end :]<line_sep>x_test=x_test[test_start:test_end :]<line_sep>y_test=y_test[test_start:test_end :]<line_sep><return>x_train y_train x_test y_test<block_end> |