code | package | path | filename
---|---|---|---|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: zwift_messages.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='zwift_messages.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x14zwift_messages.proto\"\xc6\x03\n\x0bPlayerState\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tworldTime\x18\x02 \x01(\x03\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x10\n\x08roadTime\x18\x04 \x01(\x05\x12\x0c\n\x04laps\x18\x05 \x01(\x05\x12\r\n\x05speed\x18\x06 \x01(\x05\x12\x14\n\x0croadPosition\x18\x08 \x01(\x05\x12\x12\n\ncadenceUHz\x18\t \x01(\x05\x12\x11\n\theartrate\x18\x0b \x01(\x05\x12\r\n\x05power\x18\x0c \x01(\x05\x12\x0f\n\x07heading\x18\r \x01(\x03\x12\x0c\n\x04lean\x18\x0e \x01(\x05\x12\x10\n\x08\x63limbing\x18\x0f \x01(\x05\x12\x0c\n\x04time\x18\x10 \x01(\x05\x12\x0b\n\x03\x66\x31\x39\x18\x13 \x01(\x05\x12\x0b\n\x03\x66\x32\x30\x18\x14 \x01(\x05\x12\x10\n\x08progress\x18\x15 \x01(\x05\x12\x17\n\x0f\x63ustomisationId\x18\x16 \x01(\x03\x12\x14\n\x0cjustWatching\x18\x17 \x01(\x05\x12\x10\n\x08\x63\x61lories\x18\x18 \x01(\x05\x12\t\n\x01x\x18\x19 \x01(\x02\x12\x10\n\x08\x61ltitude\x18\x1a \x01(\x02\x12\t\n\x01y\x18\x1b \x01(\x02\x12\x17\n\x0fwatchingRiderId\x18\x1c \x01(\x05\x12\x0f\n\x07groupId\x18\x1d \x01(\x05\x12\r\n\x05sport\x18\x1f \x01(\x03\"\xd1\x01\n\x0e\x43lientToServer\x12\x11\n\tconnected\x18\x01 \x01(\x05\x12\x10\n\x08rider_id\x18\x02 \x01(\x05\x12\x12\n\nworld_time\x18\x03 \x01(\x03\x12\x1b\n\x05state\x18\x07 \x01(\x0b\x32\x0c.PlayerState\x12\r\n\x05seqno\x18\x04 \x01(\x05\x12\x0c\n\x04tag8\x18\x08 \x01(\x03\x12\x0c\n\x04tag9\x18\t \x01(\x03\x12\x13\n\x0blast_update\x18\n \x01(\x03\x12\r\n\x05tag11\x18\x0b \x01(\x03\x12\x1a\n\x12last_player_update\x18\x0c \x01(\x03\"\xe2\x01\n\rSegmentResult\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x10\n\x08rider_id\x18\x02 \x01(\x03\x12\x19\n\x11\x65vent_subgroup_id\x18\x06 \x01(\x03\x12\x12\n\nfirst_name\x18\x07 \x01(\t\x12\x11\n\tlast_name\x18\x08 \x01(\t\x12\x17\n\x0f\x66inish_time_str\x18\n \x01(\t\x12\x12\n\nelapsed_ms\x18\x0b \x01(\x03\x12\x12\n\npowermeter\x18\x0c \x01(\x05\x12\x0e\n\x06weight\x18\r \x01(\x05\x12\r\n\x05power\x18\x0f \x01(\x05\x12\x11\n\theartrate\x18\x13 \x01(\x05\"z\n\x0eSegmentResults\x12\x10\n\x08world_id\x18\x01 \x01(\x03\x12\x12\n\nsegment_id\x18\x02 \x01(\x03\x12\x19\n\x11\x65vent_subgroup_id\x18\x03 \x01(\x03\x12\'\n\x0fsegment_results\x18\x04 \x03(\x0b\x32\x0e.SegmentResult\"\x11\n\x0fUnknownMessage1\"\x10\n\x0eUnknownMessage\"\xe1\x01\n\x0eServerToClient\x12\x0c\n\x04tag1\x18\x01 \x01(\x05\x12\x10\n\x08rider_id\x18\x02 \x01(\x05\x12\x12\n\nworld_time\x18\x03 \x01(\x03\x12\r\n\x05seqno\x18\x04 \x01(\x05\x12#\n\rplayer_states\x18\x08 \x03(\x0b\x32\x0c.PlayerState\x12\'\n\x0eplayer_updates\x18\t \x03(\x0b\x32\x0f.UnknownMessage\x12\r\n\x05tag11\x18\x0b \x01(\x03\x12\r\n\x05tag17\x18\x11 \x01(\x03\x12\x10\n\x08num_msgs\x18\x12 \x01(\x05\x12\x0e\n\x06msgnum\x18\x13 \x01(\x05\"u\n\x0fWorldAttributes\x12\x10\n\x08world_id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04tag3\x18\x03 \x01(\x03\x12\x0c\n\x04tag5\x18\x04 \x01(\x03\x12\x12\n\nworld_time\x18\x06 \x01(\x03\x12\x12\n\nclock_time\x18\x07 \x01(\x03\"$\n\x0eWorldAttribute\x12\x12\n\nworld_time\x18\x02 \x01(\x03\"\xa9\x01\n\x15\x45ventSubgroupProtobuf\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05rules\x18\x08 \x01(\x05\x12\r\n\x05route\x18\x16 \x01(\x05\x12\x0c\n\x04laps\x18\x19 \x01(\x05\x12\x15\n\rstartLocation\x18\x1d \x01(\x05\x12\r\n\x05label\x18\x1e \x01(\x05\x12\x10\n\x08paceType\x18\x1f \x01(\x05\x12\x12\n\njerseyHash\x18$ \x01(\x05\"\xf1\x01\n\x0fRiderAttributes\x12\n\n\x02\x66\x32\x18\x02 \x01(\x05\x12\n\n\x02\x66\x33\x18\x03 \x01(\x05\x12;\n\x10\x61ttributeMessage\x18\x04 
\x01(\x0b\x32!.RiderAttributes.AttributeMessage\x12\x0f\n\x07theirId\x18\n \x01(\x05\x12\x0b\n\x03\x66\x31\x33\x18\r \x01(\x05\x1ak\n\x10\x41ttributeMessage\x12\x0c\n\x04myId\x18\x01 \x01(\x05\x12\x0f\n\x07theirId\x18\x02 \x01(\x05\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x13\n\x0b\x63ountryCode\x18\x05 \x01(\x05\"&\n\x08Profiles\x12\x1a\n\x08profiles\x18\x01 \x03(\x0b\x32\x08.Profile\"\x8a\x03\n\x07Profile\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x11\n\tfirstName\x18\x04 \x01(\t\x12\x10\n\x08lastName\x18\x05 \x01(\t\x12\x0c\n\x04male\x18\x06 \x01(\x05\x12\x0e\n\x06weight\x18\t \x01(\x05\x12\x10\n\x08\x62odyType\x18\x0c \x01(\x05\x12\x13\n\x0b\x63ountryCode\x18\" \x01(\x05\x12\x15\n\rtotalDistance\x18# \x01(\x05\x12\x1c\n\x14totalDistanceClimbed\x18$ \x01(\x05\x12\x1a\n\x12totalTimeInMinutes\x18% \x01(\x05\x12\x16\n\x0etotalWattHours\x18) \x01(\x05\x12\x0e\n\x06height\x18* \x01(\x05\x12\x1d\n\x15totalExperiencePoints\x18. \x01(\x05\x12\x18\n\x10\x61\x63hievementLevel\x18\x31 \x01(\x05\x12\x13\n\x0bpowerSource\x18\x34 \x01(\x05\x12\x0b\n\x03\x61ge\x18\x37 \x01(\x05\x12\x1a\n\x12launchedGameClient\x18l \x01(\t\x12\x19\n\x11\x63urrentActivityId\x18m \x01(\x05\x62\x06proto3')
)
_PLAYERSTATE = _descriptor.Descriptor(
name='PlayerState',
full_name='PlayerState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='PlayerState.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='worldTime', full_name='PlayerState.worldTime', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='distance', full_name='PlayerState.distance', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roadTime', full_name='PlayerState.roadTime', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='laps', full_name='PlayerState.laps', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='speed', full_name='PlayerState.speed', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roadPosition', full_name='PlayerState.roadPosition', index=6,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cadenceUHz', full_name='PlayerState.cadenceUHz', index=7,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heartrate', full_name='PlayerState.heartrate', index=8,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='PlayerState.power', index=9,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heading', full_name='PlayerState.heading', index=10,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lean', full_name='PlayerState.lean', index=11,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='climbing', full_name='PlayerState.climbing', index=12,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time', full_name='PlayerState.time', index=13,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f19', full_name='PlayerState.f19', index=14,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f20', full_name='PlayerState.f20', index=15,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='progress', full_name='PlayerState.progress', index=16,
number=21, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='customisationId', full_name='PlayerState.customisationId', index=17,
number=22, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='justWatching', full_name='PlayerState.justWatching', index=18,
number=23, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='calories', full_name='PlayerState.calories', index=19,
number=24, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='x', full_name='PlayerState.x', index=20,
number=25, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='altitude', full_name='PlayerState.altitude', index=21,
number=26, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='PlayerState.y', index=22,
number=27, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='watchingRiderId', full_name='PlayerState.watchingRiderId', index=23,
number=28, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='groupId', full_name='PlayerState.groupId', index=24,
number=29, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sport', full_name='PlayerState.sport', index=25,
number=31, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=479,
)
_CLIENTTOSERVER = _descriptor.Descriptor(
name='ClientToServer',
full_name='ClientToServer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='connected', full_name='ClientToServer.connected', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='ClientToServer.rider_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='ClientToServer.world_time', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='ClientToServer.state', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seqno', full_name='ClientToServer.seqno', index=4,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag8', full_name='ClientToServer.tag8', index=5,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag9', full_name='ClientToServer.tag9', index=6,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_update', full_name='ClientToServer.last_update', index=7,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag11', full_name='ClientToServer.tag11', index=8,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_player_update', full_name='ClientToServer.last_player_update', index=9,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=482,
serialized_end=691,
)
_SEGMENTRESULT = _descriptor.Descriptor(
name='SegmentResult',
full_name='SegmentResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SegmentResult.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='SegmentResult.rider_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='event_subgroup_id', full_name='SegmentResult.event_subgroup_id', index=2,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_name', full_name='SegmentResult.first_name', index=3,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_name', full_name='SegmentResult.last_name', index=4,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time_str', full_name='SegmentResult.finish_time_str', index=5,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='elapsed_ms', full_name='SegmentResult.elapsed_ms', index=6,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='powermeter', full_name='SegmentResult.powermeter', index=7,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight', full_name='SegmentResult.weight', index=8,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='SegmentResult.power', index=9,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='heartrate', full_name='SegmentResult.heartrate', index=10,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=694,
serialized_end=920,
)
_SEGMENTRESULTS = _descriptor.Descriptor(
name='SegmentResults',
full_name='SegmentResults',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_id', full_name='SegmentResults.world_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='segment_id', full_name='SegmentResults.segment_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='event_subgroup_id', full_name='SegmentResults.event_subgroup_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='segment_results', full_name='SegmentResults.segment_results', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1044,
)
_UNKNOWNMESSAGE1 = _descriptor.Descriptor(
name='UnknownMessage1',
full_name='UnknownMessage1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1046,
serialized_end=1063,
)
_UNKNOWNMESSAGE = _descriptor.Descriptor(
name='UnknownMessage',
full_name='UnknownMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1065,
serialized_end=1081,
)
_SERVERTOCLIENT = _descriptor.Descriptor(
name='ServerToClient',
full_name='ServerToClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag1', full_name='ServerToClient.tag1', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rider_id', full_name='ServerToClient.rider_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='ServerToClient.world_time', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seqno', full_name='ServerToClient.seqno', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_states', full_name='ServerToClient.player_states', index=4,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_updates', full_name='ServerToClient.player_updates', index=5,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag11', full_name='ServerToClient.tag11', index=6,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag17', full_name='ServerToClient.tag17', index=7,
number=17, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_msgs', full_name='ServerToClient.num_msgs', index=8,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msgnum', full_name='ServerToClient.msgnum', index=9,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1084,
serialized_end=1309,
)
_WORLDATTRIBUTES = _descriptor.Descriptor(
name='WorldAttributes',
full_name='WorldAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_id', full_name='WorldAttributes.world_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='WorldAttributes.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag3', full_name='WorldAttributes.tag3', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tag5', full_name='WorldAttributes.tag5', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='world_time', full_name='WorldAttributes.world_time', index=4,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clock_time', full_name='WorldAttributes.clock_time', index=5,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1311,
serialized_end=1428,
)
_WORLDATTRIBUTE = _descriptor.Descriptor(
name='WorldAttribute',
full_name='WorldAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world_time', full_name='WorldAttribute.world_time', index=0,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1430,
serialized_end=1466,
)
_EVENTSUBGROUPPROTOBUF = _descriptor.Descriptor(
name='EventSubgroupProtobuf',
full_name='EventSubgroupProtobuf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='EventSubgroupProtobuf.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='EventSubgroupProtobuf.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='EventSubgroupProtobuf.rules', index=2,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='route', full_name='EventSubgroupProtobuf.route', index=3,
number=22, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='laps', full_name='EventSubgroupProtobuf.laps', index=4,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='startLocation', full_name='EventSubgroupProtobuf.startLocation', index=5,
number=29, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='EventSubgroupProtobuf.label', index=6,
number=30, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paceType', full_name='EventSubgroupProtobuf.paceType', index=7,
number=31, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jerseyHash', full_name='EventSubgroupProtobuf.jerseyHash', index=8,
number=36, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1469,
serialized_end=1638,
)
_RIDERATTRIBUTES_ATTRIBUTEMESSAGE = _descriptor.Descriptor(
name='AttributeMessage',
full_name='RiderAttributes.AttributeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='myId', full_name='RiderAttributes.AttributeMessage.myId', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='theirId', full_name='RiderAttributes.AttributeMessage.theirId', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='firstName', full_name='RiderAttributes.AttributeMessage.firstName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastName', full_name='RiderAttributes.AttributeMessage.lastName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='countryCode', full_name='RiderAttributes.AttributeMessage.countryCode', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1775,
serialized_end=1882,
)
_RIDERATTRIBUTES = _descriptor.Descriptor(
name='RiderAttributes',
full_name='RiderAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='f2', full_name='RiderAttributes.f2', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f3', full_name='RiderAttributes.f3', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributeMessage', full_name='RiderAttributes.attributeMessage', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='theirId', full_name='RiderAttributes.theirId', index=3,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f13', full_name='RiderAttributes.f13', index=4,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RIDERATTRIBUTES_ATTRIBUTEMESSAGE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=1882,
)
_PROFILES = _descriptor.Descriptor(
name='Profiles',
full_name='Profiles',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='profiles', full_name='Profiles.profiles', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1884,
serialized_end=1922,
)
_PROFILE = _descriptor.Descriptor(
name='Profile',
full_name='Profile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Profile.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='firstName', full_name='Profile.firstName', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lastName', full_name='Profile.lastName', index=2,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='male', full_name='Profile.male', index=3,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight', full_name='Profile.weight', index=4,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bodyType', full_name='Profile.bodyType', index=5,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='countryCode', full_name='Profile.countryCode', index=6,
number=34, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalDistance', full_name='Profile.totalDistance', index=7,
number=35, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalDistanceClimbed', full_name='Profile.totalDistanceClimbed', index=8,
number=36, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalTimeInMinutes', full_name='Profile.totalTimeInMinutes', index=9,
number=37, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalWattHours', full_name='Profile.totalWattHours', index=10,
number=41, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='Profile.height', index=11,
number=42, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='totalExperiencePoints', full_name='Profile.totalExperiencePoints', index=12,
number=46, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='achievementLevel', full_name='Profile.achievementLevel', index=13,
number=49, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='powerSource', full_name='Profile.powerSource', index=14,
number=52, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='age', full_name='Profile.age', index=15,
number=55, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='launchedGameClient', full_name='Profile.launchedGameClient', index=16,
number=108, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currentActivityId', full_name='Profile.currentActivityId', index=17,
number=109, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1925,
serialized_end=2319,
)
_CLIENTTOSERVER.fields_by_name['state'].message_type = _PLAYERSTATE
_SEGMENTRESULTS.fields_by_name['segment_results'].message_type = _SEGMENTRESULT
_SERVERTOCLIENT.fields_by_name['player_states'].message_type = _PLAYERSTATE
_SERVERTOCLIENT.fields_by_name['player_updates'].message_type = _UNKNOWNMESSAGE
_RIDERATTRIBUTES_ATTRIBUTEMESSAGE.containing_type = _RIDERATTRIBUTES
_RIDERATTRIBUTES.fields_by_name['attributeMessage'].message_type = _RIDERATTRIBUTES_ATTRIBUTEMESSAGE
_PROFILES.fields_by_name['profiles'].message_type = _PROFILE
DESCRIPTOR.message_types_by_name['PlayerState'] = _PLAYERSTATE
DESCRIPTOR.message_types_by_name['ClientToServer'] = _CLIENTTOSERVER
DESCRIPTOR.message_types_by_name['SegmentResult'] = _SEGMENTRESULT
DESCRIPTOR.message_types_by_name['SegmentResults'] = _SEGMENTRESULTS
DESCRIPTOR.message_types_by_name['UnknownMessage1'] = _UNKNOWNMESSAGE1
DESCRIPTOR.message_types_by_name['UnknownMessage'] = _UNKNOWNMESSAGE
DESCRIPTOR.message_types_by_name['ServerToClient'] = _SERVERTOCLIENT
DESCRIPTOR.message_types_by_name['WorldAttributes'] = _WORLDATTRIBUTES
DESCRIPTOR.message_types_by_name['WorldAttribute'] = _WORLDATTRIBUTE
DESCRIPTOR.message_types_by_name['EventSubgroupProtobuf'] = _EVENTSUBGROUPPROTOBUF
DESCRIPTOR.message_types_by_name['RiderAttributes'] = _RIDERATTRIBUTES
DESCRIPTOR.message_types_by_name['Profiles'] = _PROFILES
DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PlayerState = _reflection.GeneratedProtocolMessageType('PlayerState', (_message.Message,), dict(
DESCRIPTOR = _PLAYERSTATE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:PlayerState)
))
_sym_db.RegisterMessage(PlayerState)
ClientToServer = _reflection.GeneratedProtocolMessageType('ClientToServer', (_message.Message,), dict(
DESCRIPTOR = _CLIENTTOSERVER,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:ClientToServer)
))
_sym_db.RegisterMessage(ClientToServer)
SegmentResult = _reflection.GeneratedProtocolMessageType('SegmentResult', (_message.Message,), dict(
DESCRIPTOR = _SEGMENTRESULT,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:SegmentResult)
))
_sym_db.RegisterMessage(SegmentResult)
SegmentResults = _reflection.GeneratedProtocolMessageType('SegmentResults', (_message.Message,), dict(
DESCRIPTOR = _SEGMENTRESULTS,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:SegmentResults)
))
_sym_db.RegisterMessage(SegmentResults)
UnknownMessage1 = _reflection.GeneratedProtocolMessageType('UnknownMessage1', (_message.Message,), dict(
DESCRIPTOR = _UNKNOWNMESSAGE1,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:UnknownMessage1)
))
_sym_db.RegisterMessage(UnknownMessage1)
UnknownMessage = _reflection.GeneratedProtocolMessageType('UnknownMessage', (_message.Message,), dict(
DESCRIPTOR = _UNKNOWNMESSAGE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:UnknownMessage)
))
_sym_db.RegisterMessage(UnknownMessage)
ServerToClient = _reflection.GeneratedProtocolMessageType('ServerToClient', (_message.Message,), dict(
DESCRIPTOR = _SERVERTOCLIENT,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:ServerToClient)
))
_sym_db.RegisterMessage(ServerToClient)
WorldAttributes = _reflection.GeneratedProtocolMessageType('WorldAttributes', (_message.Message,), dict(
DESCRIPTOR = _WORLDATTRIBUTES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:WorldAttributes)
))
_sym_db.RegisterMessage(WorldAttributes)
WorldAttribute = _reflection.GeneratedProtocolMessageType('WorldAttribute', (_message.Message,), dict(
DESCRIPTOR = _WORLDATTRIBUTE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:WorldAttribute)
))
_sym_db.RegisterMessage(WorldAttribute)
EventSubgroupProtobuf = _reflection.GeneratedProtocolMessageType('EventSubgroupProtobuf', (_message.Message,), dict(
DESCRIPTOR = _EVENTSUBGROUPPROTOBUF,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:EventSubgroupProtobuf)
))
_sym_db.RegisterMessage(EventSubgroupProtobuf)
RiderAttributes = _reflection.GeneratedProtocolMessageType('RiderAttributes', (_message.Message,), dict(
AttributeMessage = _reflection.GeneratedProtocolMessageType('AttributeMessage', (_message.Message,), dict(
DESCRIPTOR = _RIDERATTRIBUTES_ATTRIBUTEMESSAGE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:RiderAttributes.AttributeMessage)
))
,
DESCRIPTOR = _RIDERATTRIBUTES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:RiderAttributes)
))
_sym_db.RegisterMessage(RiderAttributes)
_sym_db.RegisterMessage(RiderAttributes.AttributeMessage)
Profiles = _reflection.GeneratedProtocolMessageType('Profiles', (_message.Message,), dict(
DESCRIPTOR = _PROFILES,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:Profiles)
))
_sym_db.RegisterMessage(Profiles)
Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
DESCRIPTOR = _PROFILE,
__module__ = 'zwift_messages_pb2'
# @@protoc_insertion_point(class_scope:Profile)
))
_sym_db.RegisterMessage(Profile)
# @@protoc_insertion_point(module_scope)
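# Usage sketch (not part of the generated output): a minimal round trip through the
# generated PlayerState class using the standard protobuf message API. The field names
# come from the descriptors above; the values are invented for illustration only.
if __name__ == '__main__':
    state = PlayerState()
    state.id = 12345
    state.power = 250            # watts
    state.heartrate = 152        # bpm
    state.cadenceUHz = 1500000   # cadence in micro-hertz (1.5 Hz == 90 rpm)
    payload = state.SerializeToString()
    decoded = PlayerState()
    decoded.ParseFromString(payload)
    assert decoded.power == 250 and decoded.heartrate == 152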
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/zwift_messages_pb2.py | zwift_messages_pb2.py |
# -*- coding: utf-8 -*-
from . import zwift_messages_pb2
from .request import Request
COURSE_TO_WORLD = {3: 1, 4: 2, 5: 3, 6: 1}
class COURSES:
WATOPIA = 3
RICHMOND = 4
LONDON = 5
class World:
def __init__(self, world_id, get_access_token):
self.world_id = world_id
self.request = Request(get_access_token)
@property
def players(self):
return self.request.json('/relay/worlds/{}'.format(self.world_id))
def player_status(self, player_id):
buffer = self.request.protobuf(
'/relay/worlds/{}/players/{}'.format(self.world_id, player_id))
player_state = zwift_messages_pb2.PlayerState()
player_state.ParseFromString(buffer)
return PlayerStateWrapper(player_state)
class PlayerStateWrapper(object):
"""
Wrap a PlayerState instance to make it more usable.
Fields provided by wrapped player_state:
id, worldTime, distance, roadTime, laps, speed, roadPosition, cadenceUHz,
heartrate, power, heading, lean, climbing, time, f19, f20, progress,
customisationId, justWatching, calories, x, altitude, y, watchingRiderId,
groupId, sport
"""
class TURN_SIGNALS:
RIGHT = 'right'
LEFT = 'left'
STRAIGHT = 'straight'
def __init__(self, player_state):
self.player_state = player_state
@property
def ride_ons(self):
return (self.player_state.f19 >> 24) & 0xfff
@property
def is_turning(self):
return (self.player_state.f19 & 8) != 0
@property
def is_forward(self):
return (self.player_state.f19 & 4) != 0
@property
def course(self):
return (self.player_state.f19 & 0xff0000) >> 16
@property
def world(self):
return COURSE_TO_WORLD[self.course]
@property
def road_id(self):
return (self.player_state.f20 & 0xff00) >> 8
@property
def road_direction(self):
return (self.player_state.f20 & 0xffff000000) >> 24
@property
def turn_signal(self):
signal_code = self.player_state.f20 & 0x70
if signal_code == 0x10:
return self.TURN_SIGNALS.RIGHT
elif signal_code == 0x20:
return self.TURN_SIGNALS.LEFT
elif signal_code == 0x40:
return self.TURN_SIGNALS.STRAIGHT
else:
return None
@property
def power_up(self):
return self.player_state.f20 & 0xf
@property
def has_feather_boost(self):
return self.power_up == 0
@property
def has_draft_boost(self):
return self.power_up == 1
@property
def has_aero_boost(self):
return self.power_up == 5
@property
def cadence(self):
return int((self.player_state.cadenceUHz * 60) / 1000000)
def __getattr__(self, item):
"""
First try to get the requested item from the player_state. When it's
not found, try to get it directly from the wrapper.
"""
try:
return getattr(self.player_state, item)
except AttributeError:
return self.__getattribute__(item)
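# Usage sketch, assuming a player id and a zero-argument callable that returns a valid
# Zwift OAuth access token (for example the one exposed by zwift.Client in this package).
# It shows how the wrapper's derived properties decode the raw f19/f20 bit fields.
if __name__ == '__main__':
    def get_access_token():
        return 'dummy-token'  # placeholder; a real token is required for live requests
    world = World(COURSE_TO_WORLD[COURSES.WATOPIA], get_access_token)
    # With a valid token:
    #   status = world.player_status(12345)  # hypothetical player id
    #   print(status.power, status.cadence, status.road_id, status.turn_signal)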
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/world.py | world.py |
# -*- coding: utf-8 -*-
"""Top-level package for Zwift Mobile API client."""
try:
from .client import Client
except ImportError:
pass
else:
__all__ = ['Client']
__author__ = """Sander Smits"""
__email__ = '[email protected]'
__version__ = '0.2.0'
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
import requests
from .error import RequestException
def download_file(url):
resp = requests.get(url)
if not resp.ok:
raise RequestException("{} - {}".format(
resp.status_code, resp.reason))
return resp.content
class Request:
"""Class for handling requests."""
BASE_URL = 'https://us-or-rly101.zwift.com'
DEFAULT_HEADERS = {
"User-Agent": "Zwift/115 CFNetwork/758.0.2 Darwin/15.0.0"}
def __init__(self, get_access_token):
self.get_access_token = get_access_token
def get_headers(self, accept_type):
headers = {
"Accept": accept_type,
"Authorization": "Bearer " + self.get_access_token()
}
headers.update(self.DEFAULT_HEADERS)
return headers
def json(self, url):
headers = self.get_headers(accept_type='application/json')
resp = requests.get(self.BASE_URL + url, headers=headers)
if not resp.ok:
raise RequestException("{} - {}".format(
resp.status_code, resp.reason))
return resp.json()
def protobuf(self, url):
headers = self.get_headers(accept_type='application/x-protobuf-lite')
resp = requests.get(self.BASE_URL + url, headers=headers)
if not resp.ok:
raise RequestException("{} - {}".format(
resp.status_code, resp.reason))
return resp.content
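# Usage sketch, assuming the token getter is any zero-argument callable returning a
# bearer token; the same Request instance backs both the JSON and protobuf endpoints.
if __name__ == '__main__':
    request = Request(lambda: 'dummy-token')
    print(request.get_headers(accept_type='application/json'))
    # With a valid token, for example:
    #   players = request.json('/relay/worlds/1')
    #   payload = request.protobuf('/relay/worlds/1/players/12345')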
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/request.py | request.py |
# -*- coding: utf-8 -*-
from .request import Request
class Profile:
def __init__(self, player_id, get_access_token):
self.player_id = player_id
self.request = Request(get_access_token)
@property
def profile(self):
return self.request.json('/api/profiles/{}'.format(self.player_id))
def check_player_id(self):
"""Most /api/profiles endpoints require the real player id."""
if self.player_id == 'me':
profile = self.profile
self.player_id = profile['id']
@property
def followers(self):
self.check_player_id()
return self.request.json(
'/api/profiles/{}/followers'.format(self.player_id))
@property
def followees(self):
self.check_player_id()
return self.request.json(
'/api/profiles/{}/followees'.format(self.player_id))
def get_activities(self, start=0, limit=10):
self.check_player_id()
return self.request.json(
'/api/profiles/{}/activities?start={}&limit={}'.format(
self.player_id, start, limit))
@property
def latest_activity(self):
activities = self.get_activities(0, 1)
return activities[0] if len(activities) == 1 else None
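# Usage sketch: 'me' is accepted as a player id and check_player_id() swaps it for the
# numeric id before the follower/activity endpoints are called. A real access token is
# required for any live request; the lambda below is only a placeholder.
if __name__ == '__main__':
    profile = Profile('me', lambda: 'dummy-token')
    # With a valid token:
    #   print(profile.profile)
    #   print(profile.get_activities(start=0, limit=5))
    #   print(profile.latest_activity)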
| zwift-client | /zwift-client-0.2.0.tar.gz/zwift-client-0.2.0/zwift/profile.py | profile.py |
import re
import xml.etree.ElementTree as ET
from typing import Tuple
def parse_cadence(row: str) -> Tuple[int, str]:
"""Parses a cadence value string 'Xrpm' and returns a (cadence, rest_of_row) tuple; cadence is -1 when no rpm value is present"""
keyword = 'rpm'
if keyword not in row: return -1, row
if ',' in row: keyword += ','
cadence, rest = row.split(keyword)
if '/' in cadence: cadence = sum([int(c) for c in cadence.split('/')])/2
return int(cadence), rest
def parse_power(row: str) -> float:
"""Parses a power value string 'X%' or 'XW' and returns X / 100 as a float (e.g. '75%' -> 0.75)"""
power = row
if '%' in power: power, _ = power.split('%')
if 'W' in power: power, _ = power.split('W')
return float(power)/100
def parse_duration(row: str) -> int:
"""Parses a duration value string 'Xhr', 'Ymin' or 'Zsec' (or a combination of them) and returns the total duration in seconds"""
def filter_digits(s): return "".join(re.findall(r'\d+', s))
seconds = 0
if 'hr' in row:
hr, row = row.split('hr')
seconds += int(filter_digits(hr)) * 3600
if 'min' in row:
min, row = row.split('min')
seconds += int(filter_digits(min)) * 60
if 'sec' in row:
sec, _ = row.split('sec')
seconds += int(filter_digits(sec))
return seconds
class ZSteadyState:
def __init__(self, row: str) -> None:
duration, row = [r.strip() for r in row.split('@')]
duration = parse_duration(duration)
cadence, row = parse_cadence(row)
self.duration = duration
self.power = parse_power(row)
self.cadence = cadence
def __repr__(self) -> str:
return f'SteadyState (duration: {self.duration} power: {self.power} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the steady state interval data
Params
root : ET.Element
Root of the created steady state interval XML element
"""
interval = ET.SubElement(root, 'SteadyState')
interval.set('Duration', str(self.duration))
interval.set('Power', str(self.power))
if self.cadence > 0: interval.set('Cadence', str(self.cadence))
return interval
class ZRangedInterval():
def __init__(self, row: str) -> None:
duration, row = row.split('from')
cadence = -1
if '@' in duration:
duration, cadence = duration.split('@')
cadence, _ = parse_cadence(cadence)
duration = parse_duration(duration)
from_power, to_power = [parse_power(p) for p in row.split('to')]
self.duration = duration
self.from_power = from_power
self.to_power = to_power
self.cadence = cadence
self.name = "Warmup" if from_power < to_power else "Cooldown"
def __repr__(self) -> str:
return f'{self.name} (duration: {self.duration} from_power: {self.from_power} to_power: {self.to_power} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the ranged interval interval data
Params
root : ET.Element
Root of the created ranged interval XML element
"""
interval = ET.SubElement(root, self.name)
interval.set('Duration', str(self.duration))
interval.set('PowerLow', str(self.from_power))
interval.set("PowerHigh", str(self.to_power))
if self.cadence > 0: interval.set('Cadence', str(self.cadence))
return interval
class ZIntervalsT():
def __init__(self, row: str):
number, rest = row.split('x')
rest = rest.replace("rpm,", 'rpm')
first_interval, second_interval = [ZSteadyState(r) for r in rest.split(',')]
self.number = number
self.first_interval = first_interval
self.second_interval = second_interval
def __repr__(self) -> str:
return f'IntervalT ({self.number} x {self.first_interval}, {self.second_interval})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the intervals data
Params
root : ET.Element
Root of the created intervals XML element
"""
interval = ET.SubElement(root, 'IntervalsT')
interval.set('Repeat', str(self.number))
interval.set('OnDuration', str(self.first_interval.duration))
interval.set('OffDuration', str(self.second_interval.duration))
interval.set("OnPower", str(self.first_interval.power))
interval.set('OffPower', str(self.second_interval.power))
interval.set('Cadence', str(self.first_interval.cadence))
interval.set("CadenceResting", str(self.second_interval.cadence))
class ZFreeRide():
def __init__(self, row: str):
duration, _ = row.split('free ride')
cadence = -1
if '@' in duration:
duration, cadence = duration.split("@")
cadence, _ = parse_cadence(cadence)
duration = parse_duration(duration)
self.duration = duration
self.cadence = cadence
self.flat_road = 1
def __repr__(self) -> str:
return f'ZFreeRide (duration: {self.duration} cadence: {self.cadence})'
def to_xml(self, root: ET.Element) -> ET.Element:
"""Creates an XML element from the free ride interval data
Params
root : ET.Element
Root of the created free ride interval XML element
"""
interval = ET.SubElement(root, 'FreeRide')
interval.set('Duration', str(self.duration))
interval.set('FlatRoad', str(self.flat_road))
if self.cadence > 0: interval.set("Cadence", str(self.cadence))
return interval
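# Usage sketch: a few strings in the format handled by the parsers above, plus one
# steady-state interval serialized to ZWO-style XML via to_xml.
if __name__ == '__main__':
    assert parse_duration('1hr 30min') == 5400
    assert parse_cadence('85rpm, 75%') == (85, ' 75%')
    steady = ZSteadyState('10min @ 85rpm, 75%')
    root = ET.Element('workout')
    steady.to_xml(root)
    print(ET.tostring(root).decode())  # <workout><SteadyState Duration="600" Power="0.75" Cadence="85" /></workout>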
| zwift-workouts-parser | /zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_intervals.py | zwift_intervals.py |
from zwift_workout import ZWorkout, ZWorkoutParseMode
class Parser:
"""
A class used to parse any **bike** Zwift workout presented on
www.whatsonzwift.com
"""
def __init__(self, export_dir, urls, skip: bool = False, replace: bool = False) -> None:
"""
Parameters
----------
export_dir : str
The folder that is used to save all the parsed workouts.
urls : List[str]
A list of urls that need to be parsed, can be either a
direct link to a single workout, plan or a page which
contains multiple plans/workouts.
skip : bool
If True, workouts that can be downloaded directly from the
site are skipped by the parser.
replace : bool
If True, parsed workouts are replaced with the original files
uploaded to the site.
"""
self.export_dir = export_dir
self.skip = skip
self.replace = replace
for i, url in enumerate(urls):
print(f'Parsing url {url} ({i+1}/{len(urls)})')
parsed = self.__try_parse(url)
if not parsed:
print(f"Couldn't find a parser for {url} hence skipping it.")
continue
def __try_parse(self, url):
parsed = self.__try_parse_plans(url)
if not parsed:
parsed = self.__try_parse_workout(url)
return parsed
def __try_parse_plans(self, url):
plans_data = Parser.__get_web_content(url, 'div', 'card')
if not plans_data: return False
any_parsed = False
for i, plan_data in enumerate(plans_data):
card_sports = plan_data.find('div', class_='card-sports')
if not card_sports: continue
card_classes = card_sports.i['class']
valid = ZWorkout.is_valid_sport_type(card_classes)
url = plan_data.find('a', class_='button')['href']
if not valid:
print(f"Couldn't parse {url} because some of the {card_classes} sports are not suppored yet")
continue
print(f"Parsing plan ({i+1}/{len(plans_data)})")
self.__try_parse_workout(url)
any_parsed = True
return any_parsed
def __try_parse_workout(self, url):
workouts_data = Parser.__get_web_content(url, 'article', 'workout')
if not workouts_data:
print(f"Couldn't get workout data by {url} for unknown reason.")
return False
for i, workout_data in enumerate(workouts_data):
print(f"- Parsing workout ({i+1}/{len(workouts_data)})")
mode = ZWorkoutParseMode.DEFAULT
if self.skip: mode = ZWorkoutParseMode.SKIP
elif self.replace: mode = ZWorkoutParseMode.REPLACE
ZWorkout(workout_data, mode).save(self.export_dir)
return True
@staticmethod
def __get_web_content(url, tag, tag_class):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Vivaldi/4.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'
}
import ssl, certifi, urllib.request
req = urllib.request.Request(url, headers=headers)
context = ssl.create_default_context(cafile=certifi.where())
response = urllib.request.urlopen(req, context=context)
content = response.read().decode('utf-8')
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, features='html.parser')
return soup.find_all(tag, class_ = tag_class)
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Parses Zwift workouts from www.whatsonzwift.com")
parser.add_argument('--skip', action='store_true', help='skips workouts which can be downloaded from the site')
parser.add_argument('--replace', action='store_true', help='replaces workouts which can be downloaded from the site with their uploaded files')
parser.add_argument('urls', metavar='URLs', type=str, nargs="+", help="an URL of the workout to parse")
parser.add_argument('-ed', '--export_dir', nargs="?", default='export', help="output directory of the parsed workouts")
args = parser.parse_args()
if args.urls: Parser(**vars(args))
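# Editor's note: a hypothetical command-line invocation, not part of the original
# module; the URL is illustrative only:
#
#   python zwift_parser.py --skip -ed export "https://whatsonzwift.com/workouts/..."
#
# This parses every workout or plan found at the given URL and writes the resulting
# .zwo files under the "export" directory, skipping workouts the site already offers
# as downloads.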
|
zwift-workouts-parser
|
/zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_parser.py
|
zwift_parser.py
|
from typing import List
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup, element
from zwift_intervals import *
from enum import Enum
class ZWorkoutParseMode(Enum):
DEFAULT = 1
SKIP = 2
REPLACE = 3
class ZWorkout():
def is_valid_sport_type(class_values: List[str]):
"""Checks if workout's sport type is supported by the parser
At the moment the parser supports only the bike workouts,
so this function checks if there is a bike type in the sport
types.
Parameters
----------
class_values : List[str]
A list of the class values on the workout's html page
"""
return len([s for s in class_values if 'bike' in s]) > 0
def parse_interval(raw_str: str):
"""Returns an interval based on some specific format of input raw interval string
Return
------
ZFreeRide - If the raw interval string contains a 'free ride' sub-string in it
for example '10 min free ride'
ZRangedInterval - If the raw interval string contains a 'from','to' pair of the sub-strings
for example '1 min from 50 to 90% FTP'
ZIntervalsT - If the raw interval string contains an 'x' symbol (meaning repeats)
for example '10x 3min @ 100% FTP, 1 min @ 55% FTP'
ZSteadyState - Otherwise
for example '3min @ 100rpm, 95% FTP'
"""
if 'free ride' in raw_str: return ZFreeRide(raw_str) #10min free ride
if 'from' in raw_str and 'to' in raw_str: return ZRangedInterval(raw_str) #1min from 50 to 90% FTP
if 'x' in raw_str: return ZIntervalsT(raw_str) #10x 3min @ 100% FTP, 1min @ 55% FTP
return ZSteadyState(raw_str) #3min @ 100rpm, 95% FTP
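# Editor's note: an illustrative restatement of the docstring above as calls
# (a sketch, not part of the original source):
#   ZWorkout.parse_interval('10min free ride')                      # -> ZFreeRide
#   ZWorkout.parse_interval('1min from 50 to 90% FTP')              # -> ZRangedInterval
#   ZWorkout.parse_interval('10x 3min @ 100% FTP, 1min @ 55% FTP')  # -> ZIntervalsT
#   ZWorkout.parse_interval('3min @ 100rpm, 95% FTP')               # -> ZSteadyState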
def __init__(self, article: element.Tag, mode: ZWorkoutParseMode = ZWorkoutParseMode.DEFAULT) -> None:
self.path, self.filename = (None, None)
self.mode = mode
breadcrumbs = article.select_one('div.breadcrumbs')
sport_type = breadcrumbs.find('h4')['class']
self.valid = ZWorkout.is_valid_sport_type(sport_type)
if not self.valid: return
try:
breadcrumbs = [item.string.strip() for item in breadcrumbs]
except Exception as e:
# Sometimes when '@' appears in the breadcrumbs it gets obfuscated by Cloudflare's
# e-mail protection and cannot be recovered, so such workouts are simply skipped.
self.valid = False
return
breadcrumbs = [slugify(b) for b in breadcrumbs if len(b) > 0 and b != '»' and b != 'Workouts']
self.filename = breadcrumbs.pop(-1)
self.path = '/'.join(breadcrumbs)
self.intervals = []
download_button = [a for a in article.find_all('a') if a.string and 'Download workout' in a.string]
self.download_link = download_button[0]['href'] if download_button and self.mode is not ZWorkoutParseMode.DEFAULT else None
if not self.download_link:
def convert_to_string(data):
output = []
if isinstance(data, element.NavigableString): return data.string
for content in data.contents:
if isinstance(content, str): output.append(content)
else: output.extend([convert_to_string(c) for c in content.contents])
return "".join(output)
data = article.select_one('div.one-third.column.workoutlist')
for div in data.find_all('div'):
interval = "".join([convert_to_string(c) for c in div.contents])
self.intervals.append(ZWorkout.parse_interval(interval))
overview = article.select_one('div.overview')
self.author = 'Zwift Workouts Parser'
self.description = overview.next_sibling
if 'Author:' in overview.next_sibling.get_text():
self.author = overview.next_sibling
self.description = self.author.next_sibling
if not isinstance(self.author, str) and 'Author:' in self.author.get_text():
_, self.author = self.author.get_text().split('Author:')
self.description = self.description.get_text("\n")
self.name = 'Zwift Workout'
self.sport_type = 'bike'
self.lookup = {
'author': self.author,
'name': self.name,
'description': self.description,
'sport_type': self.sport_type,
}
def save(self, export_dir: str):
"""Saves workout to a specific folder
Params
------
export_dir : str
Folder to save the workout
"""
if not self.valid: return
workout_fullname = f"{self.path}/{self.filename}"
text = ""
if not self.download_link:
data = self.to_xml()
import xml.etree.ElementTree as ET
text = ET.tostring(data)
xml_header = b'<?xml version="1.0" encoding="utf-8"?>'
text = BeautifulSoup(text, 'xml').prettify().encode('utf-8')
text = text.replace(xml_header, b'').strip()
elif self.mode is ZWorkoutParseMode.REPLACE:
import requests
url = f"https://whatsonzwift.com{self.download_link}"
text = requests.get(url, allow_redirects=True).content
else:
print(f"-- Skipped workout {workout_fullname}")
return
directory = f"{export_dir}/{self.path}"
from os import path, makedirs
if not path.isdir(directory): makedirs(directory)
with open(f"{directory}/{slugify(self.filename, True)}.zwo", 'wb') as f:
f.write(text)
file_version = "Original" if self.download_link else "Parsed"
print(f"-- Parsed workout {workout_fullname} ({file_version})")
def to_xml(self, root : ET.Element = None) -> ET.Element:
"""Creates an XML element from the workout data
Params
root : ET.Element
Root of the created workout XML element
"""
root = ET.Element('workout_file')
for k,v in self.lookup.items():
ET.SubElement(root, k).text = v
tags = ET.SubElement(root, 'tags')
for t in tags:  # NOTE: 'tags' was just created empty, so this loop currently adds no <tag> elements
tag = ET.SubElement(tags, 'tag')
tag.set('name', t)
workout = ET.SubElement(root, 'workout')
for i in self.intervals: i.to_xml(workout)
return root
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value)
return re.sub(r'[-\s]+', '-', value).strip('-_')
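# Editor's note: illustrative results of slugify(), derived from the regular
# expressions above (a sketch, not part of the original module):
#   slugify('Active Recovery » Week 1')   # -> 'Active-Recovery-Week-1'
#   slugify('Über Intervals', True)       # -> 'Über-Intervals'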
|
zwift-workouts-parser
|
/zwift_workouts_parser-1.0.0-py3-none-any.whl/zwift_parser/zwift_workout.py
|
zwift_workout.py
|
__version__ = '0.1.0'
def zwixel():
def rgb(fg = [255, 255, 255], bg = None):
try: return f"\033[38;2;{fg[0]};{fg[1]};{fg[2]};48;2;{bg[0]};{bg[1]};{bg[2]}m"
except: return f"\033[38;2;{fg[0]};{fg[1]};{fg[2]}m"
boards = [[" " for i in range(20)] for i in range(20)]
reset = "\033[0m"
black = rgb(bg = [0, 0, 0]) + " " + reset
lpp = [[0, 0], [1, 0], [3, 0], [4, 0], [9, 0], [11, 0], [16, 0], [2, 1], [3, 1], [8, 1], [10, 1], [13, 1], [15, 1], [16, 1], [17, 1], [2, 2], [14, 2], [16, 2], [17, 2], [18, 2], [1, 3], [16, 3], [17, 3], [2, 5], [0, 10], [1, 10], [0, 11], [19, 10], [1, 13], [0, 14], [0, 15], [1, 15], [0, 16], [1, 16], [2, 16], [4, 17], [6, 17], [7, 17], [8, 17], [9, 17], [16, 17], [6, 18], [7, 18], [6, 19]]
lp = rgb(bg = [156, 36, 176]) + " " + reset
for i in lpp: boards[i[1]][i[0]] = lp
dpp = [[5, 0], [6, 0], [13, 0], [18, 0], [19, 1], [19, 2], [0, 3], [18, 6], [0, 7], [2, 8], [2, 9], [0, 18], [1, 18], [8, 18], [9, 18], [13, 18], [2, 19], [3, 19], [4, 19], [18, 19], [19, 19]]
dp = rgb(bg = [101, 25, 114]) + " " + reset
for i in dpp: boards[i[1]][i[0]] = dp
bp = [[2, 0], [10, 0], [1, 1], [4, 1], [6, 1], [9, 1], [12, 1], [14, 1], [3, 2], [4, 2], [5, 2], [12, 2], [13, 2], [15, 2], [2, 3], [3, 3], [15, 3], [3, 4], [17, 4], [0, 5], [2, 6], [19, 6], [18, 7], [1, 8], [19, 8], [0, 9], [2, 10], [1, 11], [2, 11], [18, 11], [19, 12], [18, 13], [1, 14], [19, 14], [19, 15], [19, 16], [1, 17], [5, 17], [10, 17], [11, 17], [12, 17], [14, 17], [18, 17], [3, 18], [4, 18], [5, 18], [14, 18], [15, 18], [16, 18], [17, 18], [18, 18], [7, 19], [8, 19]]
b = rgb(bg = [63, 81, 181]) + " " + reset
for i in bp: boards[i[1]][i[0]] = b
bp = [[0, 2], [1, 2], [18, 3], [19, 3], [0, 4], [1, 4], [2, 4], [18, 4], [19, 4], [1, 5], [18, 5], [19, 5], [0, 6], [1, 6], [1, 7], [2, 7], [19, 7], [0, 8], [1, 9], [19, 9], [19, 11], [0, 12], [1, 12], [18, 12], [0, 13], [19, 13], [7, 7], [8, 7], [7, 8], [8, 8], [12, 8], [13, 8], [12, 9], [13, 9], [7, 11], [8, 11], [9, 11], [9, 12], [10, 12], [11, 12], [12, 12], [13, 12]]
for i in bp: boards[i[1]][i[0]] = black
wp = [[6, 2], [7, 2], [8, 2], [9, 2], [10, 2], [11, 2], [4, 3], [5, 3], [11, 3], [12, 3], [13, 3], [14, 3], [4, 4], [14, 4], [15, 4], [16, 4], [3, 5], [4, 5], [16, 5], [17, 5], [3, 6], [17, 6], [3, 7], [17, 7], [3, 8], [17, 8], [18, 8], [3, 9], [18, 9], [3, 10], [18, 10], [3, 11], [17, 11], [2, 12], [17, 12], [2, 13], [17, 13], [2, 14], [17, 14], [18, 14], [2, 15], [18, 15], [3, 16], [18, 16], [4, 16], [5, 16], [6, 16], [7, 16], [8, 16], [9, 16], [10, 16], [11, 16], [12, 16], [13, 16], [14, 16], [15, 16], [16, 16], [17, 16]]
w = rgb(bg = [255, 255, 255]) + " " + reset
for i in wp: boards[i[1]][i[0]] = w
for i, v in enumerate(boards[0]):
if v == " ": boards[0][i] = black
for i, v in enumerate(boards[1]):
if v == " ": boards[1][i] = black
for i, v in enumerate(boards[17]):
if v == " ": boards[17][i] = black
for i, v in enumerate(boards[18]):
if v == " ": boards[18][i] = black
for i, v in enumerate(boards[19]):
if v == " ": boards[19][i] = black
for i, v in enumerate(boards):
for j, k in enumerate(v):
if boards[i][j] == " ": boards[i][j] = rgb(bg = [255, 235, 59]) + " " + reset
return boards
|
zwixel-pwack
|
/zwixel_pwack-0.1.0-py3-none-any.whl/zwixel_pwack/__init__.py
|
__init__.py
|
print("zwjhello")
|
zwjhello
|
/zwjhello-0.1.0.tar.gz/zwjhello-0.1.0/zwjhello.py
|
zwjhello.py
|
A JIRA workflow automation toolset for personal use.
|
zwjira
|
/zwjira-0.1.1.tar.gz/zwjira-0.1.1/README.md
|
README.md
|
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['zwjira']
package_data = \
{'': ['*']}
install_requires = \
['atlassian-python-api>=3.31.1,<4.0.0', 'jira>=3.4.1,<4.0.0']
entry_points = \
{'console_scripts': ['zwjira = zwjira.cli:main']}
setup_kwargs = {
'name': 'zwjira',
'version': '0.1.1',
'description': 'A JIRA workflow automation toolset for personal use',
'long_description': 'A JIRA workflow automation toolset for personal use.\n',
'author': 'neothenil',
'author_email': '[email protected]',
'maintainer': None,
'maintainer_email': None,
'url': 'https://github.com/neothenil/zwjira',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.10,<4.0',
}
setup(**setup_kwargs)
|
zwjira
|
/zwjira-0.1.1.tar.gz/zwjira-0.1.1/setup.py
|
setup.py
|
from distutils.core import setup
setup(
name='zwkMathTest', # the module's public name
version='1.0', # version number
description='这是第一个对外发布的模块,测试哦', # description ("this is the first publicly released module, just a test")
author='zwk', # author
author_email='[email protected]', py_modules=['zwkMathTest.test1','zwkMathTest.test2'] # the modules to publish
)
|
zwkMathTest
|
/zwkMathTest-1.0.tar.gz/zwkMathTest-1.0/setup.py
|
setup.py
|
#coding=utf-8
from distutils.core import setup
setup(
name='zwkPython', # the module's public name
version='1.0', # version number
description='a test for create my own module', # description
author='zwk', # author
author_email='[email protected]', py_modules=['zwkPython.test'] # the modules to publish
)
|
zwkPython
|
/zwkPython-1.0.tar.gz/zwkPython-1.0/setup.py
|
setup.py
|
import tushare as ts
import pandas as pd
import kit.date_kit as date_kit
def get_daily_stock(pro,ts_code,start_date,end_date,limit,offset,num:int = 0):
start_date = date_kit.to_yyyymmdd(start_date)
end_date = date_kit.to_yyyymmdd(end_date)
temp = pro.daily(ts_code=ts_code, limit=limit, offset=offset * limit, start_date=start_date, end_date=end_date)
if temp.empty:
if num > 5:
print("已经重试了5次,还是没有数据",ts_code)
return pd.DataFrame()
return get_daily_stock(pro,ts_code, start_date,end_date ,limit, offset, num + 1)
else:
return temp
def get_daily(pro,symbol,start,end,limit,offset):
temp = get_daily_stock(pro,symbol,start,end,limit,offset,0)
return temp
def get_daily_adj_factor(pro,ts_code,start_date,end_date,limit,offset,num:int = 0):
start_date = date_kit.to_yyyymmdd(start_date)
end_date = date_kit.to_yyyymmdd(end_date)
temp = pro.adj_factor(ts_code=ts_code, limit=limit, offset=offset * limit, start_date=start_date, end_date=end_date)
if temp.empty:
if num > 5:
print("已经重试了5次,还是没有数据",ts_code)
return pd.DataFrame()
return get_daily_adj_factor(pro,ts_code, start_date,end_date ,limit, offset, num + 1)
else:
return temp
def daily_adj_factor(pro,symbol,start,end,limit,offset):
# pass the tushare 'pro' client through, mirroring get_daily above
temp = get_daily_adj_factor(pro,symbol,start,end,limit,offset,0)
return temp
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/tushare_help/tushare_helpkit.py
|
tushare_helpkit.py
|
def winsorize_std(series, n=3):
mean, std = series.mean(), series.std()
return series.clip(mean - std * n, mean + std * n)
def winsorize_mad(series, n=3):
median, mad = series.median(), series.mad()
return series.clip(median - mad * n, median + mad * n)
def standardize(series):
return (series - series.mean()) / series.std()
def neutralize(series):
# TODO: not yet implemented
return series
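# Editor's note: a minimal usage sketch, not part of the original module. Note that
# Series.mad() was removed in pandas 2.0, so winsorize_mad above needs an older pandas.
#   import pandas as pd
#   s = pd.Series([1.0, 2.0, 3.0, 100.0])
#   clipped = winsorize_std(s, n=3)   # extreme values clipped to mean +/- 3*std
#   z = standardize(clipped)          # zero mean, unit standard deviation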
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/factor/utils.py
|
utils.py
|
import talib
def indicator(df):
df['ma5'] = talib.MA(df['close'], timeperiod=5)
df['ma10'] = talib.MA(df['close'], timeperiod=10)
df['ma20'] = talib.MA(df['close'], timeperiod=20)
df['macd'], df['macdsignal'], df['macdhist'] = talib.MACD(df['close'], fastperiod=12, slowperiod=26, signalperiod=9)
df['rsi'] = talib.RSI(df['close'], timeperiod=14)
df['cci'] = talib.CCI(df['high'], df['low'], df['close'], timeperiod=14)
df['dx'] = talib.DX(df['high'], df['low'], df['close'], timeperiod=14)
df['slowk'], df['slowd'] = talib.STOCH(df['high'], df['low'], df['close'], fastk_period=9, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)
df['fastk'], df['fastd'] = talib.STOCHF(df['high'], df['low'], df['close'], fastk_period=9, fastd_period=3, fastd_matype=0)
df['willr'] = talib.WILLR(df['high'], df['low'], df['close'], timeperiod=14)
df['mom'] = talib.MOM(df['close'], timeperiod=10)
df['roc'] = talib.ROC(df['close'], timeperiod=10)
df['trix'] = talib.TRIX(df['close'], timeperiod=30)
df['aroonup'], df['aroondown'] = talib.AROON(df['high'], df['low'], timeperiod=14)
df['aroonosc'] = talib.AROONOSC(df['high'], df['low'], timeperiod=14)
df['bop'] = talib.BOP(df['open'], df['high'], df['low'], df['close'])
df['midpoint'] = talib.MIDPOINT(df['close'], timeperiod=14)
df['midprice'] = talib.MIDPRICE(df['high'], df['low'], timeperiod=14)
df['sar'] = talib.SAR(df['high'], df['low'], acceleration=0, maximum=0)
df['trange'] = talib.TRANGE(df['high'], df['low'], df['close'])
df['atr'] = talib.ATR(df['high'], df['low'], df['close'], timeperiod=14)
df['natr'] = talib.NATR(df['high'], df['low'], df['close'], timeperiod=14)
df['adx'] = talib.ADX(df['high'], df['low'], df['close'], timeperiod=14)
df['adxr'] = talib.ADXR(df['high'], df['low'], df['close'], timeperiod=14)
df['apo'] = talib.APO(df['close'], fastperiod=12, slowperiod=26, matype=0)
df['ppo'] = talib.PPO(df['close'], fastperiod=12, slowperiod=26, matype=0)
df['mfi'] = talib.MFI(df['high'], df['low'], df['close'], df['vol'], timeperiod=14)
df['minus_di'] = talib.MINUS_DI(df['high'], df['low'], df['close'], timeperiod=14)
df['minus_dm'] = talib.MINUS_DM(df['high'], df['low'], timeperiod=14)
df['plus_di'] = talib.PLUS_DI(df['high'], df['low'], df['close'], timeperiod=14)
df['plus_dm'] = talib.PLUS_DM(df['high'], df['low'], timeperiod=14)
df['rocp'] = talib.ROCP(df['close'], timeperiod=10)
df['rocr'] = talib.ROCR(df['close'], timeperiod=10)
df['rocr100'] = talib.ROCR100(df['close'], timeperiod=10)
df['ultosc'] = talib.ULTOSC(df['high'], df['low'], df['close'], timeperiod1=7, timeperiod2=14, timeperiod3=28)
df['ema5'] = talib.EMA(df['close'], timeperiod=5)
df['ema10'] = talib.EMA(df['close'], timeperiod=10)
df['ema20'] = talib.EMA(df['close'], timeperiod=20)
df['dema5'] = talib.DEMA(df['close'], timeperiod=5)
df['dema10'] = talib.DEMA(df['close'], timeperiod=10)
df['dema20'] = talib.DEMA(df['close'], timeperiod=20)
df['tema5'] = talib.TEMA(df['close'], timeperiod=5)
df['tema10'] = talib.TEMA(df['close'], timeperiod=10)
df['tema20'] = talib.TEMA(df['close'], timeperiod=20)
df['trima5'] = talib.TRIMA(df['close'], timeperiod=5)
df['trima10'] = talib.TRIMA(df['close'], timeperiod=10)
df['trima20'] = talib.TRIMA(df['close'], timeperiod=20)
df['wma5'] = talib.WMA(df['close'], timeperiod=5)
df['wma10'] = talib.WMA(df['close'], timeperiod=10)
df['wma20'] = talib.WMA(df['close'], timeperiod=20)
df['kama5'] = talib.KAMA(df['close'], timeperiod=5)
df['kama10'] = talib.KAMA(df['close'], timeperiod=10)
df['kama20'] = talib.KAMA(df['close'], timeperiod=20)
df['obv'] = talib.OBV(df['close'], df['vol'])
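# Editor's note: a usage sketch, not part of the original module. indicator() mutates
# the DataFrame in place and expects tushare-style columns 'open', 'high', 'low',
# 'close' and 'vol', with TA-Lib installed:
#   import pandas as pd
#   df = ...  # daily bars with open/high/low/close/vol columns
#   indicator(df)
#   df[['ma5', 'rsi', 'macd', 'atr']].tail()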
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/indicator/base.py
|
base.py
|
import platform
# Determine whether the current system is Mac, Windows, or Linux
def get_system():
"""
Determine whether the current system is Mac, Windows, or Linux
:return:
"""
system = platform.system()
if system == 'Windows':
return 'Windows'
elif system == 'Darwin':
return 'Mac'
else:
return 'Linux'
def get_system_dir(cloud:str,pj:str):
platform = get_system()
if platform == 'Windows':
return f'//{cloud}//file//{pj}//'
elif platform == 'Mac':
return f'/Volumes/file/{pj}/'
else:
return f'/mnt/{cloud}/file/{pj}/'
def get_system_data_dir(cloud:str,pj:str,data:str):
platform = get_system()
if platform == 'Windows':
return f'//{cloud}//file//{pj}//{data}//'
elif platform == 'Mac':
return f'/Volumes/file/{pj}/{data}/'
else:
return f'/mnt/{cloud}/file/{pj}/{data}/'
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zwhy/zw.py
|
zw.py
|
import pandas as pd
import re
def get_before_date(date, days=60):
"""
Return the date `days` days (default 60) before the given date
:param date:
:param days:
:return:
"""
date = pd.to_datetime(date)
date = date - pd.Timedelta(days=days)
return date.strftime("%Y-%m-%d")
def now(type="%Y%m%d%H%M%S"):
"""
Get the current time
:return:
"""
return pd.to_datetime("now").strftime(type)
# Use is_yyyymmdd and is_yyyy_mm_dd to decide whether the date is yyyymmdd or yyyy-mm-dd
def to_yyyymmdd(date):
if is_yyyymmdd(date):
return date
elif is_yyyy_mm_dd(date):
return date.replace('-', '')
else:
return None
def to_yyyy_mm_dd(date):
if is_yyyymmdd(date):
return date[0:4] + '-' + date[4:6] + '-' + date[6:8]
elif is_yyyy_mm_dd(date):
return date
else:
return None
# Use a regular expression to check for the yyyymmdd format
def is_yyyymmdd(date):
"""
Check with a regular expression whether the date is in yyyymmdd format
:param date:
:return:
"""
pattern = re.compile(r'^\d{4}\d{2}\d{2}$')
match = pattern.match(date)
if match:
return True
else:
return False
# Use a regular expression to check for the yyyy-MM-dd format
def is_yyyy_mm_dd(date):
"""
Check with a regular expression whether the date is in yyyy-MM-dd format
:param date:
:return:
"""
pattern = re.compile(r'^\d{4}-\d{2}-\d{2}$')
match = pattern.match(date)
if match:
return True
else:
return False
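# Editor's note: illustrative conversions, derived directly from the functions above
# (a sketch, not part of the original module):
#   to_yyyymmdd('2024-01-05')    # -> '20240105'
#   to_yyyy_mm_dd('20240105')    # -> '2024-01-05'
#   to_yyyymmdd('2024/01/05')    # -> None (matches neither format)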
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/kit/date_kit.py
|
date_kit.py
|
import os
def create_dir(path):
"""
Check whether the path exists
and create it if it does not
:param path:
:return:
"""
if not os.path.exists(path):
os.makedirs(path)
import platform
# Determine whether the current system is Mac, Windows, or Linux
def get_system():
"""
Determine whether the current system is Mac, Windows, or Linux
:return:
"""
import platform
system = platform.system()
if system == 'Windows':
return 'Windows'
elif system == 'Darwin':
return 'Mac'
else:
return 'Linux'
def get_system_dir(cloud:str,pj:str):
platform = get_system()
if platform == 'Windows':
return f'//{cloud}//file//{pj}//'
elif platform == 'Mac':
return f'/Volumes/{cloud}/file/{pj}/'
else:
return f'/mnt/{cloud}/file/{pj}/'
def get_system_data_dir(cloud:str,pj:str,data:str):
platform = get_system()
if platform == 'Windows':
return f'//{cloud}//file//{pj}//{data}//'
elif platform == 'Mac':
return f'/Volumes/{cloud}/file/{pj}/{data}/'
else:
return f'/mnt/{cloud}/file/{pj}/{data}/'
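# Editor's note: illustrative results, derived from the branches above (a sketch, not
# part of the original module; 'nas' and 'myproj' are made-up names):
#   get_system_dir('nas', 'myproj')              # on Linux -> '/mnt/nas/file/myproj/'
#   get_system_data_dir('nas', 'myproj', 'raw')  # on Mac   -> '/Volumes/nas/file/myproj/raw/'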
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/kit/path_kit.py
|
path_kit.py
|
import pandas as pd
def date_split(data):
data_cp = data.copy()
data_cp.reset_index(drop=True, inplace=True)
result = []
if len(data) == 1:
result.append([data_cp[0], data_cp[0]])
return result
else:
data_diff = data_cp.diff()
data_diff.iloc[0] = 1
# get the list of break points
break_point = data_diff[data_diff != 1].index.tolist()
if len(break_point) == 0:
result.append([data_cp[0], data_cp[len(data_cp) - 1]])
return result
else:
first_point = 0
# split data according to the break points
for i in break_point:
_data = data_cp[first_point:i]
_data.reset_index(drop=True, inplace=True)
result.append([_data[0], _data[len(_data) - 1]])
first_point = i
# data after the last break point
_data = data_cp[first_point:]
_data.reset_index(drop=True, inplace=True)
result.append([_data[0], _data[len(_data) - 1]])
return result
# # main function
# if __name__ == '__main__':
# # build a pandas DataFrame whose content is a list of years
# data = pd.DataFrame({'year': [2014, 2015, 2018, 2020]})
# # call the function
# result = date_split(data['year'])
# print(result)
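# Editor's note (not part of the original): for the commented example above,
# date_split returns [[2014, 2015], [2018, 2018], [2020, 2020]]; consecutive years
# collapse into a [first, last] range and isolated years become single-element ranges.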
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/kit/num_kit.py
|
num_kit.py
|
import clickhouse_driver
import pandas as pd
def create_conn(address: str, port: int, database: str, user: str, password: str = ""):
"""
Initialize the connection
:return:
"""
conn = clickhouse_driver.connect(dsn=f'clickhouse://{user}:{password}@{address}:{port}/{database}?connect_timeout=3000000&send_receive_timeout=3000000'
'&sync_request_timeout=3000000')
return conn
def clickhouse_insert(df, table_name, conn: clickhouse_driver.connect):
conn = conn
cursor = conn.cursor()
query = fill_sql(df, table_name)
cursor.execute(query)
conn.commit()
cursor.close()
def clickhouse_select(conn: clickhouse_driver.connect, sql: str):
conn = conn
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
df = pd.DataFrame(result, columns=[i[0] for i in cursor.description])
return df
def fill_sql(df, table_name):
sql = f"INSERT INTO {table_name} VALUES "
values = []
for row in df.itertuples(index=False):
row_values = []
for value in row:
if isinstance(value, str):
row_values.append(f"'{value}'")
else:
row_values.append(str(value))
values.append(f"({', '.join(row_values)})")
sql += ', '.join(values)
return sql
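# Editor's note: an illustrative call of fill_sql, not part of the original module
# ('demo_table' is a made-up name):
#   import pandas as pd
#   df = pd.DataFrame({'symbol': ['600000.SH'], 'close': [10.5]})
#   fill_sql(df, 'demo_table')
#   # -> "INSERT INTO demo_table VALUES ('600000.SH', 10.5)"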
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/db/clickhouse_kit.py
|
clickhouse_kit.py
|
import clickhouse_driver
import db.clickhouse_kit as clickhouse_kit
# Get the specified (shifted) return from the stock's daily bars
def get_stock_returns(conn: clickhouse_driver.dbapi.Connection, symbol_list:str, start_date: str, end_date: str, shift_num: int):
"""
:param conn:
:param symbol_list:
:param start_date:
:param end_date:
:param shift_num:
:return:
"""
limit = shift_num * 2
# 1. get the shift_num * 2 trading days following end_date
sql = "select trade_date from basic_stock_workday where trade_date>= '%s' order by trade_date limit %s" % (end_date, limit)
workday_list = clickhouse_kit.clickhouse_select(conn, sql)['trade_date']
if len(workday_list) < limit:
# raise an exception
raise Exception("Not enough trading days")
new_end_date = str(workday_list.iloc[-1])
# 2. get the daily data, sorted ascending by symbol and trade_date
sql = "select symbol,trade_date,pct_chg from basic_stock_daily where symbol = '%s' and trade_date>='%s' and trade_date<='%s' order by symbol,trade_date" % (
symbol_list, start_date, new_end_date)
daily_data = clickhouse_kit.clickhouse_select(conn, sql)
daily_data['pct_chg'] = daily_data['pct_chg'].shift(-shift_num)
daily_data['trade_date'] = daily_data['trade_date'].apply(lambda x: str(x))
daily_data = daily_data[(daily_data['trade_date']>=start_date) & (daily_data['trade_date']<=end_date)]
return daily_data
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/db/zwdb.py
|
zwdb.py
|
import pandas as pd
import pywencai as wc
import tushare as ts
import os
from kit import num_kit, path_kit
import pathlib
'''
Data functions
'''
# Data ingestion
class base_data_loader:
"""
Data loader
Obtains the stock list via wencai (pywencai)
Obtains the stock data via tushare
"""
def __init__(self, path="../data/", module="data/", file_name="base_data.csv"):
self.path = path
self.module = module
self.file_name = file_name
self.module_path = path + module
self.question = None
self.start = None
self.end = None
self.token = None
self.symbol_index = dict() # per-stock date index
self.filter = set() # set of stocks to filter out
self.data = pd.DataFrame()
path_kit.create_dir(self.module_path)
def __daily_data(self, symbol, start, end, token):
"""
Fetch daily bar data
:param symbol:
:param start:
:param end:
:return:
"""
api = ts.pro_api(token)
df = ts.pro_bar(
ts_code=symbol,
api=api,
start_date=str(start) + "0101",
end_date=str(end) + "1231",
adjfactor=True,
asset="E", # 证券类型 E:股票和交易所基金,I:沪深指数,C:数字货币,FT:期货 FD:基金/O期权/H港股/CB可转债
freq="D", # D:日线 W:周 M:月
adj="hfq", # 不复权:None 前复权:qfq 后复权:hfq
retry_count=99 # 重试次数
)
return df[::-1]
def __daily(self, start_date, end_date, token, symbols: list):
"""
Fetch daily bar data
:param start_date:
:param end_date:
:param symbols:
:return:
"""
result = pd.DataFrame()
if len(symbols) == 0:
return pd.DataFrame()
for symbol in symbols:
df = self.__daily_data(symbol, start_date, end_date, token)
result = pd.concat([result, df])
return result
def filter_symbols(self, symbols: list):
"""
Filter the data list
:param symbols: stock codes, provided as a list
:return:
"""
symbols_set = set(symbols)
self.filter.update(symbols_set)
def __get_symbols_by_wc(self, question, columns: list):
"""
Stock list
Obtains the stock list through wencai
"""
result = pd.DataFrame()
for i in range(self.start, self.end + 1):
quest = question % (i, i - 1)
data = wc.get(question=quest, loop=True)
data = data[columns]
data = data[~data['股票代码'].isin(self.filter)]
data['trade_date'] = i
result = pd.concat([result, data])
return result
def get_data(self, question, start, end, token):
"""
Fetch the full dataset
Prefers reading it locally; falls back to the internet if it is not available locally
:param token:
:param end:
:param start:
:param question:
:param data_path: default dataset path
:return:
"""
self.question = question
self.start = start
self.end = end
if os.path.exists(self.module_path + self.file_name):
print("读取本地数据集")
self.data = pd.read_csv(self.module_path + self.file_name)
else:
print("从互联网获取数据集")
symbols_list = self.__get_symbols_by_wc(self.question, columns=['股票代码'])
print("开始遍历")
for index, symbol in enumerate(symbols_list['股票代码'].unique()):
print("数据进度百分比:%s" % (index / len(symbols_list['股票代码'].unique()) * 100), end='\r', flush=True)
self.data = pd.concat([self.data, self.__daily_data(symbol, self.start, self.end, token)])
# 重置索引
self.data = self.data.reset_index(drop=True)
# 将日期转换为字符串
self.data['trade_date'] = self.data['trade_date'].apply(lambda x: str(x))
# 将数据里的数字类型转换为float
self.data = self.data.applymap(lambda x: float(x) if isinstance(x, (int, float)) else x)
return self.data
def observe(self, mlflow):
"""
Observe the dataset
:return:
"""
# data report
mlflow.log_text("\n".join(self.__data_report()), self.module + "data_report.txt")
# number of trade dates per stock
mlflow.log_dict(self.data.groupby('ts_code')['trade_date'].count().to_dict(), self.module + "data_count.txt")
# missing trade dates for each stock
mlflow.log_text(self.__row_nan_trade_date(), self.module + "data_trade_nan.txt")
def save(self, path=None, name=None):
"""
Save the dataset
:param name:
:param path:
:return:
"""
file_path = self.module_path if path is None else path
file_name = self.file_name if name is None else name
self.data.to_csv(file_path + file_name, index=False, encoding='utf-8', )
def __data_report(self):
"""
Data report
Basic summary information
:return:
"""
data = []
# build the data report line by line
data.append("Start date: %s" % self.start)
data.append("End date: %s" % self.end)
data.append("Total rows: %s" % len(self.data))
data.append("Number of columns: %s" % len(self.data.columns))
data.append("Column names: %s" % self.data.columns.tolist())
data.append("Missing values in the dataset: %s" % self.data.isnull().sum())
return data
def __row_nan_trade_date(self):
text = ''
symbols = self.data["ts_code"].unique()
trades = self.data["trade_date"].unique()
for symbol in symbols:
trade_list = pd.DataFrame(trades)
trade_list.columns = ['trade_date']
trade_data = pd.merge(trade_list, self.data[self.data['ts_code'] == symbol], on='trade_date', how='left')
trade_data = trade_data[trade_data['ts_code'].isnull()]
if len(trade_data) != 0:
text = text + symbol + ','
text = text + ",".join(trade_data['trade_date'].astype('str').tolist()) + '\n'
return text
# Feature engineering
class feature_data_loader:
"""
Feature engineering for the dataset
1. add features
2. observe the dataset
3. save the dataset
"""
def __init__(self, base_data=None, path="../data/", module="data/"):
self.data = base_data
self.features_list = []
self.result = pd.DataFrame()
self.path = path
self.module = module
self.module_path = path + module
def add_feature(self, feature):
"""
Add features
:param feature:
:return:
"""
for func in feature:
self.features_list.append(func)
# after adding features, the dataset is re-initialized
def obverse(self):
"""
Observe the dataset
:return:
"""
pass
def create_execute(self, path=None):
"""
Run the feature engineering
:return:
"""
file_path = self.module_path if path is None else path
path_kit.create_dir(file_path + "/feature")
symbol_list = self.data['ts_code'].unique()
columns = self.data.columns
for index, symbol in enumerate(symbol_list):
print("数据进度百分比:%s" % (index / len(symbol_list) * 100))
symbol_data = pd.DataFrame(self.data[self.data['ts_code'] == symbol])
symbol_data.reset_index(drop=True, inplace=True)
for func in self.features_list:
func(symbol_data)
# split symbol_data into two frames: the original columns and the newly added feature columns
symbol_data_left = symbol_data[columns]
symbol_data_right = symbol_data[symbol_data.columns[~symbol_data.columns.isin(symbol_data_left.columns)]]
symbol_data_right = symbol_data_right.applymap(lambda x: round(float(x), 2) if isinstance(x, (int, float)) else x)
# merge the new feature columns back with the original columns
data = pd.merge(symbol_data_left, symbol_data_right, left_index=True, right_index=True)
# drop a row entirely if it contains any NaN
data.dropna(axis=0, how='any', inplace=True)
data.to_csv(file_path + "/feature/" + symbol + ".csv", index=False, encoding='utf-8')
return self.result
def to_execute(self, data, indicator):
"""
Run the feature engineering
:return:
"""
symbol_list = data['ts_code'].unique()
columns = data.columns
for index, symbol in enumerate(symbol_list):
print("数据进度百分比:%s" % (index / len(symbol_list) * 100))
symbol_data = pd.DataFrame(data[data['ts_code'] == symbol])
symbol_data.reset_index(drop=True, inplace=True)
for func in indicator:
func(symbol_data)
# split symbol_data into two frames: the original columns and the newly added feature columns
symbol_data_left = symbol_data[columns]
symbol_data_right = symbol_data[symbol_data.columns[~symbol_data.columns.isin(symbol_data_left.columns)]]
symbol_data_right = symbol_data_right.applymap(lambda x: round(float(x), 2) if isinstance(x, (int, float)) else x)
# merge the new feature columns back with the original columns
data1 = pd.merge(symbol_data_left, symbol_data_right, left_index=True, right_index=True)
return data1
# Training/testing data preparation
class trains_data_loader:
def __init__(self, path="../data/", module="data/"):
self.feature_dir = None
self.path = path
self.module = module
self.module_path = path + module
self.train_X = pd.DataFrame()
self.train_y = pd.DataFrame()
self.test_X = pd.DataFrame()
self.test_y = pd.DataFrame()
self.drop_column = []
def load_feature_dir(self, feature_dir):
self.feature_dir = feature_dir
def drop_columns(self, columns):
"""
Drop the specified columns
:param columns:
:return:
"""
for column in columns:
self.drop_column.append(column)
def split_by_time(self, trains_start, trains_end, test_start, test_end):
"""
:param test_end:
:param test_start:
:param trains_end:
:param trains_start:
:param trans:
:param start:
:param end:
:return:
"""
self.train_X = pd.DataFrame()
self.train_y = pd.DataFrame()
self.test_X = pd.DataFrame()
self.test_y = pd.DataFrame()
file_list = os.listdir(self.module_path + self.feature_dir)
for index, file in enumerate(file_list):
print(f"读取进度:{(index / len(file_list)) * 100}")
data = pd.read_csv(self.module_path + self.feature_dir + file, encoding='utf-8')
if len(data) == 0:
continue
trains_x = data[(data['trade_date'] > trains_start) & (data['trade_date'] < trains_end)]
if len(trains_x) == 0:
continue
trains_y = trains_x['flag']
trains_x = trains_x.drop(self.drop_column, axis=1)
self.train_X = pd.concat([self.train_X, trains_x])
self.train_y = pd.concat([self.train_y, trains_y])
test_X = data[(data['trade_date'] > test_start) & (data['trade_date'] < test_end)]
if len(test_X) == 0:
continue
test_y = test_X['flag']
test_X = test_X.drop(self.drop_column, axis=1)
self.test_X = pd.concat([self.test_X, test_X])
self.test_y = pd.concat([self.test_y, test_y])
def obverse(self, mlflow):
pass
# mlflow.log_metric("train_label_1", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
# mlflow.log_metric("train_label_0", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
# mlflow.log_metric("train_label_-1", len(self.train_X[self.train_X['flag'] == 1]) / len(self.train_X) * 100)
def save(self, path=None):
"""
Save the datasets
:param path:
:return:
"""
file_path = self.module_path if path is None else path
self.train_X.to_pickle(file_path + 'train_X.pkl')
self.train_y.to_pickle(file_path + 'train_y.pkl')
self.test_X.to_pickle(file_path + 'test_X.pkl')
self.test_y.to_pickle(file_path + 'test_y.pkl')
class backtrader_data_loader:
def __init__(self, path='../data/', module='data/', csv_data='dataset.csv'):
self.path = path
self.module = module
self.data_path = self.path + self.module
self.csv_data = csv_data
self.data = pd.DataFrame()
def get_data(self, start, end):
"""
Pre-adjustment (unadjusted) price data plus indicators computed on adjusted data
:param start:
:param end:
:return:
"""
data = pd.read_csv(self.path + self.csv_data)
self.data = data[(data['trade_date'] > start) & (data['trade_date'] < end)]
self.data['open'] = round(self.data['open'] / self.data['adj_factor'], 2)
self.data['high'] = round(self.data['high'] / self.data['adj_factor'], 2)
self.data['low'] = round(self.data['low'] / self.data['adj_factor'], 2)
self.data['close'] = round(self.data['close'] / self.data['adj_factor'], 2)
self.data['amount'] = round(self.data['amount'] / self.data['adj_factor'], 2)
self.data.drop(['pre_close', 'change', 'pct_chg', 'flag'], axis=1, inplace=True)
self.data['trade_date'] = self.data['trade_date'].apply(lambda x: pd.to_datetime(x, format='%Y%m%d'))
self.data = self.data.rename(columns={'trade_date': 'datetime',
'vol': 'volume'
})
return self.data
def save(self, module, name):
path = self.path + module
if not os.path.exists(path):
os.mkdir(path)
self.data.to_csv(path + name, index=False, encoding='utf-8')
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/data/dataset.py
|
dataset.py
|
class log_option:
"""
Logging options
"""
def __init__(self, log_file_name="log.txt"):
self.log_file_name = log_file_name
class cash_option:
"""
Stock account cash options
"""
def __init__(self, cash=50000.0, commission=0.0003, slip=0.005):
self.cash = cash # initial cash
self.commission = commission # commission rate
self.slip = slip # slippage
def lines_params_helper(add_list):
lines = ()
params = []
for col in add_list:
lines += (col,)
params.append((col, -1))
params = tuple(params)
print("lines = ", lines)
print("params = ", params)
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zw_backtrader/zw_option.py
|
zw_option.py
|
import backtrader as bt
import pandas as pd
import zw_backtrader.zw_cerebro as zw
import zw_backtrader.strategy.strategy as zw_strategy
import zw_backtrader.zw_option as z
class data_on(bt.feeds.PandasData):
lines = ('datetime', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 'vol', 'amount',)
params = (
('datetime', -1), ('open', -1), ('high', -1), ('low', -1), ('close', -1), ('pre_close', -1), ('change', -1), ('pct_chg', -1), ('vol', -1),
('amount', -1),)
datafields = ['datetime', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 'vol', 'amount']
#
if "__main__" == __name__:
# z.lines_params_helper(('trade_date', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 'vol', 'amount'))
# 1. create the log options object
logs = zw.log_option()
# 2. create the cash options object
cash = zw.cash_option()
# 3. create the cerebro wrapper
cerebro = zw.base_cerebro(logs, cash)
# 4. get the underlying cerebro core object
cerebro1 = cerebro.get_cerebro()
# 5. load the data
base_data = pd.read_csv("/Users/summer/PycharmProjects/daily-random-forest-project/data/data.csv")
test_data = base_data[base_data['ts_code'] == '603019.SH']
test_data = test_data[test_data['trade_date'] > 20160601]
aa = test_data[['trade_date', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 'vol', 'amount']]
aa.columns = ['datetime', 'open', 'high', 'low', 'close', 'pre_close', 'change', 'pct_chg', 'vol', 'amount']
aa['datetime'] = aa['datetime'].apply(lambda x: pd.to_datetime(x, format='%Y%m%d'))
# aa.set_index('datetime', inplace=True)
data = data_on(dataname=aa)
# data = bt.feeds.PandasData(dataname=test_data)
# 6. add the data feed
cerebro1.adddata(data=data, name='603019.SH')
# 7. add the strategy
cerebro1.addstrategy(strategy=zw_strategy.test_strategy)
# 8. run the strategy
cerebro1.run()
# 9. plot
# cerebro1.plot()
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zw_backtrader/demo.py
|
demo.py
|
import backtrader as bt
from zw_backtrader.zw_option import log_option, cash_option
class base_cerebro:
def __init__(self, logs: log_option, cash: cash_option):
self.cerebro = bt.Cerebro()
self.logs = logs
self.cash_option = cash
def get_cerebro(self):
cerebro = self.cerebro
cerebro.broker.setcash(self.cash_option.cash) # set the initial cash
cerebro.broker.setcommission(self.cash_option.commission) # set the commission rate
cerebro.broker.set_slippage_fixed(self.cash_option.slip) # set a fixed slippage
return cerebro # return the cerebro core object
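# Editor's note: a minimal usage sketch, not part of the original module:
#   from zw_backtrader.zw_option import log_option, cash_option
#   cerebro = base_cerebro(log_option(), cash_option(cash=100000.0)).get_cerebro()
#   # returns a plain backtrader.Cerebro with cash, commission and slippage applied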
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zw_backtrader/zw_cerebro.py
|
zw_cerebro.py
|
import pandas as pd
class trade_detail:
"""
Trade detail
"""
def __init__(self, buy_date, name, trade_id, sell_date):
self.name = name
self.buy_date = buy_date
self.trade_id = trade_id
self.sell_date = sell_date
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zw_backtrader/trade/trade_detail.py
|
trade_detail.py
|
import backtrader as bt
import joblib
import pandas as pd
import os
from kit import date_kit
import datetime as dt
import zw_backtrader.trade.trade_detail as td
class base_strategy(bt.Strategy):
"""
Base strategy class
Only one stock may be held at a time
After a sell there is a 10-day cooling-off period
"""
params = (
('model_path', ""), # model path
('log_path', '../data/backtrader/'), # log directory
('log_name', f'logs_{date_kit.now()}.log'), # log file name
('cold_date', 10) # cooling-off period (days)
)
def __init__(self):
# current positions (buy/sell list)
self.position_list = dict()
# block list (cooling-off filter)
self.filter_dict = dict()
# collected log lines
self.log_text = []
def log(self, txt, dt=None):
"""
Logging function for this strategy
:param txt:
:param dt:
:return:
"""
dt = dt or self.datas[0].datetime.date(0)
self.log_text.append(f'{dt.isoformat()}: {txt}')
print(f'{dt.isoformat()}: {txt}')
def start(self):
"""
Executed when the strategy starts
:return:
"""
self.model = joblib.load(self.params.model_path)
# def prenext(self):
# '''Strategy warm-up phase, covering bar 1 through bar min_period - 1'''
# # This hook mainly waits for indicators to be computed; until then the empty prenext() is called by default
# # min_period is the minimum number of bars needed before every indicator defined in __init__ has its first value
# print('prenext function')
#
# def nextstart(self):
# '''First point at which the strategy runs normally, i.e. bar number min_period'''
# # The strategy only starts once every indicator declared in __init__ has a value available
# # nextstart() runs exactly once and mainly signals that next() can start from here on
# # Its default implementation simply calls next(), so the strategy logic in next() already runs from bar min_period onward
# print('nextstart function')
def next(self):
# fetch each stock's data
# and feed it into the model for prediction
for index in range(len(self.datas)):
self._next_execute(index)
def _next_execute(self, index):
symbol = self.datas[index]._name
date = self.datas[index].datetime.date(0)
self._sell_excute(date, index)
if self._predict_excute(index) == 1:
if self._filter_execute(symbol, date):
self._buy_excute(symbol, index)
else:
pass
# self.log(f'Recently bought, filtering out {symbol}')
else:
pass
# self.log(f'Model predicts no buy for {symbol}')
def _filter_execute(self, symbol, date):
# filter by current positions
if self.position_list.get(symbol) is not None:
# already holding the stock, do not buy
return False
else:
# filter by the cooling-off period
filter_date = self.filter_dict.get(symbol)
if filter_date is not None:
if date < filter_date:
# date is within the cooling-off period, do not buy
return False
else:
# the cooling-off period has passed, buying is allowed
# remove the cooling-off entry
self.filter_dict.pop(symbol)
return True
return True
def _sell_excute(self, date, index):
for k, v in self.position_list.items():
if date > v.sell_date:
self.sell(data=self.datas[index], size=100, exectype=bt.Order.Market, price=self.datas[index].lines.close[0])
def _predict_excute(self, index):
data = self._get_data(datas=self.datas[index], index=index)
return self.model.predict(data)
def _buy_excute(self, symbol, index):
adj_factor = self.datas[index].lines.adj_factor[1]
# open = round(self.datas[index].lines.open[1] / adj_factor, 2)
# close = round(self.datas[index].lines.close[0] / adj_factor, 2)
# high = round(self.datas[index].lines.high[0] / adj_factor, 2)
# low = round(self.datas[index].lines.low[0] / adj_factor, 2)
self.buy(data=self.datas[index], size=100)
def _get_data(self, datas, index):
# this index is really necessary here
data = pd.DataFrame()
for v, k in enumerate(datas._colmapping):
if self.data.data_schema is not None:
if k not in self.data.data_schema:
continue
exec(f"data.loc[0,'{k}'] = self.datas[index].lines.{k}[0]")
return data
def stop(self):
"""
Executed when the strategy ends
Saves the collected log lines
:return:
"""
if os.path.exists(self.params.log_path) is False:
os.mkdir(self.params.log_path)
with open(self.params.log_path + self.params.log_name, mode='w') as f:
f.write('\n'.join(self.log_text))
def notify_order(self, order):
"""
Executed when an order's status changes
:param order:
:return:
"""
# check the order status
if order.status in [order.Submitted, order.Accepted]:
return
# check whether the order is completed
if order.status in [order.Completed]:
if order.isbuy():
self.log(f'Bought {order.data._name}, price: {order.executed.price}, cost: {order.executed.value}, commission: {order.executed.comm}')
self.position_list.update({order.data._name: td.trade_detail(order.data.datetime.date(0), order.data._name, trade_id=order.ref,
sell_date=order.data.datetime.date(3))})
else:
self.log(f'Sold {order.data._name}, price: {order.executed.price}, cost: {order.executed.value}, commission: {order.executed.comm}')
self.position_list.pop(order.data._name)
self.filter_dict[order.data._name] = order.data.datetime.date(0) + dt.timedelta(days=self.params.cold_date)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log(f'Order failed: {order.data._name} status: {order.getstatusname()}')
def notify_trade(self, trade):
"""
Executed when a trade's status changes
:param trade:
:return:
"""
if not trade.isclosed:
return
self.log(f'Profit, gross: {trade.pnl}, net: {trade.pnlcomm}')
# def notify_cashvalue(self, cash, value):
# """
# Executed when the cash balance changes
# :param cash:
# :param value:
# :return:
# """
# self.log(f'Cash: {cash}, market value: {value}')
# def notify_fund(self, cash, value, fundvalue, shares):
# """
# Executed when the fund value changes
# :param cash:
# :param value:
# :param fundvalue:
# :param shares:
# :return:
# """
# self.log(f'Cash: {cash}, market value: {value}, fund value: {fundvalue}, shares: {shares}')
# class test_strategy(bt.Strategy):
# """
# Strategy base class
# """
#
# def __init__(self):
# # buy/sell lists
# self.buying_list = []
# self.selling_list = []
#
# def next(self):
# for index in range(len(self.datas)):
# data = self._get_data(datas=self.datas[index], index=index)
# print(data)
#
# # run the prediction
#
# def _get_data(self, datas, index):
# data = pd.DataFrame()
# for v, k in enumerate(datas._colmapping):
# if k is None:
# continue
# exec(f"data.loc[0,'{k}'] = self.datas[index].lines.{k}[0]")
# return data
|
zwkit
|
/zwkit-0.4.34-py3-none-any.whl/zw_backtrader/strategy/strategy.py
|
strategy.py
|
# Zwlib
ZwLib is a package that provides a series of utility functions for Convo studio, HyperPM, and AIForYou products.
## Features
- Easy-to-use API request functions with built-in error handling
- Functions for handling image, video, and payment meta-data
- Building NLP Models
- Compatible with Python 3.6 and above
## Installation
To install Zwlib, simply run:
```bash
pip install zwlib
```
|
zwlib
|
/zwlib-1.2.0.tar.gz/zwlib-1.2.0/README.md
|
README.md
|
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="zwlib",
version="1.2.0",
description="ZwLib is a package that provides a series of utility functions for Convo studio, HyperPM, and AIForYou products.",
author="chayan-hypercap",
author_email="[email protected]",
url="https://pypi.org/project/zwlib/",
include_package_data=True,
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=[
"requests",
],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
|
zwlib
|
/zwlib-1.2.0.tar.gz/zwlib-1.2.0/setup.py
|
setup.py
|
# These modules are also used by the protection code, so that the protection
# code needn't import anything
import os
import platform
import sys
import struct
# ctypes was added in Python 2.5, so pytransform doesn't work
# before Python 2.5
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch
#
# Support Platforms
#
plat_path = 'platforms'
plat_table = (
('windows', ('windows', 'cygwin-*')),
('darwin', ('darwin',)),
('ios', ('ios',)),
('linux', ('linux*',)),
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
('poky', ('poky',)),
)
arch_table = (
('x86', ('i?86', )),
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
('arm', ('armv5',)),
('armv6', ('armv6l',)),
('armv7', ('armv7l',)),
('ppc64', ('ppc64le',)),
('mips32', ('mips',)),
('aarch32', ('aarch32',)),
('aarch64', ('aarch64', 'arm64'))
)
#
# Hardware type
#
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
#
# Global
#
_pytransform = None
class PytransformError(Exception):
pass
def dllmethod(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
@dllmethod
def version_info():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('version_info', _pytransform))
return dlfunc()
@dllmethod
def init_pytransform():
major, minor = sys.version_info[0:2]
# Python2.5 no sys.maxsize but sys.maxint
# bitness = 64 if sys.maxsize > 2**32 else 32
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
init_module = prototype(('init_module', _pytransform))
ret = init_module(major, minor, pythonapi._handle)
if (ret & 0xF000) == 0x1000:
raise PytransformError('Initialize python wrapper failed (%d)'
% (ret & 0xFFF))
return ret
@dllmethod
def init_runtime():
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(0, 0, 0, 0)
@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
_pytransform.set_option(6, suffix.encode())
prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
dlfunc = prototype(('encrypt_code_object', _pytransform))
return dlfunc(pubkey, co, flags)
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
dlfunc = prototype(('generate_project_license_files', _pytransform))
return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
start, count) if sys.version_info[0] == 3 \
else dlfunc(filename, priname, rcode, start, count)
@dllmethod
def generate_license_key(prikey, keysize, rcode):
prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
dlfunc = prototype(('generate_license_key', _pytransform))
return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
else dlfunc(prikey, keysize, rcode.encode())
@dllmethod
def get_registration_code():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_registration_code', _pytransform))
return dlfunc()
@dllmethod
def get_expired_days():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_expired_days', _pytransform))
return dlfunc()
@dllmethod
def clean_obj(obj, kind):
prototype = PYFUNCTYPE(c_int, py_object, c_int)
dlfunc = prototype(('clean_obj', _pytransform))
return dlfunc(obj, kind)
def clean_str(*args):
tdict = {
'str': 0,
'bytearray': 1,
'unicode': 2
}
for obj in args:
k = tdict.get(type(obj).__name__)
if k is None:
raise RuntimeError('Can not clean object: %s' % obj)
clean_obj(obj, k)
def get_hd_info(hdtype, name=None):
if hdtype not in range(HT_DOMAIN + 1):
raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
size = 256
t_buf = c_char * size
buf = t_buf()
cname = c_char_p(0 if name is None
else name.encode('utf-8') if hasattr(name, 'encode')
else name)
if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
raise PytransformError('Get hardware information failed')
return buf.value.decode()
def show_hd_info():
return _pytransform.show_hd_info()
def assert_armored(*names):
prototype = PYFUNCTYPE(py_object, py_object)
dlfunc = prototype(('assert_armored', _pytransform))
def wrapper(func):
def wrap_execute(*args, **kwargs):
dlfunc(names)
return func(*args, **kwargs)
return wrap_execute
return wrapper
def check_armored(*names):
try:
prototype = PYFUNCTYPE(py_object, py_object)
prototype(('assert_armored', _pytransform))(names)
return True
except RuntimeError:
return False
def get_license_info():
info = {
'ISSUER': None,
'EXPIRED': None,
'HARDDISK': None,
'IFMAC': None,
'IFIPV4': None,
'DOMAIN': None,
'DATA': None,
'CODE': None,
}
rcode = get_registration_code().decode()
if rcode.startswith('*VERSION:'):
index = rcode.find('\n')
info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
rcode = rcode[index+1:]
index = 0
if rcode.startswith('*TIME:'):
from time import ctime
index = rcode.find('\n')
info['EXPIRED'] = ctime(float(rcode[6:index]))
index += 1
if rcode[index:].startswith('*FLAGS:'):
index += len('*FLAGS:') + 1
info['FLAGS'] = ord(rcode[index - 1])
prev = None
start = index
for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
index = rcode.find('*%s:' % k)
if index > -1:
if prev is not None:
info[prev] = rcode[start:index]
prev = k
start = index + len(k) + 2
info['CODE'] = rcode[start:]
i = info['CODE'].find(';')
if i > 0:
info['DATA'] = info['CODE'][i+1:]
info['CODE'] = info['CODE'][:i]
return info
def get_license_code():
return get_license_info()['CODE']
def get_user_data():
return get_license_info()['DATA']
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
try:
prototype = CFUNCTYPE(c_char_p)
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
return ver.decode().split('.')
except Exception:
pass
def format_platform(platid=None):
if platid:
return os.path.normpath(platid)
plat = platform.system().lower()
mach = platform.machine().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
if plat == 'linux':
cname, cver = platform.libc_ver()
if cname == 'musl':
plat = 'musl'
elif cname == 'libc':
plat = 'android'
elif cname == 'glibc':
v = _gnu_get_libc_version()
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
plat = 'centos6'
for alias, archlist in arch_table:
if _match_features(archlist, mach):
mach = alias
break
if plat == 'windows' and mach == 'x86_64':
bitness = struct.calcsize('P'.encode()) * 8
if bitness == 32:
mach = 'x86'
return os.path.join(plat, mach)
# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
path = os.path.dirname(__file__) if path is None \
else os.path.normpath(path)
plat = platform.system().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
name = '_pytransform' + suffix
if plat == 'linux':
filename = os.path.abspath(os.path.join(path, name + '.so'))
elif plat in ('darwin', 'ios'):
filename = os.path.join(path, name + '.dylib')
elif plat == 'windows':
filename = os.path.join(path, name + '.dll')
elif plat in ('freebsd', 'poky'):
filename = os.path.join(path, name + '.so')
else:
filename = None
if platid is not None and os.path.isfile(platid):
filename = platid
elif platid is not None or not os.path.exists(filename) or not is_runtime:
libpath = platid if platid is not None and os.path.isabs(platid) else \
os.path.join(path, plat_path, format_platform(platid))
filename = os.path.join(libpath, os.path.basename(filename))
if filename is None:
raise PytransformError('Platform %s not supported' % plat)
if not os.path.exists(filename):
raise PytransformError('Could not find "%s"' % filename)
try:
m = cdll.LoadLibrary(filename)
except Exception as e:
if sys.flags.debug:
print('Load %s failed:\n%s' % (filename, e))
raise
# Removed from v4.6.1
# if plat == 'linux':
# m.set_option(-1, find_library('c').encode())
if not os.path.abspath('.') == os.path.abspath(path):
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
# Required from Python3.6
m.set_option(2, sys.byteorder.encode())
if sys.flags.debug:
m.set_option(3, c_char_p(1))
m.set_option(4, c_char_p(not is_runtime))
# Disable advanced mode by default
m.set_option(5, c_char_p(not advanced))
# Set suffix for private package
if suffix:
m.set_option(6, suffix.encode())
return m
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
global _pytransform
_pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
return init_pytransform()
def pyarmor_runtime(path=None, suffix='', advanced=0):
if _pytransform is not None:
return
try:
pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
init_runtime()
except Exception as e:
if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
raise
sys.stderr.write("%s\n" % str(e))
sys.exit(1)
# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------
#
# Not available from v5.6
#
def generate_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey, newkey = _generate_pytransform_key(licfile, pubkey)
return prikey, pubkey, capkey, newkey, prolic
@dllmethod
def _generate_project_capsule():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('generate_project_capsule', _pytransform))
return dlfunc()
@dllmethod
def _generate_pytransform_key(licfile, pubkey):
prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
dlfunc = prototype(('generate_pytransform_key', _pytransform))
return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
pubkey)
#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
dlfunc = prototype(('encrypt_project_files', _pytransform))
return dlfunc(proname.encode(), filelist, mode)
def generate_project_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey = _encode_capsule_key_file(licfile)
return prikey, pubkey, capkey, prolic
@dllmethod
def _encode_capsule_key_file(licfile):
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
dlfunc = prototype(('encode_capsule_key_file', _pytransform))
return dlfunc(licfile.encode(), None)
@dllmethod
def encrypt_files(key, filelist, mode=0):
t_key = c_char * 32
prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
dlfunc = prototype(('encrypt_files', _pytransform))
return dlfunc(t_key(*key), filelist, mode)
@dllmethod
def generate_module_key(pubname, key):
t_key = c_char * 32
prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
dlfunc = prototype(('generate_module_key', _pytransform))
return dlfunc(pubname.encode(), t_key(*key), None)
#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
'''Only for old version, before PyArmor 3'''
pyarmor_init(is_runtime=1)
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
@dllmethod
def import_module(modname, filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
_import_module = prototype(('import_module', _pytransform))
return _import_module(modname.encode(), filename.encode())
@dllmethod
def exec_file(filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(c_int, c_char_p)
_exec_file = prototype(('exec_file', _pytransform))
return _exec_file(filename.encode())
|
zwlib
|
/zwlib-1.2.0.tar.gz/zwlib-1.2.0/pytransform/__init__.py
|
__init__.py
|
from distutils.core import setup
try:
with open("README.md","r") as fh:
long_description = fh.read()
except:
long_description = 'OOP Learning Example By Zawanee Makeng'
setup(
name = 'zwnschool', # How you named your package folder (MyLib)
packages = ['zwnschool'],   # Choose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'OOP Learning Example By Zawanee Makeng', # Give a short description about your library
long_description=long_description,
long_description_content_type = "text/markdown",
author = 'Zawanee Makeng', # Type in your name
author_email = '[email protected]', # Type in your E-Mail
url = 'https://github.com/zawaneemakeng/zwnschool', # Provide either the link to your github or to your website
download_url = 'https://github.com/zawaneemakeng/zwnschool/archive/v_01.tar.gz',    # Link to the source archive for this release
keywords = ['OOP Python', 'THAI', 'Zawanee Makeng'], # Keywords that define your package best
classifiers=[
'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers',      # Define that your audience is developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',   # Again, pick a license
'Programming Language :: Python :: 3',      # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
zwnschool
|
/zwnschool-0.1.tar.gz/zwnschool-0.1/setup.py
|
setup.py
|
zwoasi
==========
A python binding to the ZWO ASI version 2 library.
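A minimal capture sketch follows; the function and class names are assumptions
based on typical usage of this binding and should be checked against the
package source and the bundled example scripts.

.. code-block:: python

    import zwoasi as asi

    # Path to the vendor SDK shared library is illustrative; adjust for your system.
    asi.init('/usr/local/lib/libASICamera2.so')

    if asi.get_num_cameras() == 0:
        raise RuntimeError('No ZWO ASI cameras found')

    camera = asi.Camera(0)                   # open the first detected camera
    camera.capture(filename='capture.tiff')  # single exposure saved to disk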
|
zwoasi
|
/zwoasi-0.0.22.tar.gz/zwoasi-0.0.22/README.rst
|
README.rst
|
Credits
=======
"zwoasi" is written and maintained by Steve Marple.
Contributors
------------
Please append your name here when you submit your first pull request.
|
zwoasi
|
/zwoasi-0.0.22.tar.gz/zwoasi-0.0.22/AUTHORS.rst
|
AUTHORS.rst
|
import codecs
import glob
import os
import re
from setuptools import setup, find_packages
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), 'rb', 'utf-8') as f:
return f.read()
def find_version(*file_paths):
"""
Build a path from *file_paths* and search for a ``__version__``
string inside.
"""
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
print('==================================')
print(find_packages(exclude=['tests*']))
print('==================================')
setup(
name='zwoasi',
version=find_version('zwoasi/__init__.py'),
description='Python binding for the ZWO ASI v2 library.',
long_description=(read('README.rst') + '\n\n' +
read('AUTHORS.rst')),
url='https://github.com/stevemarple/python-zwoasi',
license='MIT',
author='Steve Marple',
author_email='[email protected]',
packages=find_packages(exclude=['tests*']),
install_requires=['numpy>=1.7', 'six'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
scripts=glob.glob('zwoasi/bin/*.py'),
)
|
zwoasi
|
/zwoasi-0.0.22.tar.gz/zwoasi-0.0.22/setup.py
|
setup.py
|
from __future__ import annotations
import datetime as dt
import typing as t
from collections import deque
from dataclasses import dataclass
from enum import Enum, StrEnum, auto
from textwrap import dedent
from parsimonious.grammar import Grammar
from parsimonious.nodes import Node, NodeVisitor
RAW_GRAMMAR = r"""
workout = ((comment / block) elws*)+ / elws
block = tag ws "{" ((comment / params) / elws)+ "}"
params = (message / value) ","?
value = tag ws (string / range / rangeval)
message = "@" ws duration ws string
range = rangeval ws "->" ws rangeval
rangeval = duration / numeric / zone
duration = number ":" number
percent = number "%"
zone = ("Z" number) / "SS"
numeric = percent / number
elws = ws / emptyline
comment = ~r"\;[^\r\n]*"
tag = ~"[A-Z_]+"
string = ~'"[^\"]+"'
number = ~"\d+"
ws = ~"\s*"
emptyline = ws+
"""
GRAMMAR = Grammar(RAW_GRAMMAR)
class Tag(StrEnum):
AUTHOR = auto()
CADENCE = auto()
COOLDOWN = auto()
DESCRIPTION = auto()
DURATION = auto()
FREE = auto()
FTP = auto()
INTERVALS = auto()
META = auto()
NAME = auto()
POWER = auto()
RAMP = auto()
REPEAT = auto()
SEGMENT = auto()
TAGS = auto()
WARMUP = auto()
MESSAGES = auto() # Included for tidier housekeeping, not a valid keyword in the ZWO file
# Repeat segment metasyntax
START_REPEAT = auto()
END_REPEAT = auto()
@dataclass(frozen=True, slots=True)
class Percentage:
value: int
def __str__(self) -> str:
return f"{self.value / 100:0.3f}"
@classmethod
def from_node(cls, node: Node) -> Percentage:
return cls(value=int(node.text.rstrip("%")))
class PowerZone(Enum):
Z1 = Percentage(value=50)
Z2 = Percentage(value=65)
Z3 = Percentage(value=81)
SS = Percentage(value=90)
Z4 = Percentage(value=95)
Z5 = Percentage(value=109)
Z6 = Percentage(value=125)
Z7 = Percentage(value=150)
def __str__(self) -> str:
return str(self.value)
@dataclass(frozen=True, slots=True)
class Duration:
value: dt.timedelta
def __str__(self) -> str:
return str(int(self.value.total_seconds()))
@classmethod
def from_node(cls, node: Node) -> Duration:
minutes, seconds = (int(chunk) for chunk in node.text.split(":"))
return cls(value=dt.timedelta(minutes=minutes, seconds=seconds))
RANGE_T = Percentage | Duration | PowerZone | int
@dataclass(frozen=True, slots=True)
class Range:
left: RANGE_T
right: RANGE_T
@classmethod
def from_node(cls, visited_children: list[Node]) -> Range:
left, *_, right = visited_children
# I'm not sure how best to keep the numeric values from nesting; I might be misunderstanding
# how the parser works, or something in the grammar may be written poorly, but for now this
# hack works
if isinstance(left, list):
left = left[0]
if isinstance(right, list):
right = right[0]
return cls(left=left, right=right)
@dataclass(frozen=True, slots=True)
class Message:
timestamp: Duration
message: str
@classmethod
def from_node(cls, visited_children: list[Node]) -> Message:
_, _, timestamp, _, message = visited_children
return cls(timestamp=timestamp, message=message)
T = t.TypeVar("T")
def deep_flatten(in_iter: list, key_type: type[T]) -> t.Generator[T, None, None]:
"""Accept an arbitrary list of lists and yield objects of the matching data type."""
# Use a deque as an iterator stack to keep track of any nested iterables
iterators = deque((iter(in_iter),))
# Iterate over the elements of each iterable & add them to the stack if they're also a list,
# otherwise yield only dicts & then pop the iterable once exhausted
while iterators:
for item in iterators[-1]:
if isinstance(item, list):
iterators.append(iter(item))
break
elif isinstance(item, key_type):
yield item
else:
iterators.pop()
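# For example (sketch): list(deep_flatten([{'a': 1}, [{'b': 2}, 3]], key_type=dict))
# walks the nested lists and yields only the dicts, i.e. [{'a': 1}, {'b': 2}].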
VAL_T = int | str | Percentage | Duration | Range | list[Message] | None
PARAM_T = dict[Tag, VAL_T]
BLOCK_T = dict[Tag, PARAM_T]
class ZWOVisitor(NodeVisitor):
grammar = GRAMMAR
# Indices of visited_children are determined by the grammar specification
def visit_workout(self, node: Node, visited_children: list[Node]) -> list[BLOCK_T]:
# Catch an empty document
if not node.text.strip():
return []
blocks = []
for chunk in visited_children[0]:
# The grammar here matches comments or blocks, if there are no dictionaries then we
# have a comment, which we just discard
if block := list(deep_flatten(chunk, key_type=dict)):
blocks.append(block[0])
return blocks
def visit_block(self, node: Node, visited_children: list[Node]) -> BLOCK_T:
tag = visited_children[0]
params = list(deep_flatten(visited_children[-2], key_type=dict))
block_messages = list(deep_flatten(visited_children[-2], key_type=Message))
block_params: BLOCK_T = {tag: {key: val for param in params for key, val in param.items()}}
block_params[Tag.MESSAGES] = block_messages if block_messages else None # type: ignore[assignment] # noqa: E501
return block_params
def visit_value(self, node: Node, visited_children: list[Node]) -> PARAM_T:
tag, _, value = visited_children
# I'm not sure how best to keep the numeric values from nesting; I might be misunderstanding
# how the parser works, or something in the grammar may be written poorly, but for now this
# hack works
val = value[0]
if isinstance(val, list):
val = val[0]
return {tag: val}
def visit_string(self, node: Node, visited_children: list[Node]) -> str:
return dedent(node.text.strip('"'))
def visit_range(self, node: Node, visited_children: list[Node]) -> Range:
return Range.from_node(visited_children)
def visit_duration(self, node: Node, visited_children: list[Node]) -> Duration:
return Duration.from_node(node)
def visit_tag(self, node: Node, visited_children: list[Node]) -> Tag:
return Tag[node.text]
def visit_message(self, node: Node, visited_children: list[Node]) -> Message:
return Message.from_node(visited_children)
def visit_numeric(self, node: Node, visited_children: list[Node]) -> int | Percentage:
return visited_children[0] # type: ignore[no-any-return]
def visit_number(self, node: Node, visited_children: list[Node]) -> int:
return int(node.text)
def visit_percent(self, node: Node, visited_children: list[Node]) -> Percentage:
return Percentage.from_node(node)
def visit_zone(self, node: Node, visited_children: list[Node]) -> PowerZone:
return PowerZone[node.text]
def generic_visit(self, node: Node, visited_children: list[Node]) -> list[Node] | Node:
return visited_children or node
def parse_src(src: str) -> list[BLOCK_T]:
"""Parse the provided source into a list of raw workout blocks."""
tree = ZWOVisitor.grammar.parse(src)
visitor = ZWOVisitor()
parsed: list[BLOCK_T] = visitor.visit(tree)
return parsed
|
zwolang
|
/zwolang-0.3.0-py3-none-any.whl/zwo/parser.py
|
parser.py
|
from collections import abc
from zwo.parser import BLOCK_T, PARAM_T, Range, Tag, VAL_T
class ZWOMValidationError(BaseException): # noqa: D101
...
def _check_keys(required: set[Tag], check_tags: abc.KeysView[Tag], block_tag: Tag) -> bool:
missing = required - check_tags
if missing:
pretty_tags = ", ".join(tag.upper() for tag in missing)
raise ZWOMValidationError(f"{block_tag.upper()} block missing required keys: {pretty_tags}")
return True
class ZWOMValidator:
raw_blocks: list[BLOCK_T]
validated_blocks: list[BLOCK_T]
_ftp: int | None
_in_repeat: bool
_n_repeats: int
__slots__ = ("raw_blocks", "validated_blocks", "_ftp", "_in_repeat", "_n_repeats")
def __init__(self, raw_blocks: list[BLOCK_T]) -> None:
self.raw_blocks = raw_blocks
self._ftp = None
self.validated_blocks = self.validate_scanned()
def validate_scanned(self) -> list[BLOCK_T]:
if Tag.META not in self.raw_blocks[0]:
raise ZWOMValidationError("ZWOM file must begin with a META block")
self.visit_meta_block(self.raw_blocks[0][Tag.META], Tag.META)
# To account for expansion of any chunk repeats we need to build a new list
# Initialize it with the META block since we skip it in the rest of the validation
validated_blocks: list[BLOCK_T] = [self.raw_blocks[0]]
repeat_blocks = []
self._in_repeat = False
for block in self.raw_blocks[1:]:
# Blocks only have one key, so we can dispatch validators using the first key
block_tag = next(iter(block))
params = block[block_tag]
match block_tag:
case Tag.FREE | Tag.SEGMENT:
self.visit_segment_block(params, block_tag)
case Tag.RAMP | Tag.WARMUP | Tag.COOLDOWN:
self.visit_ramp_block(params, block_tag)
case Tag.INTERVALS:
self.visit_interval_block(params, block_tag)
case Tag.START_REPEAT:
self.visit_start_repeat_block(params, block_tag)
case Tag.END_REPEAT:
self.visit_end_repeat_block(params, block_tag)
case _:
raise ZWOMValidationError(f"Unknown workout tag: '{block_tag}'")
# Dispatch any additional generic parameter validation within the block
for param, val in params.items():
match param:
case Tag.POWER:
self.visit_power(val)
case Tag.CADENCE:
self.visit_cadence(val, block_tag)
case _:
continue
if self._in_repeat:
# Don't include the repeat metablocks in the final output
if block_tag != Tag.START_REPEAT:
repeat_blocks.append(block)
else:
# Check to see if we've just hit the END_REPEAT tag & dump the blocks accordingly
if repeat_blocks:
validated_blocks.extend(repeat_blocks * self._n_repeats)
repeat_blocks.clear()
self._n_repeats = -1
else:
# Don't include the repeat metablocks in the final output
if block_tag != Tag.END_REPEAT:
validated_blocks.append(block)
# Make sure the chunk repeat block was closed
if self._in_repeat:
raise ZWOMValidationError("START_REPEAT is missing a matching END_REPEAT.")
return validated_blocks
def visit_meta_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.NAME, Tag.AUTHOR, Tag.DESCRIPTION}
_check_keys(required_tags, params.keys(), block_tag)
ftp = params.get(Tag.FTP)
if ftp is not None:
if isinstance(ftp, int):
if ftp == 0: # The parser already won't accept negative numbers
raise ZWOMValidationError(f"FTP must be > 0, received: {ftp}")
self._ftp = ftp
else:
raise ZWOMValidationError(
f"FTP must be a positive integer, received: '{type(ftp).__name__}'"
)
def visit_segment_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.DURATION}
if block_tag == Tag.SEGMENT:
required_tags = required_tags | {Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_ramp_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.DURATION, Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_interval_block(self, params: PARAM_T, block_tag: Tag) -> None:
required_tags = {Tag.REPEAT, Tag.DURATION, Tag.POWER}
_check_keys(required_tags, params.keys(), block_tag)
def visit_start_repeat_block(self, params: PARAM_T, block_tag: Tag) -> None:
if self._in_repeat:
raise ZWOMValidationError("Nested block chunk repetition is not supported.")
required_tags = {Tag.REPEAT}
_check_keys(required_tags, params.keys(), block_tag)
n_repeats = params[Tag.REPEAT] # If we're here we know the key is there
if not isinstance(n_repeats, int):
raise ZWOMValidationError("START_REPEAT must have an integer REPEAT value.")
if n_repeats == 0:
raise ZWOMValidationError("REPEAT must be > 0.")
self._in_repeat = True
self._n_repeats = n_repeats
def visit_end_repeat_block(self, params: PARAM_T, block_tag: Tag) -> None:
if not self._in_repeat:
raise ZWOMValidationError("Missing opening START_REPEAT block.")
self._in_repeat = False
def visit_power(self, power_spec: VAL_T) -> None:
# Validate that an FTP is set in order to use absolute watts
if isinstance(power_spec, int):
if power_spec == 0: # The parser already won't accept negative numbers
raise ZWOMValidationError(f"Power must be > 0, received: {power_spec}")
if not self._ftp:
raise ZWOMValidationError(
"An FTP must be specified in the META block to use absolute watts."
)
elif isinstance(power_spec, Range):
if not self._ftp:
if isinstance(power_spec.left, int) or isinstance(power_spec.right, int):
raise ZWOMValidationError(
"An FTP must be specified in the META block to use absolute watts."
)
def visit_cadence(self, cadence_spec: VAL_T, block_tag: Tag) -> None:
# Cadence range is only valid for use in an interval block
if isinstance(cadence_spec, Range) and block_tag != Tag.INTERVALS:
raise ZWOMValidationError("Cadence ranges are only valid for Interval blocks.")
if block_tag == Tag.INTERVALS and not isinstance(cadence_spec, Range):
raise ZWOMValidationError("Cadence spec for Interval blocks must be a range.")
|
zwolang
|
/zwolang-0.3.0-py3-none-any.whl/zwo/interpreter.py
|
interpreter.py
|
from dataclasses import dataclass
from io import StringIO
from pathlib import Path
from xml.dom import minidom
from zwo.interpreter import ZWOMValidator
from zwo.parser import (
BLOCK_T,
Duration,
Message,
PARAM_T,
Percentage,
PowerZone,
Range,
Tag,
parse_src,
)
STATIC_META_PARAMS = {"sportType": "bike"}
BLOCK_MAPPING = {
Tag.COOLDOWN: "Cooldown",
Tag.FREE: "FreeRide",
Tag.INTERVALS: "IntervalsT",
Tag.RAMP: "Ramp",
Tag.SEGMENT: "SteadyState",
Tag.WARMUP: "WarmUp",
}
@dataclass(slots=True)
class Workout:
blocks: list[BLOCK_T]
ftp: int | None = None
def to_zwo(self, out_filepath: Path) -> None:
doc = minidom.Document()
root = doc.createElement("workout_file")
doc.appendChild(root)
# If we're here then we've validated that the meta tag is the first block
doc = self.serialize_meta(doc, root, self.blocks[0][Tag.META])
doc = self.serialize_workout_blocks(doc, root, self.blocks[1:])
# Drop encoding line before writing the XML, Zwift doesn't use it
buff = StringIO()
buff.write(doc.toprettyxml(indent=" " * 4))
buff.seek(0)
_ = buff.readline()
out_filepath.write_text(buff.read())
def serialize_meta(
self, doc: minidom.Document, root: minidom.Element, meta_block: PARAM_T
) -> minidom.Document:
for tag, val in meta_block.items():
if tag == Tag.FTP:
continue
tmp = doc.createElement(tag)
if tag == Tag.TAGS:
if not isinstance(val, str):
raise ValueError("Type narrowing, shouldn't be able to get here")
for hashtag in val.split():
sub_tag = doc.createElement("tag")
sub_tag.setAttribute("name", hashtag.lstrip("#"))
tmp.appendChild(sub_tag)
else:
if tag == Tag.DESCRIPTION:
if not isinstance(val, str):
raise ValueError("Type narrowing, shouldn't be able to get here")
tmp.appendChild(doc.createTextNode(val))
root.appendChild(tmp)
# Add any remaining static parameters that Zwift is expecting
for element, val in STATIC_META_PARAMS.items():
tmp = doc.createElement(element)
tmp.appendChild(doc.createTextNode(val))
root.appendChild(tmp)
return doc
def serialize_workout_blocks(
self, doc: minidom.Document, root: minidom.Element, blocks: list[BLOCK_T]
) -> minidom.Document:
workout = doc.createElement("workout")
root.appendChild(workout)
n_blocks = len(blocks)
for idx, block in enumerate(blocks, start=1):
# Blocks only have one key, so we can dispatch serializers using the first key
block_tag = next(iter(block))
params = block[block_tag]
match block_tag:
case Tag.FREE:
block_element = self._build_simple_block(
doc, BLOCK_MAPPING[block_tag], params, add_flat_road=True
)
case Tag.SEGMENT:
block_element = self._build_simple_block(
doc, BLOCK_MAPPING[block_tag], params, add_power=True, add_pace=True
)
case Tag.RAMP | Tag.WARMUP | Tag.COOLDOWN:
zwift_key = _classify_ramp_type(idx, n_blocks)
block_element = self._build_simple_block(doc, zwift_key, params, add_pace=True)
block_element = self.serialize_ramp(block_element, params)
case Tag.INTERVALS:
block_element = self._build_simple_block(
doc,
BLOCK_MAPPING[block_tag],
params,
add_duration=False,
add_cadence=False, # Unlike the other blocks, intervals have a range
add_pace=True,
)
block_element = self.serialize_interval(block_element, params)
case _:
...
if messages := block.get(Tag.MESSAGES):
if not isinstance(messages, list):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element = self.serialize_messages(doc, block_element, messages)
workout.appendChild(block_element)
return doc
def _build_simple_block(
self,
doc: minidom.Document,
zwift_key: str,
params: PARAM_T,
add_duration: bool = True,
add_cadence: bool = True,
add_power: bool = False,
add_flat_road: bool = False,
add_pace: bool = False,
) -> minidom.Element:
block_element: minidom.Element = doc.createElement(zwift_key)
if add_duration:
block_element.setAttribute("Duration", str(params[Tag.DURATION]))
if add_cadence and (cadence := params.get(Tag.CADENCE)):
block_element.setAttribute("Cadence", str(cadence))
if add_power:
power = params[Tag.POWER]
if not isinstance(power, (int, Percentage, PowerZone)):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("Power", self.serialize_power(power))
if add_flat_road:
block_element.setAttribute("FlatRoad", "0")
if add_pace:
block_element.setAttribute("pace", "0")
return block_element
def serialize_ramp(self, block_element: minidom.Element, params: PARAM_T) -> minidom.Element:
power_range = params[Tag.POWER]
if not isinstance(power_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
if isinstance(power_range.left, Duration) or isinstance(power_range.right, Duration):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("PowerLow", self.serialize_power(power_range.left))
block_element.setAttribute("PowerHigh", self.serialize_power(power_range.right))
return block_element
def serialize_interval(
self, block_element: minidom.Element, params: PARAM_T
) -> minidom.Element:
block_element.setAttribute("Repeat", str(params[Tag.REPEAT]))
duration_range = params[Tag.DURATION]
if not isinstance(duration_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("OnDuration", str(duration_range.left))
block_element.setAttribute("OffDuration", str(duration_range.right))
power_range = params[Tag.POWER]
if not isinstance(power_range, Range):
raise ValueError("Type narrowing, shouldn't be able to get here")
if isinstance(power_range.left, Duration) or isinstance(power_range.right, Duration):
raise ValueError("Type narrowing, shouldn't be able to get here")
block_element.setAttribute("OnPower", self.serialize_power(power_range.left))
block_element.setAttribute("OffPower", self.serialize_power(power_range.right))
cadence_range = params.get(Tag.CADENCE)
if cadence_range is not None:
    if not isinstance(cadence_range, Range):
        raise ValueError("Type narrowing, shouldn't be able to get here")
    block_element.setAttribute("Cadence", str(cadence_range.left))
    block_element.setAttribute("CadenceResting", str(cadence_range.right))
return block_element
def serialize_messages(
self, doc: minidom.Document, root: minidom.Element, messages: list[Message]
) -> minidom.Element:
for message in messages:
msg = doc.createElement("textevent")
msg.setAttribute("timeoffset", str(message.timestamp))
msg.setAttribute("message", message.message)
root.appendChild(msg)
return root
def serialize_power(self, power: int | Percentage | PowerZone) -> str:
if isinstance(power, int):
if self.ftp is None:
raise ValueError("Type narrowing, shouldn't be able to get here")
return str(power / self.ftp)
else:
return str(power)
def _classify_ramp_type(block_idx: int, n_blocks: int) -> str:
"""
Locate the appropriate Zwift block tag for the provided ramp block location.
While there is no specific Ramp block in the workout building UI, some experimental observations
have been made:
* If a ramp is at the very beginning of the workout, Zwift serializes it as a Warmup block
* If there are multiple blocks in a workout and a ramp is at the end, Zwift serializes it
as a Cooldown block
* If there are multiple blocks in a workout and a ramp is not at the beginning or the end,
Zwift serializes it as a Ramp block
"""
if block_idx == 1:
return BLOCK_MAPPING[Tag.WARMUP]
if block_idx == n_blocks:
return BLOCK_MAPPING[Tag.COOLDOWN]
else:
return BLOCK_MAPPING[Tag.RAMP]
def convert_zwom(zwom_filepath: Path, out_filepath: Path | None = None) -> None:
"""
Validate and convert the provided ZWOM file to ZWO.
If no `out_filepath` is provided, the resulting ZWO file is written to the same directory as the
input ZWOM file.
NOTE: Any existing ZWO files sharing the specified name will be overwritten.
"""
if out_filepath is None:
out_filepath = zwom_filepath.with_suffix(".zwo")
blocks = parse_src(zwom_filepath.read_text())
val = ZWOMValidator(blocks)
wo = Workout(val.validated_blocks, val._ftp)
wo.to_zwo(out_filepath)
|
zwolang
|
/zwolang-0.3.0-py3-none-any.whl/zwo/serialize.py
|
serialize.py
|
from pathlib import Path
import click
import typer
from sco1_misc import prompts
from zwo.serialize import convert_zwom
zwom_cli = typer.Typer(add_completion=False, no_args_is_help=True)
@zwom_cli.command()
def single(
zwom_file: Path = typer.Option(None, exists=True, dir_okay=False),
out_file: Path = typer.Option(None),
) -> None:
"""
Convert the specified `*.zwom` file to Zwift's `*.zwo`.
`out_file` may be optionally specified, otherwise the file will be output to the same directory
with the file extension swapped.
NOTE: Any existing `*.zwo` file with the same name will be overwritten.
"""
if zwom_file is None:
try:
zwom_file = prompts.prompt_for_file(
title="Select ZWOM file for conversion", filetypes=[("ZWOM", "*.zwom")]
)
except ValueError:
raise click.ClickException("No file selected, aborting.")
convert_zwom(zwom_file, out_file)
@zwom_cli.command()
def batch(
top_dir: Path = typer.Option(None, exists=True, dir_okay=True),
recursive: bool = typer.Option(False),
) -> None:
"""
Discover and convert all `*.zwom` files in the given directory.
NOTE: Any existing `*.zwo` file with the same name will be overwritten.
"""
if top_dir is None:
try:
top_dir = prompts.prompt_for_dir(title="Select ZWOM directory")
except ValueError:
raise click.ClickException("No directory selected, aborting.")
pattern = "*.zwom"
if recursive:
pattern = f"**/{pattern}"
for file in top_dir.glob(pattern):
convert_zwom(file)
if __name__ == "__main__": # pragma: no cover
zwom_cli()
|
zwolang
|
/zwolang-0.3.0-py3-none-any.whl/zwo/cli.py
|
cli.py
|
# Personal PDF utils
Notes:
- Uses [pdf2htmlex](https://github.com/coolwanglu/pdf2htmlEX) for the pdf -> html conversion; by default it is run via the docker image (docker pull bwits/pdf2htmlex), as in the sketch below
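A hedged sketch of driving that conversion from Python; the `/pdf` mount point and the exact pdf2htmlEX invocation are assumptions and should be checked against the image's documentation:

```python
import subprocess
from pathlib import Path

def pdf_to_html(pdf_path: str) -> None:
    """Convert a PDF to HTML by shelling out to the bwits/pdf2htmlex docker image."""
    pdf = Path(pdf_path).resolve()
    subprocess.run(
        [
            "docker", "run", "--rm",
            "-v", f"{pdf.parent}:/pdf",  # mount point assumed from the image docs
            "bwits/pdf2htmlex",
            "pdf2htmlEX", pdf.name,      # output <name>.html is written next to the PDF
        ],
        check=True,
    )
```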
|
zwpdf
|
/zwpdf-0.0.3.tar.gz/zwpdf-0.0.3/README.md
|
README.md
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from codecs import open
pkg_name = 'zwpdf'
here = os.path.abspath(os.path.dirname(__file__))
packages = [pkg_name]
requires = [s.strip() for s in open('requirements.txt').readlines()]
test_requirements = [s.strip() for s in open('requirements_dev.txt').readlines()][4:]
about = {}
lines = []
with open(os.path.join(here, pkg_name, '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
# auto-increment the patch version number for every dist upload
verarr = about['__version__'].split('.')
verarr[2] = str(int(verarr[2])+1)
about['__version__'] = '.'.join(verarr)
f.seek(0)
lines = f.readlines()
lines[0] = "__version__ = '%s'\n"%about['__version__']
with open(os.path.join(here, pkg_name, '__version__.py'), 'w', 'utf-8') as f:
f.writelines(lines)
with open('README.md', 'r') as f:
readme = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
license=about['__license__'],
packages=packages,
package_data={'': ['LICENSE', 'NOTICE']},
package_dir={pkg_name:pkg_name},
include_package_data=True,
install_requires=requires,
tests_require=test_requirements,
python_requires='>=3.6',
platforms=["all"],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
)
|
zwpdf
|
/zwpdf-0.0.3.tar.gz/zwpdf-0.0.3/setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import pytest
from zwpdf.pdf import Pdf
@pytest.mark.parametrize(
't, pth, result', (
(0, 'data/test/《政策预期差》系列篇七:土地改革是下一个突破口吗-20191226-国泰君安-10页.pdf', ('title','《政策预期差》系列篇七:土地改革是下一个突破口吗')),
(1, 'data/test/巴克莱-美股-投资策略-宏观策略:数据显示市场热情尚未被激发-2019.12.10-33页.pdf', ('title', '宏观策略:数据显示市场热情尚未被激发')),
)
)
def test_meta_from_filename(t, pth, result):
p = Pdf(pth)
if t == 0:
r = r'(.+)-(\d+)-(.+)-(\d+)页'
arr = ['title', 'pub_date', 'source', 'pages']
elif t == 1:
r = r'(.+)-(.+)-(.+)-(.+)-(.+)-(\d+)页'
arr = ['source', 'cate', 'subcate', 'title', 'pub_date', 'pages']
r = p.meta_from_filename(r, arr)
assert r[result[0]] == result[1]
|
zwpdf
|
/zwpdf-0.0.3.tar.gz/zwpdf-0.0.3/tests/test_pdf.py
|
test_pdf.py
|
import pytest
|
zwpdf
|
/zwpdf-0.0.3.tar.gz/zwpdf-0.0.3/tests/conftest.py
|
conftest.py
|
import pytest
|
zwpool
|
/zwpool-0.0.2-py3-none-any.whl/tests/conftest.py
|
conftest.py
|
# -*- coding: utf-8 -*-
import pytest
import os
import sys
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
sys.path.insert(0, PARENT_DIR)
from zwpool.websvr import run_websvr
if __name__ == '__main__':
run_websvr()
|
zwpool
|
/zwpool-0.0.2-py3-none-any.whl/tests/test_websvr.py
|
test_websvr.py
|
# -*- coding: utf-8 -*-
import pytest
import os
import sys
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
sys.path.insert(0, PARENT_DIR)
from zwutils.config import Config
from zwpool.proxy.proxypool import ProxyPool
from zwutils import logger
LOG = logger.logger(__name__, cfg='conf/log.json')
if __name__ == '__main__':
cfg = Config('conf/proxypool.json', default={'db':'redis://:pwd@host:port/0'})
pp = ProxyPool(cfg)
# pp.fetch_proxy()
# proxy = pp.random_proxy()
# pp.test_proxy()
|
zwpool
|
/zwpool-0.0.2-py3-none-any.whl/tests/test_proxypool.py
|
test_proxypool.py
|
'''
zwpython
Testing....
'''
|
zwpy
|
/zwpy-0.1.10.tar.gz/zwpy-0.1.10/zwpy.py
|
zwpy.py
|
# zwpython
<hr>
Under test...
|
zwpy
|
/zwpy-0.1.10.tar.gz/zwpy-0.1.10/README.md
|
README.md
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'zwpy'
# DESCRIPTION = 'zwpython (Chinese Python): write Python code in Chinese!'
# URL = 'https://tx.glsnh.cn/zwpython.html'
DESCRIPTION = 'zwpython, under test...'
URL = 'https://pypi.org/project/zwpython/'
EMAIL = '[email protected]'
AUTHOR = '山东郭老师'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = '0.1.10'
# What packages are required for this module to be executed?
REQUIRED = ['']
# 'requests', 'maya', 'records',
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
# packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# packages=find_packages(exclude=["ku_zw"]),
# If your package is a single module, use this instead of 'packages':
py_modules=['zwpy'],
# packages=["zwpython"],
# package_dir={"zwpython": "zwpython"},
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
zwpy
|
/zwpy-0.1.10.tar.gz/zwpy-0.1.10/setup.py
|
setup.py
|
# zwpython
<hr>
Under test...
|
zwpython
|
/zwpython-0.1.11.tar.gz/zwpython-0.1.11/README.md
|
README.md
|
'''
zwpython
Testing....
'''
|
zwpython
|
/zwpython-0.1.11.tar.gz/zwpython-0.1.11/zwpython.py
|
zwpython.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'zwpython'
# DESCRIPTION = 'zwpython (Chinese Python): write Python code in Chinese!'
# URL = 'https://tx.glsnh.cn/zwpython.html'
DESCRIPTION = 'zwpython, under test...'
URL = 'https://pypi.org/project/zwpython/'
EMAIL = '[email protected]'
AUTHOR = '山东郭老师'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = '0.1.11'
# What packages are required for this module to be executed?
REQUIRED = ['']
# 'requests', 'maya', 'records',
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
# packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# packages=find_packages(exclude=["ku_zw"]),
# If your package is a single module, use this instead of 'packages':
py_modules=['zwpython'],
# packages=["zwpython"],
# package_dir={"zwpython": "zwpython"},
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
zwpython
|
/zwpython-0.1.11.tar.gz/zwpython-0.1.11/setup.py
|
setup.py
|
This is a library that returns a sequence of zero-width characters, AKA ZWSP, with the length controlled by the
function's parameters.
Start by doing this
from zero_width_char import zero_wc
char = zero_wc(count=1) # Int
print(len(char))
Yes, it's that simple.
|
zwsp-char-function
|
/zwsp_char_function-1.0.tar.gz/zwsp_char_function-1.0/README.txt
|
README.txt
|
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows :: Windows 10',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='zwsp_char_function',
version='1.0',
description='a library that returns a sequence of zero-width characters',
long_description=open('README.txt').read() + '\n\n' + open('CHANGELOG.txt').read(),
url='',
author='Churning Lava',
author_email='[email protected]',
license='MIT',
classifiers=classifiers,
keywords='zero width character',
packages=['zero_width_char'],
install_requires=['']
)
|
zwsp-char-function
|
/zwsp_char_function-1.0.tar.gz/zwsp_char_function-1.0/setup.py
|
setup.py
|
def zero_wc(count=1):
    # Returns a string of `count` zero-width space characters (U+200B).
    # The character itself is invisible, so it is written as an escape here.
    return '\u200b' * count
|
zwsp-char-function
|
/zwsp_char_function-1.0.tar.gz/zwsp_char_function-1.0/zero_width_char/__init__.py
|
__init__.py
|
# zwsp-steg-py
Zero-Width Space Steganography. Encodes and decodes hidden messages as non printable/readable characters.
This repository is a Python porting of [zwsp-steg-js](https://github.com/offdev/zwsp-steg-js).
All credits to [offdev](https://github.com/offdev)!
### Installation
```bash
$ pip install zwsp-steg-py
```
### Usage Example
```.py
import zwsp_steg
encoded = zwsp_steg.encode('hidden message')
decoded = zwsp_steg.decode(encoded)
print(decoded) # hidden message
```
Note that decoding a message will ignore all non-'special' characters. That means if you hide your message within a readable string and decode the whole string, you will get back only the hidden message, as in the example below.
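A small sketch using only the API shown above:
```.py
import zwsp_steg

secret = zwsp_steg.encode('hidden message')
cover = 'Nothing to see here.' + secret + ' Move along.'

print(zwsp_steg.decode(cover))  # hidden message
```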
### Parameters
You can use different sets of characters in different encoding / decoding modes.
```.py
import zwsp_steg
zwsp_steg.encode('hidden message', zwsp_steg.MODE_ZWSP)
zwsp_steg.encode('hidden message', zwsp_steg.MODE_FULL)
```
#### Character sets used
- **MODE_ZWSP**: Zero-Width Space (\u200b), Zero-Width Non-Joiner (\u200c), Zero-Width Joiner (\u200d)
- **MODE_FULL**: All MODE_ZWSP characters, Left-To-Right Mark (\u200e), Right-To-Left Mark (\u200f)
### License
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
zwsp-steg-py
|
/zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/README.md
|
README.md
|
from setuptools import find_packages, setup
setup(
name='zwsp-steg-py',
version=__import__('zwsp_steg').__version__,
description='Zero-Width Space Steganography, encodes/decodes hidden messages as non printable/readable characters.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Edoardo Nodari',
author_email='[email protected]',
url='https://github.com/enodari/zwsp-steg-py',
packages=find_packages(),
include_package_data=True,
license='Apache 2.0',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6',
],
)
|
zwsp-steg-py
|
/zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/setup.py
|
setup.py
|
MODE_ZWSP = 0
MODE_FULL = 1
ZERO_WIDTH_SPACE = '\u200b'
ZERO_WIDTH_NON_JOINER = '\u200c'
ZERO_WIDTH_JOINER = '\u200d'
LEFT_TO_RIGHT_MARK = '\u200e'
RIGHT_TO_LEFT_MARK = '\u200f'
list_ZWSP = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
]
list_FULL = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
LEFT_TO_RIGHT_MARK,
RIGHT_TO_LEFT_MARK,
]
def get_padding_length(mode):
return 11 if mode == MODE_ZWSP else 7 # Keep padding as small as possible
def to_base(num, b, numerals='0123456789abcdefghijklmnopqrstuvwxyz'):
"""
Python implementation of number.toString(radix)
Thanks to jellyfishtree from https://stackoverflow.com/a/2267428
"""
return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def encode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot encode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
if (len(message) == 0):
return ''
for message_char in message:
code = '{0}{1}'.format('0' * padding, int(str(to_base(ord(message_char), len(alphabet)))))
code = code[len(code) - padding:]
for code_char in code:
index = int(code_char)
encoded = encoded + alphabet[index]
return encoded
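# Worked example (MODE_FULL: alphabet of 5 characters, padding of 7): ord('A') == 65,
# to_base(65, 5) == '230', zero-padded to '0000230', and each digit then selects one
# invisible character, so encode('A') is a string of 7 zero-width characters.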
def decode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot decode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
decoded = ''
for message_char in message:
if message_char in alphabet:
encoded = encoded + str(alphabet.index(message_char))
if (len(encoded) % padding != 0):
raise TypeError('Unknown encoding detected!')
cur_encoded_char = ''
for index, encoded_char in enumerate(encoded):
cur_encoded_char = cur_encoded_char + encoded_char
if index > 0 and (index + 1) % padding == 0:
decoded = decoded + chr(int(cur_encoded_char, len(alphabet)))
cur_encoded_char = ''
return decoded
|
zwsp-steg-py
|
/zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/zwsp_steg/steganography.py
|
steganography.py
|
__version__ = '0.2.0'
from zwsp_steg.steganography import encode, decode, MODE_ZWSP, MODE_FULL
|
zwsp-steg-py
|
/zwsp-steg-py-0.2.0.tar.gz/zwsp-steg-py-0.2.0/zwsp_steg/__init__.py
|
__init__.py
|
'''Testing out a comment,
a multi-line one this time,
haha'''
def print_log(list_info,width):
for item in list_info:
if isinstance(item,list):
'''If the item is a list, print it recursively'''
#print('-----------------')
print_log(item,width+5)
else:
for i in range(width):
print('\t',end='')
print(item)
|
zwtestprint
|
/zwtestprint-1.0.2.tar.gz/zwtestprint-1.0.2/zwtestprint.py
|
zwtestprint.py
|
from distutils.core import setup
setup(
name='zwtestprint',
version='1.0.2',
py_modules=['zwtestprint'],
author='zw',
author_email='[email protected]',
url='http://www.baidu.com',
description='test print list'
)
|
zwtestprint
|
/zwtestprint-1.0.2.tar.gz/zwtestprint-1.0.2/setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils.dateutils import *
def test_find_date():
r = find_date('http://www.xinhuanet.com/politics/2020-07/21/c_1126266603.htm')
assert str(r) == '2020-07-21 00:00:00'
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_dateutils.py
|
test_dateutils.py
|
import pytest
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/conftest.py
|
conftest.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils.sysutils import *
def test_proc():
r = pids_by_name()
assert len(r)>0
r = pids_by_name('mongod')
assert len(r) == 1
r = pids_by_name(r'mongo.*')
assert len(r) == 1
def test_run_shell():
r = run_shell('dir', 'd:\\')
assert len(r) != 0
def test_sys_usage():
r = get_sys_usage()
assert len(r) == 2
def test_write_pidfile():
write_pidfile(dir='./pids')
assert 1
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_sysutils.py
|
test_sysutils.py
|
# -*- coding: utf-8 -*-
import os
import sys
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
sys.path.insert(0, PARENT_DIR)
from zwutils import logger
LOG = logger.logger(__name__, cfg='../zwtmp/conf/log.json', filesuffix='test')
if __name__ == '__main__':
LOG.debug('xixi')
LOG.info('haha')
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_logger.py
|
test_logger.py
|
# -*- coding: utf-8 -*-
import pytest
from PIL import Image
from zwutils import imageutils
def test_image_base64():
img = Image.open('data/yew.png')
img_str = imageutils.image_to_base64(img)
new_img = imageutils.base64_to_image(img_str)
new_img.show()
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_imageutils.py
|
test_imageutils.py
|
# -*- coding: utf-8 -*-
import pytest
import shutil
import requests
from pathlib import Path
from zwutils.mthreading import multithread_task
from zwutils.network import multithread_request
from zwutils import fileutils
class TestMP:
def test_mtask(self):
num = 100
shutil.rmtree('data', ignore_errors=True)
args = [{'path':'data/p%s.txt'%i, 'txt':i} for i in range(num)]
multithread_task(fileutils.writefile, args)
count = len( list(Path('data').glob('*.txt')) )
shutil.rmtree('data', ignore_errors=True)
assert count == num
def test_mrequest(self):
num = 3
urls = ['http://httpbin.org/get' for i in range(num)]
rtn = multithread_request(urls, params_list=[{'key':'yew', 'value':i} for i in range(num)])
assert len(rtn) == num
def test_mrequest_post(self):
num = 3
urls = ['http://httpbin.org/post' for i in range(num)]
def gen_proxy():
r = requests.get('http://66.98.114.234:13603/proxy/get')
return r.json()
settings = {
'method': 'post',
'proxies': gen_proxy,
}
rtn = multithread_request(urls, settings, json_list=[{'key':'yew', 'value':i} for i in range(num)], verify=False)
assert len(rtn) == num
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_mt.py
|
test_mt.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils import reutils
@pytest.mark.parametrize(
'input, result', (
('\n118.113.245.40\n8167\n高匿\nhttp\n中国\n四川省成都市\n电信\n1.379 秒\n6分钟前\n\n', '118.113.245.40'),
('\n118.113.245.40:9999\n8167\n高匿\nhttp\n中国\n四川省成都市\n电信\n1.379 秒\n6分钟前\n\n', '118.113.245.40:9999'),
('aaa118.113.245.40:9999asdfadf118.113.245.40:9999safadfaf118.113.245.40', '118.113.245.40:9999'),
)
)
def test_find_ip(input, result):
r = reutils.find_ip(input)
assert r[0] == result
@pytest.mark.parametrize(
'input, result', (
('\n118.113.245.40\n8167\n高匿\nhttp\n中国\n四川省成都市\n电信\n1.379 秒\n6分钟前\n\n', 8167),
)
)
def test_find_port(input, result):
r = reutils.find_port(input)
assert r[0] == result
@pytest.mark.parametrize(
'input, result', (
('http://wb.jiangsu.gov.cn http://www.jsfao.gov.cn', 2),
('http://www.jiangsu.gov.cn;http://www.js.gov.cn', 2),
)
)
def test_urls_from_str(input, result):
r = reutils.urls_from_str(input)
assert len(r) == result
@pytest.mark.parametrize(
's, arr, result', (
('\n\n索引号\n文号', ('索引号', '文号'), True),
('\n\n索引号 名称\n文号\n生成日期\n', ('索引号', '文号'), True),
('\n\n索引号 名称\n文号\n生成日期\n', ['索引号', '文号'], True),
('\n\n索引号 名称\n文号\n生成日期\n', ['索引号', '编号'], False),
)
)
def test_multi_match(s, arr, result):
r = reutils.multi_match(s, arr)
assert r == result
def test_htmltag_by_name():
htmlstr = '''
<div>
<iframe name="subinfo" width="100%" height="530" id="subinfo" src="/module/xxgk/subjectinfo.jsp?area=014000335" frameborder="0"></iframe>
</div>
'''
r = reutils.htmltag_by_name(htmlstr, 'iframe')
a = 0
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_reutils.py
|
test_reutils.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils import mprocessing as mp
def multirun_cbfunc(s):
return 'result: %s'%s
class TestMP:
def test_multicmd(self):
args = ['.', '/']
cmds = [['ls', '-l', a] for a in args]
r = mp.multiprocess_cmd(cmds)
assert len(r) == len(args)
def test_multirun(self):
num = 10
args = [(a,) for a in range(num)]
r = mp.multiprocess_run(multirun_cbfunc, args)
assert len(r) == num
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_mp.py
|
test_mp.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils import textutils as tu
def test_is_chinese():
assert tu.is_chinese('我') == True
assert tu.is_chinese('A') == False
assert tu.is_chinese('體') == True
assert tu.is_chinese('一') == True
@pytest.mark.parametrize(
'sentence, result', (
(' Zhaowei is NO1, 一天吃 1 顿 , 1 顿 吃 一 天 ', 'Zhaowei is NO1,一天吃1顿,1顿吃一天'),
)
)
def test_remove_space_in_sentence(sentence, result):
out = tu.remove_space_in_sentence(sentence)
assert out == result
def test_replacesequence():
ss = ' \t \n '
replacements = tu.ReplaceSequence()\
.append('\n', '\n\n')\
.append('\t')\
.append('^\\s+$')
rs = replacements.replace(ss)
assert rs == ' \n\n '
def test_inner_trim():
s = ' \tAAA BBB \n '
assert tu.inner_trim(s) == 'AAABBB'
def test_find_datestr():
r = tu.find_datestr('http://abc/20201001/abc')
assert len(r)==1 and r[0] == '2020-10-01'
r = tu.find_datestr('http://abc/2020-10-01/abc')
assert len(r)==1 and r[0] == '2020-10-01'
r = tu.find_datestr('http://abc/202010/abc')
assert len(r)==1 and r[0] == '2020-10'
r = tu.find_datestr('http://abc/2020-10/abc')
assert len(r)==1 and r[0] == '2020-10'
r = tu.find_datestr('http://sjj.jcs.gov.cn/art/2020/10/6/art_42147_519877.html')
assert len(r)==1 and r[0] == '2020-10-06'
r = tu.find_datestr('http://abc/22020-10-01/abc')
assert len(r)==0
r = tu.find_datestr('http://abc/220201001/abc')
assert len(r)==1 # TODO bug
r = tu.find_datestr('http://www.jinhu.gov.cn/col/1185_833426/art/202010/1602578455967HsLIulTb.html')
assert len(r)==1 and r[0] == '2020-10'
r = tu.find_datestr('http://da.jiangsu.gov.cn/art/2020/1/15/art_65298_8910761.html')
assert len(r)==1 and r[0] == '2020-01-15'
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_textutils.py
|
test_textutils.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils.config import Config
def test_config():
cfg = Config('data/test_config.json', default={'fld0':123})
assert cfg.fld0==123 and cfg.fld1=='a'
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_config.py
|
test_config.py
|
# -*- coding: utf-8 -*-
import pytest
import time
import zwutils.dlso as dlso
# pylint: disable=no-member
def test_dict2obj():
r = dlso.dict2obj({
'ks': 'v1',
'kn': 2,
'ka': [1, '2'],
'kd': {'1':1, '2':2},
'knone': None
})
r2 = dlso.dict2obj(None)
assert r.ks == 'v1'
def test_obj2dict():
o = type('', (), {})()
o.a1 = 'a'
o.a2 = 'b'
r = dlso.obj2dict(o)
assert r['a1'] == 'a'
def test_extend_attr():
b = {'a':'a', 'b':'b'}
e = {'b':'bb', 'c':'c', 'd':1}
o = dlso.extend_attrs(dlso.dict2obj(b), e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = dlso.extend_attrs(b, e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = dlso.extend_attrs(dlso.dict2obj(b), dlso.dict2obj(e))
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = dlso.extend_attrs(None, e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = dlso.extend_attrs(dlso.dict2obj(b), None)
assert o.a == 'a' and o.b == 'b'
def test_update_attrs():
b = {'a':'a', 'b':'b'}
e = {'b':'bb', 'c':'c'}
o = dlso.update_attrs(dlso.dict2obj(b), e)
assert o.b == 'bb' and not hasattr(o, 'c')
o = dlso.update_attrs(b, e)
assert o.b == 'bb' and not hasattr(o, 'c')
o = dlso.update_attrs(dlso.dict2obj(b), dlso.dict2obj(e))
assert o.b == 'bb' and not hasattr(o, 'c')
o = dlso.update_attrs(None, e)
assert not hasattr(o, 'b') and not hasattr(o, 'c')
o = dlso.update_attrs(dlso.dict2obj(b), None)
assert o.a == 'a' and o.b == 'b'
def test_upsert_config():
pcfg = type('', (), {})()
pcfg.a = 'o'
dcfg = {'a': 'd', 'da':'da', 'n1':{'nn1': {'nnn1': 'nnn1'}, 'nn2': 'nn2' } }
ncfg = {'a': 'n', 'na':'na'}
pmcfg = {'a': 'p','pa':'pa'}
cfg = dlso.upsert_config(pcfg, dcfg, ncfg, pmcfg)
assert id(cfg) == id(pcfg) and cfg.a == 'p' and hasattr(cfg, 'pa') and cfg.n1.nn1.nnn1 == 'nnn1'
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_dlso.py
|
test_dlso.py
|
# -*- coding: utf-8 -*-
import pytest
import time
import zwutils.comm as comm
# pylint: disable=no-member
def test_dict2attr():
r = comm.dict2attr({
'ks': 'v1',
'kn': 2,
'ka': [1, '2'],
'kd': {'1':1, '2':2},
'knone': None
})
r2 = comm.dict2attr(None)
assert r.ks == 'v1'
def test_attr2dict():
o = type('', (), {})()
o.a1 = 'a'
o.a2 = 'b'
r = comm.attr2dict(o)
assert r['a1'] == 'a'
def test_extend_attr():
b = {'a':'a', 'b':'b'}
e = {'b':'bb', 'c':'c', 'd':1}
o = comm.extend_attrs(comm.dict2attr(b), e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = comm.extend_attrs(b, e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = comm.extend_attrs(comm.dict2attr(b), comm.dict2attr(e))
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = comm.extend_attrs(None, e)
assert o.b == 'bb' and o.c == 'c' and o.d == 1
o = comm.extend_attrs(comm.dict2attr(b), None)
assert o.a == 'a' and o.b == 'b'
def test_update_attrs():
b = {'a':'a', 'b':'b'}
e = {'b':'bb', 'c':'c'}
o = comm.update_attrs(comm.dict2attr(b), e)
assert o.b == 'bb' and not hasattr(o, 'c')
o = comm.update_attrs(b, e)
assert o.b == 'bb' and not hasattr(o, 'c')
o = comm.update_attrs(comm.dict2attr(b), comm.dict2attr(e))
assert o.b == 'bb' and not hasattr(o, 'c')
o = comm.update_attrs(None, e)
assert not hasattr(o, 'b') and not hasattr(o, 'c')
o = comm.update_attrs(comm.dict2attr(b), None)
assert o.a == 'a' and o.b == 'b'
def test_contains_digits():
assert comm.contains_digits('aaabb, 332 44 -adaf')
assert not comm.contains_digits('aaabb,-adaf')
def test_print_duration():
@comm.print_duration
def test_func():
for i in range(3):
time.sleep(1)
test_func()
assert 1
def test_list_split():
r = comm.list_split(list(range(11)), 3)
assert len(r) == 3
r = comm.list_split(list(range(5)), 6)
assert len(r) == 5
def test_list_compare():
assert False == comm.list_compare([1,2,3,3], [1,2,2,3])
assert True == comm.list_compare([1,2,3], [2,1,3])
def test_upsert_config():
cfg = comm.dict2attr({'fld1':1, 'fld2':'b'})
r = comm.upsert_config(cfg, {'fld2':'bb', 'fld3':'cc'}, {'fld2':'z', 'fld4':4})
assert r.fld1 == 1 and r.fld2 == 'bb' and r.fld3 == 'cc' and r.fld4 == 4
r = comm.upsert_config(None, {'fld2':'bb', 'fld3':'cc'}, {'fld2':'z', 'fld4':4})
assert r.fld2 == 'bb' and r.fld3 == 'cc' and r.fld4 == 4
r = comm.upsert_config(None, {'fld2':'bb', 'fld3':'cc'})
assert r.fld2 == 'bb' and r.fld3 == 'cc'
cfg = comm.dict2attr({'fld1':1, 'fld2':'b'})
r = comm.upsert_config(cfg, def_val={'fld2':'z', 'fld4':4})
assert r.fld2 == 'b' and r.fld4 == 4
cfg = comm.dict2attr({'fld1':1, 'fld2':'b'})
r = comm.upsert_config(cfg, {}, {'fld':'abc', 'flddict': {'a1':1, 'a2':'b'}})
assert r.flddict.a1 == 1
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_comm.py
|
test_comm.py
|
# -*- coding: utf-8 -*-
import pytest
import shutil
import requests
from pathlib import Path
import zwutils.network as network
def test_get_html():
r = network.get_html('http://www.baidu.com')
assert r.startswith('<!DOCTYPE html><!--STATUS OK-->')
def test_downfile():
url = ''
r = network.downfile(url, settings={'method':'post'},data={'downames': '110000'}, filename='110000')
assert Path(r).exists()
def test_check_connect():
r = network.check_connect(['http://www.baidu.com', 'http://123.com'])
assert r[0][1] == True and r[1][1] == False
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_network.py
|
test_network.py
|
# -*- coding: utf-8 -*-
import pytest
import os
import struct
import shutil
from zwutils import comm
from zwutils import fileutils
def setup_module():
shutil.rmtree('data/bin', ignore_errors=True)
shutil.rmtree('data/unzip', ignore_errors=True)
fileutils.rmfile('data/zipdir.zip')
def teardown_module():
setup_module()
def test_binfile():
p = 'data/bin/binfile'
arr = [10, 20, 30, 40, 92]
dat = struct.pack('5B', *arr)
fileutils.writebin(p, dat)
s = os.path.getsize(p)
d = fileutils.readbin(p)
a = struct.unpack('5B', d)
assert s == len(arr) and len(comm.list_intersection(arr, a)) == 5
def test_md5():
md5 = fileutils.md5('docs/pytest.pdf')
assert md5 == 'd2e81dddfd92aa86233be7c18bf3b5d8'
def test_zipdir():
fileutils.zipdir('data/zipdir', exclude='*.ttt')
assert os.path.isfile('data/zipdir.zip')
def test_unzip():
fileutils.unzip('data/zipdir.zip', outdir='data/unzip')
assert os.listdir('data/unzip')
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_fileutils.py
|
test_fileutils.py
|
# -*- coding: utf-8 -*-
import pytest
from bs4 import BeautifulSoup
from zwutils import htmlutils
from zwutils import fileutils
def test_find_soup_parent():
htmlstr = '''
<table class="myclass otherclass">
<thead></thead>
<tbody>
<tr><td id="a"></td></tr>
<tr></tr>
</tbody>
</table>
'''
el = BeautifulSoup(htmlstr, features='lxml')
el = el.find(id='a')
r = htmlutils.find_soup_parent(el, tagnm='table')
assert r and r.name == 'table'
r = htmlutils.find_soup_parent(el, attrs={'class': 'myclass'})
assert r and r.name == 'table'
r = htmlutils.find_soup_parent(el, tagnm='table', attrs={'class': 'myclass'})
assert r and r.name == 'table'
def test_find_soup_next_sibling():
htmlstr = '''
<table>
<thead></thead>
<tbody>
<tr><td id="a">label</td><td>text1</td><td class="myclass otherclass">text2</td></tr>
<tr></tr>
</tbody>
</table>
'''
el = BeautifulSoup(htmlstr, features='lxml')
el = el.find(id='a')
r = htmlutils.find_soup_next_sibling(el, tagnm='td')
assert r and r.text == 'text1'
r = htmlutils.find_soup_next_sibling(el, attrs={'class': 'myclass'})
assert r and r.text == 'text2'
r = htmlutils.find_soup_next_sibling(el, tagnm='td', attrs={'class': 'myclass'})
assert r and r.text == 'text2'
def test_soup_depth_count():
htmlstr = '''
<table>
<thead></thead>
<tbody>
<tr id="tr"><td id="td">label</td></tr>
<tr></tr>
</tbody>
</table>
'''
soup = BeautifulSoup(htmlstr, features='lxml')
el = soup.find(id='td')
r = htmlutils.soup_depth_count(el)
assert r == 3
soup = BeautifulSoup(htmlstr, features='lxml')
el = soup.find(id='tr')
r = htmlutils.soup_depth_count(el, 'html')
assert r == 3
def test_soup_calc_child():
htmlstr = '''
<table>
<thead></thead>
<tbody>
<tr id="tr"><td id="td">label</td><td></td></tr>
<tr></tr>
</tbody>
</table>
'''
soup = BeautifulSoup(htmlstr, features='lxml')
r = htmlutils.soup_calc_child(soup, 'td')
assert r[2]['child_count'] == 2 and r[2]['depth_count'] == 2
def test_soup_calc_word():
htmlstr = '''
<html>
<head>HAHA</head>
<body>
<table>
<thead></thead>
<tbody>
<tr><td>这是标题,不是主体</td></tr>
<tr></tr>
</tbody>
</table>
<table>
<thead>这是header,也不是主体</thead>
<tbody>
<tr><td>这是主体了</td></tr>
<tr><td>厅机关各处室,中心、所:</td></tr>
<tr><td>根据军队转业干部接收安置有关政策,经厅党组研究决定,确定姜学明同志为四级调研员。</td></tr>
<tr><td>中共江苏省商务厅党组</td></tr>
<tr><td>2020年2月13日</td></tr>
<tr><td>这是主体了</td></tr>
<tr></tr>
</tbody>
</table>
<div>这仍然不是主体</div>
</body>
</html>
'''
soup = BeautifulSoup(htmlstr, features='lxml')
r = htmlutils.soup_calc_word(soup)
arr = sorted(r, key=lambda o: o['gscore'], reverse=True)
assert 1
def test_soup_drop_tag():
htmlstr = '''
<div>
<p>abc</p>
456
<em>
def
<p>789</p>
<p>ghi</p>
</em>
<p>xxx</p>
zzz
</div>
'''
soup = BeautifulSoup(htmlstr, features='lxml')
# fileutils.writefile('bef.html', str(soup))
htmlutils.soup_drop_tag(soup.find('em'))
# fileutils.writefile('aft.html', str(soup))
assert 1
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_htmlutils.py
|
test_htmlutils.py
|
# -*- coding: utf-8 -*-
import pytest
from zwutils.urlutils import *
def test_urlutils():
d = ('http://a.com/b?p1=1&p2=2', 'http://a.com/b')
assert remove_args(d[0]) == d[1]
d = ('http://a.com/b?url=http%3A%2F%2Fr.com', 'http://r.com')
assert get_redirect(d[0]) == d[1]
d = ('http://a.com/b', 'a.com')
assert get_domain(d[0]) == d[1]
d = ('http://a.com/b', 'http')
assert get_scheme(d[0]) == d[1]
d = ('http://a.com/b/c?p1=1', '/b/c')
assert get_path(d[0]) == d[1]
d = ('http://a.com/b/c?p1=1', True)
assert is_abs_url(d[0]) == d[1]
d = ('b/c?p1=1', False)
assert is_abs_url(d[0]) == d[1]
d = ('http://blahblah/images/car.jpg', 'jpg')
assert url_to_filetype(d[0]) == d[1]
d = ('http://a.com/b/c?p1=1', 'http://a.com')
assert get_base(d[0]) == d[1]
d = ('http://example.com/../thing///wrong/../multiple-slashes-yeah/.', 'http://example.com/thing///multiple-slashes-yeah/')
assert resolve_url(d[0]) == d[1]
d = ('http://blahblah/images/car.jpg', True)
assert is_url_image(d[0]) == d[1]
d = ( ['abc.yew.com', 'abc.def.yew.com'], 2)
assert subdomain_compare(d[0][0], d[0][1]) == d[1]
d = ( 'http://scjg.taizhou.gov.cn/', '47.96.196.4')
assert domain2ip(d[0]) == d[1]
d = ( 'scjg.taizhou.gov.cn', '47.96.196.4')
assert domain2ip(d[0]) == d[1]
def test_slugify():
s = slugify('UK vows action after record-high migrant crossing of Channel')
assert s == 'uk-vows-action-after-record-high-migrant-crossing-of-channel'
s = slugify('ABC 走向 我们的小康生活')
assert s == 'abc-走向-我们的小康生活'
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_urlutils.py
|
test_urlutils.py
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import logging
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
sys.path.insert(0, PARENT_DIR)
from zwutils.zwtask import ZWTask
def test(task, a, b):
try:
while True:
task.log(logging.INFO, 'haha')
time.sleep(3)
except Exception as ex:
print(ex)
if __name__ == '__main__':
# task = ZWTask(target=test, name='yewtest', args=(1, 2), c2server='http://localhost:8080/api/spider/status')
# task.start()
# task.join()
args = [(i, i+1) for i in range(3)]
ZWTask.run_processes(target=test, args_list=args, max_size=1, c2server='http://localhost:8080/api/spider/status')
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_zwtask.py
|
test_zwtask.py
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import logging
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
sys.path.insert(0, PARENT_DIR)
from zwutils.zwtask import ZWTask
if __name__ == '__main__':
pypth, infos = ZWTask.stop_processes()
print(pypth)
for p in infos:
print('%d %s'%(p['pid'], p['cmd']))
|
zwutils
|
/zwutils-0.2.7-py3-none-any.whl/tests/test_zwtask2.py
|
test_zwtask2.py
|
from setuptools import setup, find_packages
from configparser import ConfigParser
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
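# Editor's sketch: a minimal settings.ini [DEFAULT] section this script expects.
# The key names come from `cfg_keys`/`expected` above; every value below is a
# hypothetical placeholder, not taken from the real project config.
#
# [DEFAULT]
# lib_name = zwyfastai
# user = your-github-user
# version = 2.0.21
# description = short one-line description
# keywords = deep-learning fastai
# author = author name
# author_email = author@example.com
# branch = master
# license = apache2
# status = 4
# min_python = 3.6
# audience = Developers
# language = English
# long_description = README.md
# requirements =
# console_scripts =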
setup_cfg = {o:cfg[o] for o in cfg_keys}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()
min_python = cfg['min_python']
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
lic = licenses[cfg['license']]
requirements = ['pip', 'packaging']
if cfg.get('requirements'): requirements += cfg.get('requirements','').split()
if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split()
dev_requirements = (cfg.get('dev_requirements') or '').split()
setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]],
# description = 'many functions for myself to advance my coding',
long_description = open(cfg['long_description'],'r',encoding='utf8').read(),
long_description_content_type = 'text/markdown',
python_requires=">=3.5",
install_requires = requirements,
extras_require={ 'dev': dev_requirements },
packages = find_packages("src"),# 需要打包的package,使用find_packages 来动态获取package,exclude参数的存在,使打包的时候,排除掉这些文件
platforms = 'any',
package_dir={"":"src"},
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
include_package_data = True,
**setup_cfg
)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/setup.py
|
setup.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_layers.ipynb (unless otherwise specified).
__all__ = ['module', 'Identity', 'Lambda', 'PartialLambda', 'Flatten', 'View', 'ResizeBatch', 'Debugger',
'sigmoid_range', 'SigmoidRange', 'AdaptiveConcatPool1d', 'AdaptiveConcatPool2d', 'PoolType', 'adaptive_pool',
'PoolFlatten', 'NormType', 'BatchNorm', 'InstanceNorm', 'BatchNorm1dFlat', 'LinBnDrop', 'sigmoid',
'sigmoid_', 'vleaky_relu', 'init_default', 'init_linear', 'ConvLayer', 'AdaptiveAvgPool', 'MaxPool',
'AvgPool', 'trunc_normal_', 'Embedding', 'SelfAttention', 'PooledSelfAttention2d', 'SimpleSelfAttention',
'icnr_init', 'PixelShuffle_ICNR', 'sequential', 'SequentialEx', 'MergeLayer', 'Cat', 'SimpleCNN',
'ProdLayer', 'inplace_relu', 'SEModule', 'ResBlock', 'SEBlock', 'SEResNeXtBlock', 'SeparableBlock', 'swish',
'Swish', 'MishJitAutoFn', 'mish', 'Mish', 'ParameterModule', 'children_and_parameters', 'has_children',
'flatten_model', 'NoneReduce', 'in_channels']
# Cell
from .imports import *
from .torch_imports import *
from .torch_core import *
from torch.nn.utils import weight_norm, spectral_norm
# Cell
def module(*flds, **defaults):
"Decorator to create an `nn.Module` using `f` as `forward` method"
pa = [inspect.Parameter(o, inspect.Parameter.POSITIONAL_OR_KEYWORD) for o in flds]
pb = [inspect.Parameter(k, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=v)
for k,v in defaults.items()]
params = pa+pb
all_flds = [*flds,*defaults.keys()]
def _f(f):
class c(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
for i,o in enumerate(args): kwargs[all_flds[i]] = o
kwargs = merge(defaults,kwargs)
for k,v in kwargs.items(): setattr(self,k,v)
__repr__ = basic_repr(all_flds)
forward = f
c.__signature__ = inspect.Signature(params)
c.__name__ = c.__qualname__ = f.__name__
c.__doc__ = f.__doc__
return c
return _f
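# Example (editor's sketch, not part of the library): `module` builds an nn.Module
# whose `forward` is the decorated function, with positional fields and keyword
# defaults stored on `self` -- the same pattern the `Lambda`/`Flatten` cells below use.
# @module('scale', bias=0.)
# def ScaleShift(self, x): return x*self.scale + self.bias
# layer = ScaleShift(2.)        # scale=2., bias=0.
# layer(torch.ones(3))          # -> tensor([2., 2., 2.])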
# Cell
@module()
def Identity(self, x):
"Do nothing at all"
return x
# Cell
@module('func')
def Lambda(self, x):
"An easy way to create a pytorch layer for a simple `func`"
return self.func(x)
# Cell
class PartialLambda(Lambda):
"Layer that applies `partial(func, **kwargs)`"
def __init__(self, func, **kwargs):
super().__init__(partial(func, **kwargs))
self.repr = f'{func.__name__}, {kwargs}'
def forward(self, x): return self.func(x)
def __repr__(self): return f'{self.__class__.__name__}({self.repr})'
# Cell
@module(full=False)
def Flatten(self, x):
"Flatten `x` to a single dimension, e.g. at end of a model. `full` for rank-1 tensor"
return TensorBase(x.view(-1) if self.full else x.view(x.size(0), -1))
# Cell
class View(Module):
"Reshape `x` to `size`"
def __init__(self, *size): self.size = size
def forward(self, x): return x.view(self.size)
# Cell
class ResizeBatch(Module):
"Reshape `x` to `size`, keeping batch dim the same size"
def __init__(self, *size): self.size = size
def forward(self, x): return x.view((x.size(0),) + self.size)
# Cell
@module()
def Debugger(self,x):
"A module to debug inside a model."
set_trace()
return x
# Cell
def sigmoid_range(x, low, high):
"Sigmoid function with range `(low, high)`"
return torch.sigmoid(x) * (high - low) + low
# Cell
@module('low','high')
def SigmoidRange(self, x):
"Sigmoid module with range `(low, high)`"
return sigmoid_range(x, self.low, self.high)
# Cell
class AdaptiveConcatPool1d(Module):
"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`"
def __init__(self, size=None):
self.size = size or 1
self.ap = nn.AdaptiveAvgPool1d(self.size)
self.mp = nn.AdaptiveMaxPool1d(self.size)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# Cell
class AdaptiveConcatPool2d(Module):
"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`"
def __init__(self, size=None):
self.size = size or 1
self.ap = nn.AdaptiveAvgPool2d(self.size)
self.mp = nn.AdaptiveMaxPool2d(self.size)
def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)
# Cell
class PoolType: Avg,Max,Cat = 'Avg','Max','Cat'
# Cell
def adaptive_pool(pool_type):
return nn.AdaptiveAvgPool2d if pool_type=='Avg' else nn.AdaptiveMaxPool2d if pool_type=='Max' else AdaptiveConcatPool2d
# Cell
class PoolFlatten(nn.Sequential):
"Combine `nn.AdaptiveAvgPool2d` and `Flatten`."
def __init__(self, pool_type=PoolType.Avg): super().__init__(adaptive_pool(pool_type)(1), Flatten())
# Cell
NormType = Enum('NormType', 'Batch BatchZero Weight Spectral Instance InstanceZero')
# Cell
def _get_norm(prefix, nf, ndim=2, zero=False, **kwargs):
"Norm layer with `nf` features and `ndim` initialized depending on `norm_type`."
assert 1 <= ndim <= 3
bn = getattr(nn, f"{prefix}{ndim}d")(nf, **kwargs)
if bn.affine:
bn.bias.data.fill_(1e-3)
bn.weight.data.fill_(0. if zero else 1.)
return bn
# Cell
@delegates(nn.BatchNorm2d)
def BatchNorm(nf, ndim=2, norm_type=NormType.Batch, **kwargs):
"BatchNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
return _get_norm('BatchNorm', nf, ndim, zero=norm_type==NormType.BatchZero, **kwargs)
# Cell
@delegates(nn.InstanceNorm2d)
def InstanceNorm(nf, ndim=2, norm_type=NormType.Instance, affine=True, **kwargs):
"InstanceNorm layer with `nf` features and `ndim` initialized depending on `norm_type`."
return _get_norm('InstanceNorm', nf, ndim, zero=norm_type==NormType.InstanceZero, affine=affine, **kwargs)
# Cell
class BatchNorm1dFlat(nn.BatchNorm1d):
"`nn.BatchNorm1d`, but first flattens leading dimensions"
def forward(self, x):
if x.dim()==2: return super().forward(x)
*f,l = x.shape
x = x.contiguous().view(-1,l)
return super().forward(x).view(*f,l)
# Cell
class LinBnDrop(nn.Sequential):
"Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers"
def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
layers = [BatchNorm(n_out if lin_first else n_in, ndim=1)] if bn else []
if p != 0: layers.append(nn.Dropout(p))
lin = [nn.Linear(n_in, n_out, bias=not bn)]
if act is not None: lin.append(act)
layers = lin+layers if lin_first else layers+lin
super().__init__(*layers)
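# Example (editor's sketch): with the defaults above, LinBnDrop(1024, 512, p=0.5,
# act=nn.ReLU(inplace=True)) stacks BatchNorm1d(1024) -> Dropout(0.5) ->
# Linear(1024, 512, bias=False) -> ReLU; lin_first=True instead puts the Linear
# (and activation) first and normalises the 512 outputs.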
# Cell
def sigmoid(input, eps=1e-7):
"Same as `torch.sigmoid`, plus clamping to `(eps,1-eps)"
return input.sigmoid().clamp(eps,1-eps)
# Cell
def sigmoid_(input, eps=1e-7):
"Same as `torch.sigmoid_`, plus clamping to `(eps,1-eps)"
return input.sigmoid_().clamp_(eps,1-eps)
# Cell
from torch.nn.init import kaiming_uniform_,uniform_,xavier_uniform_,normal_
# Cell
def vleaky_relu(input, inplace=True):
"`F.leaky_relu` with 0.3 slope"
return F.leaky_relu(input, negative_slope=0.3, inplace=inplace)
# Cell
for o in F.relu,nn.ReLU,F.relu6,nn.ReLU6,F.leaky_relu,nn.LeakyReLU:
o.__default_init__ = kaiming_uniform_
# Cell
for o in F.sigmoid,nn.Sigmoid,F.tanh,nn.Tanh,sigmoid,sigmoid_:
o.__default_init__ = xavier_uniform_
# Cell
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func and hasattr(m, 'weight'): func(m.weight)
with torch.no_grad():
if getattr(m, 'bias', None) is not None: m.bias.fill_(0.)
return m
# Cell
def init_linear(m, act_func=None, init='auto', bias_std=0.01):
if getattr(m,'bias',None) is not None and bias_std is not None:
if bias_std != 0: normal_(m.bias, 0, bias_std)
else: m.bias.data.zero_()
if init=='auto':
if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_
else: init = getattr(act_func.__class__, '__default_init__', None)
if init is None: init = getattr(act_func, '__default_init__', None)
if init is not None: init(m.weight)
# Cell
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <=3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
# Cell
defaults.activation=nn.ReLU
# Cell
class ConvLayer(nn.Sequential):
"Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and `norm_type` layers."
@delegates(nn.Conv2d)
def __init__(self, ni, nf, ks=3, stride=1, padding=None, bias=None, ndim=2, norm_type=NormType.Batch, bn_1st=True,
act_cls=defaults.activation, transpose=False, init='auto', xtra=None, bias_std=0.01, **kwargs):
if padding is None: padding = ((ks-1)//2 if not transpose else 0)
bn = norm_type in (NormType.Batch, NormType.BatchZero)
inn = norm_type in (NormType.Instance, NormType.InstanceZero)
if bias is None: bias = not (bn or inn)
conv_func = _conv_func(ndim, transpose=transpose)
conv = conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, **kwargs)
act = None if act_cls is None else act_cls()
init_linear(conv, act, init=init, bias_std=bias_std)
if norm_type==NormType.Weight: conv = weight_norm(conv)
elif norm_type==NormType.Spectral: conv = spectral_norm(conv)
layers = [conv]
act_bn = []
if act is not None: act_bn.append(act)
if bn: act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))
if inn: act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))
if bn_1st: act_bn.reverse()
layers += act_bn
if xtra: layers.append(xtra)
super().__init__(*layers)
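# Example (editor's sketch): ConvLayer(3, 32, ks=3, stride=2) with the defaults above
# expands to Conv2d(3, 32, 3, stride=2, padding=1, bias=False) -> BatchNorm2d(32) -> ReLU
# (bias is dropped because a batch norm follows; bn_1st=True places the norm before
# the activation).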
# Cell
def AdaptiveAvgPool(sz=1, ndim=2):
"nn.AdaptiveAvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AdaptiveAvgPool{ndim}d")(sz)
# Cell
def MaxPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.MaxPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"MaxPool{ndim}d")(ks, stride=stride, padding=padding)
# Cell
def AvgPool(ks=2, stride=None, padding=0, ndim=2, ceil_mode=False):
"nn.AvgPool layer for `ndim`"
assert 1 <= ndim <= 3
return getattr(nn, f"AvgPool{ndim}d")(ks, stride=stride, padding=padding, ceil_mode=ceil_mode)
# Cell
def trunc_normal_(x, mean=0., std=1.):
"Truncated normal initialization (approximation)"
# From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
return x.normal_().fmod_(2).mul_(std).add_(mean)
# Cell
class Embedding(nn.Embedding):
"Embedding layer with truncated normal initialization"
def __init__(self, ni, nf, std=0.01):
super().__init__(ni, nf)
trunc_normal_(self.weight.data, std=std)
# Cell
class SelfAttention(Module):
"Self attention layer for `n_channels`."
def __init__(self, n_channels):
self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels)]
self.gamma = nn.Parameter(tensor([0.]))
def _conv(self,n_in,n_out):
return ConvLayer(n_in, n_out, ks=1, ndim=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
def forward(self, x):
#Notation from the paper.
size = x.size()
x = x.view(*size[:2],-1)
f,g,h = self.query(x),self.key(x),self.value(x)
beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)
o = self.gamma * torch.bmm(h, beta) + x
return o.view(*size).contiguous()
# Cell
class PooledSelfAttention2d(Module):
"Pooled self attention layer for 2d."
def __init__(self, n_channels):
self.n_channels = n_channels
self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels//2)]
self.out = self._conv(n_channels//2, n_channels)
self.gamma = nn.Parameter(tensor([0.]))
def _conv(self,n_in,n_out):
return ConvLayer(n_in, n_out, ks=1, norm_type=NormType.Spectral, act_cls=None, bias=False)
def forward(self, x):
n_ftrs = x.shape[2]*x.shape[3]
f = self.query(x).view(-1, self.n_channels//8, n_ftrs)
g = F.max_pool2d(self.key(x), [2,2]).view(-1, self.n_channels//8, n_ftrs//4)
h = F.max_pool2d(self.value(x), [2,2]).view(-1, self.n_channels//2, n_ftrs//4)
beta = F.softmax(torch.bmm(f.transpose(1, 2), g), -1)
o = self.out(torch.bmm(h, beta.transpose(1,2)).view(-1, self.n_channels//2, x.shape[2], x.shape[3]))
return self.gamma * o + x
# Cell
def _conv1d_spect(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False):
"Create and initialize a `nn.Conv1d` layer with spectral normalization."
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias: conv.bias.data.zero_()
return spectral_norm(conv)
# Cell
class SimpleSelfAttention(Module):
def __init__(self, n_in:int, ks=1, sym=False):
self.sym,self.n_in = sym,n_in
self.conv = _conv1d_spect(n_in, n_in, ks, padding=ks//2, bias=False)
self.gamma = nn.Parameter(tensor([0.]))
def forward(self,x):
if self.sym:
c = self.conv.weight.view(self.n_in,self.n_in)
c = (c + c.t())/2
self.conv.weight = c.view(self.n_in,self.n_in,1)
size = x.size()
x = x.view(*size[:2],-1)
convx = self.conv(x)
xxT = torch.bmm(x,x.permute(0,2,1).contiguous())
o = torch.bmm(xxT, convx)
o = self.gamma * o + x
return o.view(*size).contiguous()
# Cell
def icnr_init(x, scale=2, init=nn.init.kaiming_normal_):
"ICNR init of `x`, with `scale` and `init` function"
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(x.new_zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
return k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
# Cell
class PixelShuffle_ICNR(nn.Sequential):
"Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`."
def __init__(self, ni, nf=None, scale=2, blur=False, norm_type=NormType.Weight, act_cls=defaults.activation):
super().__init__()
nf = ifnone(nf, ni)
layers = [ConvLayer(ni, nf*(scale**2), ks=1, norm_type=norm_type, act_cls=act_cls, bias_std=0),
nn.PixelShuffle(scale)]
layers[0][0].weight.data.copy_(icnr_init(layers[0][0].weight.data))
if blur: layers += [nn.ReplicationPad2d((1,0,1,0)), nn.AvgPool2d(2, stride=1)]
super().__init__(*layers)
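# Editor's note: ICNR initialisation makes the conv + nn.PixelShuffle(scale) pair start
# out as (approximately) nearest-neighbour upsampling, which reduces checkerboard
# artifacts; the optional `blur` (ReplicationPad2d + AvgPool2d) smooths what remains.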
# Cell
def sequential(*args):
"Create an `nn.Sequential`, wrapping items with `Lambda` if needed"
if len(args) != 1 or not isinstance(args[0], OrderedDict):
args = list(args)
for i,o in enumerate(args):
if not isinstance(o,nn.Module): args[i] = Lambda(o)
return nn.Sequential(*args)
# Cell
class SequentialEx(Module):
"Like `nn.Sequential`, but with ModuleList semantics, and can access module input"
def __init__(self, *layers): self.layers = nn.ModuleList(layers)
def forward(self, x):
res = x
for l in self.layers:
res.orig = x
nres = l(res)
# We have to remove res.orig to avoid hanging refs and therefore memory leaks
res.orig, nres.orig = None, None
res = nres
return res
def __getitem__(self,i): return self.layers[i]
def append(self,l): return self.layers.append(l)
def extend(self,l): return self.layers.extend(l)
def insert(self,i,l): return self.layers.insert(i,l)
# Cell
class MergeLayer(Module):
"Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
def __init__(self, dense:bool=False): self.dense=dense
def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig)
# Cell
class Cat(nn.ModuleList):
"Concatenate layers outputs over a given dim"
def __init__(self, layers, dim=1):
self.dim=dim
super().__init__(layers)
def forward(self, x): return torch.cat([l(x) for l in self], dim=self.dim)
# Cell
class SimpleCNN(nn.Sequential):
"Create a simple CNN with `filters`."
def __init__(self, filters, kernel_szs=None, strides=None, bn=True):
nl = len(filters)-1
kernel_szs = ifnone(kernel_szs, [3]*nl)
strides = ifnone(strides , [2]*nl)
layers = [ConvLayer(filters[i], filters[i+1], kernel_szs[i], stride=strides[i],
norm_type=(NormType.Batch if bn and i<nl-1 else None)) for i in range(nl)]
layers.append(PoolFlatten())
super().__init__(*layers)
# Cell
class ProdLayer(Module):
"Merge a shortcut with the result of the module by multiplying them."
def forward(self, x): return x * x.orig
# Cell
inplace_relu = partial(nn.ReLU, inplace=True)
# Cell
def SEModule(ch, reduction, act_cls=defaults.activation):
nf = math.ceil(ch//reduction/8)*8
return SequentialEx(nn.AdaptiveAvgPool2d(1),
ConvLayer(ch, nf, ks=1, norm_type=None, act_cls=act_cls),
ConvLayer(nf, ch, ks=1, norm_type=None, act_cls=nn.Sigmoid),
ProdLayer())
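# Example (editor's sketch): SEModule(64, reduction=16) gives nf = ceil((64//16)/8)*8 = 8,
# so the block is AdaptiveAvgPool2d(1) -> ConvLayer(64, 8, ks=1) -> ConvLayer(8, 64, ks=1,
# act=Sigmoid) -> ProdLayer, i.e. a per-channel gate multiplied back onto the block input.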
# Cell
class ResBlock(Module):
"Resnet block from `ni` to `nh` with `stride`"
@delegates(ConvLayer.__init__)
def __init__(self, expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,
sa=False, sym=False, norm_type=NormType.Batch, act_cls=defaults.activation, ndim=2, ks=3,
pool=AvgPool, pool_first=True, **kwargs):
norm2 = (NormType.BatchZero if norm_type==NormType.Batch else
NormType.InstanceZero if norm_type==NormType.Instance else norm_type)
if nh2 is None: nh2 = nf
if nh1 is None: nh1 = nh2
nf,ni = nf*expansion,ni*expansion
k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)
k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)
convpath = [ConvLayer(ni, nh2, ks, stride=stride, groups=ni if dw else groups, **k0),
ConvLayer(nh2, nf, ks, groups=g2, **k1)
] if expansion == 1 else [
ConvLayer(ni, nh1, 1, **k0),
ConvLayer(nh1, nh2, ks, stride=stride, groups=nh1 if dw else groups, **k0),
ConvLayer(nh2, nf, 1, groups=g2, **k1)]
if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))
self.convpath = nn.Sequential(*convpath)
idpath = []
if ni!=nf: idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))
if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=ndim, ceil_mode=True))
self.idpath = nn.Sequential(*idpath)
self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()
def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))
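# Example (editor's sketch): ResBlock(1, 64, 64) is the classic two-conv basic block
# (3x3 -> 3x3, second norm zero-initialised via NormType.BatchZero); ResBlock(4, 64, 64,
# stride=2) builds the 1x1 -> 3x3 -> 1x1 bottleneck on 256-channel tensors (ni and nf are
# multiplied by `expansion`), with an AvgPool on the identity path for the stride
# (a 1x1 conv is added there only when the channel counts differ).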
# Cell
def SEBlock(expansion, ni, nf, groups=1, reduction=16, stride=1, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh1=nf*2, nh2=nf*expansion, **kwargs)
# Cell
def SEResNeXtBlock(expansion, ni, nf, groups=32, reduction=16, stride=1, base_width=4, **kwargs):
w = math.floor(nf * (base_width / 64)) * groups
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh2=w, **kwargs)
# Cell
def SeparableBlock(expansion, ni, nf, reduction=16, stride=1, base_width=4, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, reduction=reduction, nh2=nf*2, dw=True, **kwargs)
# Cell
from torch.jit import script
# Cell
@script
def _swish_jit_fwd(x): return x.mul(torch.sigmoid(x))
@script
def _swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class _SwishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _swish_jit_bwd(x, grad_output)
# Cell
def swish(x, inplace=False): return _SwishJitAutoFn.apply(x)
# Cell
class Swish(Module):
def forward(self, x): return _SwishJitAutoFn.apply(x)
# Cell
@script
def _mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x)))
@script
def _mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return _mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
return _mish_jit_bwd(x, grad_output)
# Cell
def mish(x): return MishJitAutoFn.apply(x)
# Cell
class Mish(Module):
def forward(self, x): return MishJitAutoFn.apply(x)
# Cell
for o in swish,Swish,mish,Mish: o.__default_init__ = kaiming_uniform_
# Cell
class ParameterModule(Module):
"Register a lone parameter `p` in a module."
def __init__(self, p): self.val = p
def forward(self, x): return x
# Cell
def children_and_parameters(m):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# Cell
def has_children(m):
try: next(m.children())
except StopIteration: return False
return True
# Cell
def flatten_model(m):
"Return the list of all submodules and parameters of `m`"
return sum(map(flatten_model,children_and_parameters(m)),[]) if has_children(m) else [m]
# Cell
class NoneReduce():
"A context manager to evaluate `loss_func` with none reduce."
def __init__(self, loss_func): self.loss_func,self.old_red = loss_func,None
def __enter__(self):
if hasattr(self.loss_func, 'reduction'):
self.old_red = self.loss_func.reduction
self.loss_func.reduction = 'none'
return self.loss_func
else: return partial(self.loss_func, reduction='none')
def __exit__(self, type, value, traceback):
if self.old_red is not None: self.loss_func.reduction = self.old_red
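# Example (editor's sketch): typical use is
#     with NoneReduce(loss_func) as lf:
#         per_item = lf(preds, targs)   # unreduced, per-element losses
# the original `reduction` attribute (if any) is restored on exit.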
# Cell
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if getattr(l, 'weight', None) is not None and l.weight.ndim==4:
return l.weight.shape[1]
raise Exception('No weight layer')
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/layers.py
|
layers.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01a_losses.ipynb (unless otherwise specified).
__all__ = ['BaseLoss', 'CrossEntropyLossFlat', 'BCEWithLogitsLossFlat', 'BCELossFlat', 'MSELossFlat', 'L1LossFlat',
'LabelSmoothingCrossEntropy', 'LabelSmoothingCrossEntropyFlat']
# Cell
from .imports import *
from .torch_imports import *
from .torch_core import *
from .layers import *
# Cell
class BaseLoss():
"Same as `loss_cls`, but flattens input and target."
activation=decodes=noops
def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs):
store_attr("axis,flatten,floatify,is_2d")
self.func = loss_cls(*args,**kwargs)
functools.update_wrapper(self, self.func)
def __repr__(self): return f"FlattenedLoss of {self.func}"
@property
def reduction(self): return self.func.reduction
@reduction.setter
def reduction(self, v): self.func.reduction = v
def _contiguous(self,x):
return TensorBase(x.transpose(self.axis,-1).contiguous()) if isinstance(x,torch.Tensor) else x
def __call__(self, inp, targ, **kwargs):
inp,targ = map(self._contiguous, (inp,targ))
if self.floatify and targ.dtype!=torch.float16: targ = targ.float()
if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long()
if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1)
return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs)
# Cell
@delegates()
class CrossEntropyLossFlat(BaseLoss):
"Same as `nn.CrossEntropyLoss`, but flattens input and target."
y_int = True
@use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean')
def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs)
def decodes(self, x): return x.argmax(dim=self.axis)
def activation(self, x): return F.softmax(x, dim=self.axis)
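# Example (editor's sketch): for activations of shape (bs, seq_len, n_classes) and targets
# of shape (bs, seq_len), CrossEntropyLossFlat() first moves `axis` to the last dimension,
# then flattens to (bs*seq_len, n_classes) and (bs*seq_len,) before calling
# nn.CrossEntropyLoss, so the same loss works for plain and sequence classification.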
# Cell
@delegates()
class BCEWithLogitsLossFlat(BaseLoss):
"Same as `nn.BCEWithLogitsLoss`, but flattens input and target."
@use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None)
def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs):
if kwargs.get('pos_weight', None) is not None and kwargs.get('flatten', None) is True:
raise ValueError("`flatten` must be False when using `pos_weight` to avoid a RuntimeError due to shape mismatch")
if kwargs.get('pos_weight', None) is not None: kwargs['flatten'] = False
super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
self.thresh = thresh
def decodes(self, x): return x>self.thresh
def activation(self, x): return torch.sigmoid(x)
# Cell
@use_kwargs_dict(weight=None, reduction='mean')
def BCELossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.BCELoss`, but flattens input and target."
return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
@use_kwargs_dict(reduction='mean')
def MSELossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.MSELoss`, but flattens input and target."
return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
@use_kwargs_dict(reduction='mean')
def L1LossFlat(*args, axis=-1, floatify=True, **kwargs):
"Same as `nn.L1Loss`, but flattens input and target."
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
# Cell
class LabelSmoothingCrossEntropy(Module):
y_int = True
def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
if self.reduction=='sum': loss = -log_preds.sum()
else:
loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean
if self.reduction=='mean': loss = loss.mean()
return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction)
def activation(self, out): return F.softmax(out, dim=-1)
def decodes(self, out): return out.argmax(dim=-1)
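# Editor's note: the forward above computes eps/c * (-sum_j log p_j) + (1-eps) * NLL(target),
# i.e. cross entropy against the smoothed distribution that puts eps/c on every class plus
# an extra (1-eps) on the target class.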
# Cell
@delegates()
class LabelSmoothingCrossEntropyFlat(BaseLoss):
"Same as `LabelSmoothingCrossEntropy`, but flattens input and target."
y_int = True
@use_kwargs_dict(keep=True, eps=0.1, reduction='mean')
def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs)
def activation(self, out): return F.softmax(out, dim=-1)
def decodes(self, out): return out.argmax(dim=-1)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/losses.py
|
losses.py
|
import subprocess,torch,os,sys
from fastcore.basics import *
from fastcore.script import *
@call_parse
def main(
gpus:Param("The GPUs to use for distributed training", str)='all',
script:Param("Script to run", str, opt=False)='',
args:Param("Args to pass to script", nargs='...', opt=False)=''
):
"PyTorch distributed training launch helper that spawns multiple distributed processes"
current_env = os.environ.copy()
gpus = list(range(torch.cuda.device_count())) if gpus=='all' else gpus.split(',')
current_env["WORLD_SIZE"] = str(len(gpus))
current_env["MASTER_ADDR"] = '127.0.0.1'
current_env["MASTER_PORT"] = '29500'
procs = []
for i,gpu in enumerate(gpus):
current_env["RANK"],current_env["DEFAULT_GPU"] = str(i),str(gpu)
procs.append(subprocess.Popen([sys.executable, "-u", script] + args, env=current_env))
for p in procs: p.wait()
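# Example (editor's sketch): a typical invocation might be
#   python -m fastai.launch train.py --epochs 5
# which starts one copy of train.py per visible GPU with WORLD_SIZE, RANK, DEFAULT_GPU,
# MASTER_ADDR and MASTER_PORT set in each child's environment.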
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/launch.py
|
launch.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13b_metrics.ipynb (unless otherwise specified).
__all__ = ['AccumMetric', 'skm_to_fastai', 'optim_metric', 'accuracy', 'error_rate', 'top_k_accuracy', 'APScoreBinary',
'BalancedAccuracy', 'BrierScore', 'CohenKappa', 'F1Score', 'FBeta', 'HammingLoss', 'Jaccard', 'Precision',
'Recall', 'RocAuc', 'RocAucBinary', 'MatthewsCorrCoef', 'Perplexity', 'perplexity', 'accuracy_multi',
'APScoreMulti', 'BrierScoreMulti', 'F1ScoreMulti', 'FBetaMulti', 'HammingLossMulti', 'JaccardMulti',
'MatthewsCorrCoefMulti', 'PrecisionMulti', 'RecallMulti', 'RocAucMulti', 'mse', 'rmse', 'mae', 'msle',
'exp_rmspe', 'ExplainedVariance', 'R2Score', 'PearsonCorrCoef', 'SpearmanCorrCoef', 'foreground_acc', 'Dice',
'DiceMulti', 'JaccardCoeff', 'CorpusBLEUMetric', 'LossMetric', 'LossMetrics']
# Cell
from .data.all import *
from .optimizer import *
from .learner import *
# Cell
import sklearn.metrics as skm
import scipy.stats as scs
# Cell
mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']},
doc="All possible activation classes for `AccumMetric")
# Cell
class AccumMetric(Metric):
"Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
def __init__(self, func, dim_argmax=None, activation=ActivationType.No, thresh=None, to_np=False,
invert_arg=False, flatten=True, **kwargs):
store_attr('func,dim_argmax,activation,thresh,flatten')
self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
def reset(self):
"Clear all targs and preds"
self.targs,self.preds = [],[]
def accumulate(self, learn):
"Store targs and preds from `learn`, using activation function and argmax as appropriate"
pred = learn.pred
if self.activation in [ActivationType.Softmax, ActivationType.BinarySoftmax]:
pred = F.softmax(pred, dim=self.dim_argmax)
if self.activation == ActivationType.BinarySoftmax: pred = pred[:, -1]
elif self.activation == ActivationType.Sigmoid: pred = torch.sigmoid(pred)
elif self.dim_argmax: pred = pred.argmax(dim=self.dim_argmax)
if self.thresh: pred = (pred >= self.thresh)
self.accum_values(pred,learn.y,learn)
def accum_values(self, preds, targs,learn=None):
"Store targs and preds"
to_d = learn.to_detach if learn is not None else to_detach
preds,targs = to_d(preds),to_d(targs)
if self.flatten: preds,targs = flatten_check(preds,targs)
self.preds.append(preds)
self.targs.append(targs)
def __call__(self, preds, targs):
"Calculate metric on one batch of data"
self.reset()
self.accum_values(preds,targs)
return self.value
@property
def value(self):
"Value of the metric using accumulated preds and targs"
if len(self.preds) == 0: return
preds,targs = torch.cat(self.preds),torch.cat(self.targs)
if self.to_np: preds,targs = preds.numpy(),targs.numpy()
return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# Cell
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, activation=None, **kwargs):
"Convert `func` from sklearn.metrics to a fastai metric"
dim_argmax = axis if is_class and thresh is None else None
if activation is None:
activation = ActivationType.Sigmoid if (is_class and thresh is not None) else ActivationType.No
return AccumMetric(func, dim_argmax=dim_argmax, activation=activation, thresh=thresh,
to_np=True, invert_arg=True, **kwargs)
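# Example (editor's sketch): skm_to_fastai(skm.f1_score, average='macro') returns an
# AccumMetric that argmaxes predictions over `axis`, moves everything to numpy and calls
# skm.f1_score(targs, preds, average='macro') (note the inverted argument order).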
# Cell
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
"Replace metric `f` with a version that optimizes argument `argname`"
def _f(preds, targs):
def minfunc(x):
kwargs = {argname:x}
res = f(preds, targs, **kwargs)
return -res if do_neg else res
optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
options={'xatol':0.01})
fun = -optres.fun if do_neg else optres.fun
return (fun,optres.x) if get_x else fun
_f.__name__ = f'opt_{f.__name__}'
return _f
# Cell
def accuracy(inp, targ, axis=-1):
"Compute accuracy with `targ` when `pred` is bs * n_classes"
pred,targ = flatten_check(inp.argmax(dim=axis), targ)
return (pred == targ).float().mean()
# Cell
def error_rate(inp, targ, axis=-1):
"1 - `accuracy`"
return 1 - accuracy(inp, targ, axis=axis)
# Cell
def top_k_accuracy(inp, targ, k=5, axis=-1):
"Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
inp = inp.topk(k=k, dim=axis)[1]
targ = targ.unsqueeze(dim=axis).expand_as(inp)
return (inp == targ).sum(dim=-1).float().mean()
# Cell
def APScoreBinary(axis=-1, average='macro', pos_label=1, sample_weight=None):
"Average Precision for single-label binary classification problems"
return skm_to_fastai(skm.average_precision_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# Cell
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
"Balanced Accuracy for single-label binary classification problems"
return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
sample_weight=sample_weight, adjusted=adjusted)
# Cell
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
"Brier score for single-label classification problems"
return skm_to_fastai(skm.brier_score_loss, axis=axis,
sample_weight=sample_weight, pos_label=pos_label)
# Cell
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
"Cohen kappa for single-label classification problems"
return skm_to_fastai(skm.cohen_kappa_score, axis=axis, labels=labels, weights=weights,
sample_weight=sample_weight)
# Cell
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"F1 score for single-label classification problems"
return skm_to_fastai(skm.f1_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"FBeta score with `beta` for single-label classification problems"
return skm_to_fastai(skm.fbeta_score, axis=axis,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def HammingLoss(axis=-1, sample_weight=None):
"Hamming loss for single-label classification problems"
return skm_to_fastai(skm.hamming_loss, axis=axis,
sample_weight=sample_weight)
# Cell
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Jaccard score for single-label classification problems"
return skm_to_fastai(skm.jaccard_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Precision for single-label classification problems"
return skm_to_fastai(skm.precision_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Recall for single-label classification problems"
return skm_to_fastai(skm.recall_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='ovr'):
"Area Under the Receiver Operating Characteristic Curve for single-label multiclass classification problems"
assert multi_class in ['ovr', 'ovo']
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.Softmax, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# Cell
def RocAucBinary(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='raise'):
"Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# Cell
def MatthewsCorrCoef(sample_weight=None, **kwargs):
"Matthews correlation coefficient for single-label classification problems"
return skm_to_fastai(skm.matthews_corrcoef, sample_weight=sample_weight, **kwargs)
# Cell
class Perplexity(AvgLoss):
"Perplexity (exponential of cross-entropy loss) for Language Models"
@property
def value(self): return torch.exp(self.total/self.count) if self.count != 0 else None
@property
def name(self): return "perplexity"
perplexity = Perplexity()
# Cell
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Compute accuracy when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
return ((inp>thresh)==targ.bool()).float().mean()
# Cell
def APScoreMulti(sigmoid=True, average='macro', pos_label=1, sample_weight=None):
"Average Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.average_precision_score, activation=activation, flatten=False,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# Cell
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
"Brier score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.brier_score_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight, pos_label=pos_label)
# Cell
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"F1 score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.f1_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"FBeta score with `beta` for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.fbeta_score, thresh=thresh, activation=activation, flatten=False,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
"Hamming loss for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.hamming_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight)
# Cell
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Jaccard score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.jaccard_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
"Matthews correlation coefficient for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, activation=activation, flatten=False, sample_weight=sample_weight)
# Cell
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.precision_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Recall for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.recall_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# Cell
def RocAucMulti(sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
"Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.roc_auc_score, activation=activation, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr)
# Cell
def mse(inp,targ):
"Mean squared error between `inp` and `targ`."
return F.mse_loss(*flatten_check(inp,targ))
# Cell
def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ))
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
# Cell
def mae(inp,targ):
"Mean absolute error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return torch.abs(inp - targ).mean()
# Cell
def msle(inp, targ):
"Mean squared logarithmic error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ))
# Cell
def _exp_rmspe(inp,targ):
inp,targ = torch.exp(inp),torch.exp(targ)
return torch.sqrt(((targ - inp)/targ).pow(2).mean())
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
# Cell
def ExplainedVariance(sample_weight=None):
"Explained variance between predictions and targets"
return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight)
# Cell
def R2Score(sample_weight=None):
"R2 score between predictions and targets"
return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight)
# Cell
@delegates(AccumMetric)
def PearsonCorrCoef(dim_argmax=None, **kwargs):
"Pearson correlation coefficient for regression problem"
def pearsonr(x,y): return scs.pearsonr(x,y)[0]
return AccumMetric(pearsonr, invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# Cell
@delegates(AccumMetric)
def SpearmanCorrCoef(dim_argmax=None, axis=0, nan_policy='propagate', **kwargs):
"Spearman correlation coefficient for regression problem"
def spearmanr(a,b=None,**kwargs): return scs.spearmanr(a,b,**kwargs)[0]
return AccumMetric(partial(spearmanr, axis=axis, nan_policy=nan_policy),
invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# Cell
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
"Computes non-background accuracy for multiclass segmentation"
targ = targ.squeeze(1)
mask = targ != bkg_idx
return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean()
# Cell
class Dice(Metric):
"Dice coefficient metric for binary target in segmentation"
def __init__(self, axis=1): self.axis = axis
def reset(self): self.inter,self.union = 0,0
def accumulate(self, learn):
pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
self.inter += (pred*targ).float().sum().item()
self.union += (pred+targ).float().sum().item()
@property
def value(self): return 2. * self.inter/self.union if self.union > 0 else None
# Cell
class DiceMulti(Metric):
"Averaged Dice metric (Macro F1) for multiclass target in segmentation"
def __init__(self, axis=1): self.axis = axis
def reset(self): self.inter,self.union = {},{}
def accumulate(self, learn):
pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
for c in range(learn.pred.shape[self.axis]):
p = torch.where(pred == c, 1, 0)
t = torch.where(targ == c, 1, 0)
c_inter = (p*t).float().sum().item()
c_union = (p+t).float().sum().item()
if c in self.inter:
self.inter[c] += c_inter
self.union[c] += c_union
else:
self.inter[c] = c_inter
self.union[c] = c_union
@property
def value(self):
binary_dice_scores = np.array([])
for c in self.inter:
binary_dice_scores = np.append(binary_dice_scores, 2.*self.inter[c]/self.union[c] if self.union[c] > 0 else np.nan)
return np.nanmean(binary_dice_scores)
# Cell
class JaccardCoeff(Dice):
"Implementation of the Jaccard coefficient that is lighter in RAM"
@property
def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
# Cell
class CorpusBLEUMetric(Metric):
def __init__(self, vocab_sz=5000, axis=-1):
"BLEU Metric calculated over the validation corpus"
self.metric_name = 'CorpusBLEU'
self.axis, self.vocab_sz = axis, vocab_sz
self.pred_len,self.targ_len,self.samp_idx,self.corrects,self.counts, = 0,0,0,[0]*4,[0]*4
def reset(self):
self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4
class NGram():
def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
def __eq__(self, other):
if len(self.ngram) != len(other.ngram): return False
return np.all(np.array(self.ngram) == np.array(other.ngram))
def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))
def get_grams(self, x, n, max_n=5000):
return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]
def get_correct_ngrams(self, pred, targ, n, max_n=5000):
pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n)
pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)
return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)
def accumulate(self, learn):
if learn.training: return None
else:
last_output = learn.pred.argmax(dim=self.axis)
last_target = learn.y
for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):
self.pred_len += len(pred)
self.targ_len += len(targ)
smooth_mteval = 1
for i in range(4):
c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
if c == 0:
smooth_mteval *= 2
c = 1 / smooth_mteval # exp smoothing, method 3 from http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
self.corrects[i] += c
self.counts[i] += t
@property
def value(self):
if self.counts == 0: return None
elif max(self.corrects) == 0: return 0.0
else:
precs = [c/t for c,t in zip(self.corrects,self.counts)]
len_penalty = math.exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1
return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)
# Cell
class LossMetric(AvgMetric):
"Create a metric from `loss_func.attr` named `nm`"
def __init__(self, attr, nm=None): store_attr('attr,nm')
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(getattr(learn.loss_func, self.attr, 0))*bs
self.count += bs
@property
def name(self): return self.attr if self.nm is None else self.nm
# Cell
def LossMetrics(attrs, nms=None):
"List of `LossMetric` for each of `attrs` and `nms`"
if isinstance(attrs, str): attrs = attrs.split(',')
nms = attrs if nms is None else nms.split(',') if isinstance(nms, str) else nms
return [LossMetric(a, n) for a,n in zip(attrs,nms)]
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/metrics.py
|
metrics.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/12_optimizer.ipynb (unless otherwise specified).
__all__ = ['Optimizer', 'sgd_step', 'weight_decay', 'l2_reg', 'average_grad', 'average_sqr_grad', 'momentum_step',
'SGD', 'rms_prop_step', 'RMSProp', 'step_stat', 'debias', 'adam_step', 'Adam', 'radam_step', 'RAdam',
'qhadam_step', 'QHAdam', 'larc_layer_lr', 'larc_step', 'Larc', 'lamb_step', 'Lamb', 'Lookahead', 'ranger',
'detuplify_pg', 'set_item_pg', 'pytorch_hp_map', 'OptimWrapper']
# Cell
from .torch_basics import *
# Cell
class _BaseOptimizer():
"Common functionality between `Optimizer` and `OptimWrapper`"
def all_params(self, n=slice(None), with_grad=False):
res = L((p,pg,self.state[p],hyper) for pg,hyper in zip(self.param_lists[n],self.hypers[n]) for p in pg)
return L(o for o in res if hasattr(o[0], 'grad') and o[0].grad is not None) if with_grad else res
def _set_require_grad(self, rg, p,pg,state,h): p.requires_grad_(rg or state.get('force_train', False))
def freeze_to(self, n):
self.frozen_idx = n if n >= 0 else len(self.param_lists) + n
if self.frozen_idx >= len(self.param_lists):
warn(f"Freezing {self.frozen_idx} groups; model has {len(self.param_lists)}; whole model is frozen.")
for o in self.all_params(slice(n, None)): self._set_require_grad(True, *o)
for o in self.all_params(slice(None, n)): self._set_require_grad(False, *o)
def freeze(self):
assert(len(self.param_lists)>1)
self.freeze_to(-1)
def set_freeze(self, n, rg, ignore_force_train=False):
for p in self.param_lists[n]: p.requires_grad_(rg or (self.state[p].get('force_train', False) and not ignore_force_train))
def unfreeze(self): self.freeze_to(0)
def set_hypers(self, **kwargs): L(kwargs.items()).starmap(self.set_hyper)
def _set_hyper(self, k, v):
for v_,h in zip(v, self.hypers): h[k] = v_
def set_hyper(self, k, v):
if isinstance(v, slice):
if v.start: v = even_mults(v.start, v.stop, len(self.param_lists))
else: v = [v.stop/10]*(len(self.param_lists)-1) + [v.stop]
v = L(v, use_list=None)
if len(v)==1: v = v*len(self.param_lists)
assert len(v) == len(self.hypers), f"Trying to set {len(v)} values for {k} but there are {len(self.param_lists)} parameter groups."
self._set_hyper(k, v)
@property
def param_groups(self): return [{**{'params': pg}, **hp} for pg,hp in zip(self.param_lists, self.hypers)]
@param_groups.setter
def param_groups(self, v):
for pg,v_ in zip(self.param_lists,v): pg = v_['params']
for hyper,v_ in zip(self.hypers,v):
for k,t in v_.items():
if k != 'params': hyper[k] = t
# Cell
def _update(state, new=None):
if new is None: return state
if isinstance(new, dict): state.update(new)
return state
# Cell
class Optimizer(_BaseOptimizer):
"Base optimizer class for the fastai library, updating `params` with `cbs`"
_keep_on_clear = ['force_train', 'do_wd']
def __init__(self, params, cbs, train_bn=True, **defaults):
params = L(params)
self.cbs,self.state,self.train_bn = L(cbs),defaultdict(dict),train_bn
defaults = merge(*self.cbs.attrgot('defaults'), defaults)
self.param_lists = L(L(p) for p in params) if isinstance(params[0], (L,list)) else L([params])
self.hypers = L({} for _ in range_of(self.param_lists))
self.set_hypers(**defaults)
self.frozen_idx = 0
def zero_grad(self):
for p,*_ in self.all_params(with_grad=True):
p.grad.detach_()
p.grad.zero_()
def step(self):
for p,pg,state,hyper in self.all_params(with_grad=True):
for cb in self.cbs: state = _update(state, cb(p, **{**state, **hyper}))
self.state[p] = state
def clear_state(self):
for p,pg,state,hyper in self.all_params():
self.state[p] = {k: state[k] for k in self._keep_on_clear if k in state}
def state_dict(self):
state = [self.state[p] for p,*_ in self.all_params()]
return {'state': state, 'hypers': self.hypers}
def load_state_dict(self, sd):
assert len(sd["hypers"]) == len(self.param_lists)
assert len(sd["state"]) == sum([len(pg) for pg in self.param_lists])
self.hypers = sd['hypers']
self.state = {p: s for p,s in zip(self.all_params().itemgot(0), sd['state'])}
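# Illustrative usage sketch (hypothetical helper, not part of this module): an
# `Optimizer` is a list of stepper callbacks plus per-group hyper-parameters; each
# callback is called as `cb(p, **state, **hypers)` and may return new state.
def _example_optimizer_from_cbs():
    "Minimal sketch: build an `Optimizer` from a single hand-written stepper."
    import torch
    def my_step(p, lr, **kwargs): p.data.add_(p.grad.data, alpha=-lr)  # same idea as `sgd_step` below
    p = torch.randn(3, requires_grad=True)
    opt = Optimizer([p], [my_step], lr=0.1)
    p.sum().backward()
    opt.step()       # runs `my_step` on every parameter that has a gradient
    opt.zero_grad()  # detaches and zeroes the gradients
    return opt.hypers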
# Cell
def sgd_step(p, lr, **kwargs):
p.data.add_(p.grad.data, alpha=-lr)
# Cell
def weight_decay(p, lr, wd, do_wd=True, **kwargs):
"Weight decay as decaying `p` with `lr*wd`"
if do_wd and wd!=0: p.data.mul_(1 - lr*wd)
weight_decay.defaults = dict(wd=0.)
# Cell
def l2_reg(p, lr, wd, do_wd=True, **kwargs):
"L2 regularization as adding `wd*p` to `p.grad`"
if do_wd and wd!=0: p.grad.data.add_(p.data, alpha=wd)
l2_reg.defaults = dict(wd=0.)
# Cell
def average_grad(p, mom, dampening=False, grad_avg=None, **kwargs):
"Keeps track of the avg grads of `p` in `state` with `mom`."
if grad_avg is None: grad_avg = torch.zeros_like(p.grad.data)
damp = 1-mom if dampening else 1.
grad_avg.mul_(mom).add_(p.grad.data, alpha=damp)
return {'grad_avg': grad_avg}
average_grad.defaults = dict(mom=0.9)
# Cell
def average_sqr_grad(p, sqr_mom, dampening=True, sqr_avg=None, **kwargs):
if sqr_avg is None: sqr_avg = torch.zeros_like(p.grad.data)
damp = 1-sqr_mom if dampening else 1.
sqr_avg.mul_(sqr_mom).addcmul_(p.grad.data, p.grad.data, value=damp)
return {'sqr_avg': sqr_avg}
average_sqr_grad.defaults = dict(sqr_mom=0.99)
# Cell
def momentum_step(p, lr, grad_avg, **kwargs):
"Step for SGD with momentum with `lr`"
p.data.add_(grad_avg, alpha=-lr)
# Cell
def SGD(params, lr, mom=0., wd=0., decouple_wd=True):
"A `Optimizer` for SGD with `lr` and `mom` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom != 0: cbs.append(average_grad)
cbs.append(sgd_step if mom==0 else momentum_step)
return Optimizer(params, cbs, lr=lr, mom=mom, wd=wd)
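# Illustrative usage sketch (hypothetical helper, not part of this module): with
# `mom != 0`, `SGD` chains `weight_decay`, `average_grad` and `momentum_step`.
def _example_sgd_usage():
    "Minimal sketch: one momentum-SGD step on a single tensor parameter."
    import torch
    p = torch.randn(4, requires_grad=True)
    opt = SGD([p], lr=0.1, mom=0.9)
    p.pow(2).sum().backward()
    opt.step()                       # `momentum_step` updates `p` in place
    opt.zero_grad()
    return opt.state[p]['grad_avg']  # running average kept by `average_grad`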
# Cell
def rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):
"Step for SGD with momentum with `lr`"
denom = sqr_avg.sqrt().add_(eps)
p.data.addcdiv_((grad_avg if grad_avg is not None else p.grad), denom, value=-lr)
rms_prop_step.defaults = dict(eps=1e-8)
# Cell
def RMSProp(params, lr, sqr_mom=0.99, mom=0., wd=0., decouple_wd=True):
"A `Optimizer` for RMSProp with `lr`, `sqr_mom`, `mom` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += ([average_sqr_grad] if mom==0. else [average_grad, average_sqr_grad])
cbs.append(rms_prop_step)
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, wd=wd)
# Cell
def step_stat(p, step=0, **kwargs):
"Register the number of steps done in `state` for `p`"
step += 1
return {'step' : step}
# Cell
def debias(mom, damp, step): return damp * (1 - mom**step) / (1-mom)
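# Worked form of `debias` (editorial note, not original source): with `damp = 1-mom`, as
# used in `adam_step`/`radam_step`/`lamb_step` below, `debias(mom, 1-mom, step)` simplifies
# to `1 - mom**step`, the standard Adam bias-correction factor for a zero-initialised
# exponential moving average.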
# Cell
def adam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for Adam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(grad_avg, (sqr_avg/debias2).sqrt() + eps, value = -lr / debias1)
return p
adam_step._defaults = dict(eps=1e-5)
# Cell
def Adam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0.01, decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, adam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
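# Illustrative usage sketch (hypothetical helper, not part of this module): Adam keeps
# `grad_avg`, `sqr_avg` and `step` per parameter in `opt.state`; with `decouple_wd=True`
# (the default) weight decay scales the weights directly instead of touching the gradient.
def _example_adam_usage():
    "Minimal sketch: a single Adam step, then inspect the per-parameter state."
    import torch
    p = torch.randn(5, requires_grad=True)
    opt = Adam([p], lr=1e-3, wd=0.01)
    p.pow(2).sum().backward()
    opt.step()
    return sorted(opt.state[p])  # ['grad_avg', 'sqr_avg', 'step']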
# Cell
def radam_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, beta, **kwargs):
"Step for RAdam with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r_inf = 2/(1-sqr_mom) - 1
r = r_inf - 2*step*sqr_mom**step/(1-sqr_mom**step)
if r > 5:
v = math.sqrt(((r-4) * (r-2) * r_inf)/((r_inf-4)*(r_inf-2)*r))
denom = (sqr_avg/debias2).sqrt()
if eps: denom += eps
if beta: denom = F.softplus(denom, beta)
p.data.addcdiv_(grad_avg, denom, value = -lr*v / debias1)
else: p.data.add_(grad_avg, alpha=-lr / debias1)
return p
radam_step._defaults = dict(eps=1e-5)
# Cell
def RAdam(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., beta=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, radam_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd, beta=beta)
# Cell
def qhadam_step(p, lr, mom, sqr_mom, sqr_avg, nu_1, nu_2, step, grad_avg, eps, **kwargs):
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
p.data.addcdiv_(((1-nu_1) * p.grad.data) + (nu_1 * (grad_avg / debias1)),
(((1 - nu_2) * (p.grad.data)**2) + (nu_2 * (sqr_avg / debias2))).sqrt() + eps,
value = -lr)
return p
qhadam_step._defaults = dict(eps=1e-8)
# Cell
def QHAdam(params, lr, mom=0.999, sqr_mom=0.999, nu_1=0.7, nu_2 = 1.0, eps=1e-8, wd=0., decouple_wd=True):
"An `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `nus`, eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), partial(average_sqr_grad, dampening=True), step_stat, qhadam_step]
return Optimizer(params, cbs, lr=lr, nu_1=nu_1, nu_2=nu_2 ,
mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# Cell
def larc_layer_lr(p, lr, trust_coeff, wd, eps, clip=True, **kwargs):
"Computes the local lr before weight decay is applied"
p_norm,g_norm = torch.norm(p.data),torch.norm(p.grad.data)
local_lr = lr*trust_coeff * (p_norm) / (g_norm + p_norm * wd + eps)
return {'local_lr': min(lr, local_lr) if clip else local_lr}
larc_layer_lr.defaults = dict(trust_coeff=0.02, wd=0., eps=1e-8)
# Cell
def larc_step(p, local_lr, grad_avg=None, **kwargs):
"Step for LARC `local_lr` on `p`"
p.data.add_(p.grad.data if grad_avg is None else grad_avg, alpha = -local_lr)
# Cell
def Larc(params, lr, mom=0.9, clip=True, trust_coeff=0.02, eps=1e-8, wd=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
if mom!=0.: cbs.append(average_grad)
cbs += [partial(larc_layer_lr, clip=clip), larc_step]
return Optimizer(params, cbs, lr=lr, mom=mom, trust_coeff=trust_coeff, eps=eps, wd=wd)
# Cell
def lamb_step(p, lr, mom, step, sqr_mom, grad_avg, sqr_avg, eps, **kwargs):
"Step for LAMB with `lr` on `p`"
debias1 = debias(mom, 1-mom, step)
debias2 = debias(sqr_mom, 1-sqr_mom, step)
r1 = p.data.pow(2).mean().sqrt()
step = (grad_avg/debias1) / ((sqr_avg/debias2).sqrt()+eps)
r2 = step.pow(2).mean().sqrt()
q = 1 if r1 == 0 or r2 == 0 else min(r1/r2,10)
p.data.add_(step, alpha = -lr * q)
lamb_step._defaults = dict(eps=1e-6, wd=0.)
# Cell
def Lamb(params, lr, mom=0.9, sqr_mom=0.99, eps=1e-5, wd=0., decouple_wd=True):
"A `Optimizer` for Adam with `lr`, `mom`, `sqr_mom`, `eps` and `params`"
cbs = [weight_decay] if decouple_wd else [l2_reg]
cbs += [partial(average_grad, dampening=True), average_sqr_grad, step_stat, lamb_step]
return Optimizer(params, cbs, lr=lr, mom=mom, sqr_mom=sqr_mom, eps=eps, wd=wd)
# Cell
class Lookahead(Optimizer, GetAttr):
"Wrap `opt` in a lookahead optimizer"
_default='opt'
def __init__(self, opt, k=6, alpha=0.5):
store_attr('opt,k,alpha')
self._init_state()
def step(self):
if self.slow_weights is None: self._copy_weights()
self.opt.step()
self.count += 1
if self.count%self.k != 0: return
for slow_pg,fast_pg in zip(self.slow_weights,self.param_lists):
for slow_p,fast_p in zip(slow_pg,fast_pg):
slow_p.data.add_(fast_p.data-slow_p.data, alpha=self.alpha)
fast_p.data.copy_(slow_p.data)
def clear_state(self):
self.opt.clear_state()
self._init_state()
def state_dict(self):
state = self.opt.state_dict()
state.update({'count': self.count, 'slow_weights': self.slow_weights})
return state
def load_state_dict(self, sd):
self.count = sd.pop('count')
self.slow_weights = sd.pop('slow_weights')
self.opt.load_state_dict(sd)
def _init_state(self): self.count,self.slow_weights = 0,None
def _copy_weights(self): self.slow_weights = L(L(p.clone().detach() for p in pg) for pg in self.param_lists)
@property
def param_lists(self): return self.opt.param_lists
@param_lists.setter
def param_lists(self, v): self.opt.param_lists = v
# Cell
@delegates(RAdam)
def ranger(p, lr, mom=0.95, wd=0.01, eps=1e-6, **kwargs):
"Convenience method for `Lookahead` with `RAdam`"
return Lookahead(RAdam(p, lr=lr, mom=mom, wd=wd, eps=eps, **kwargs))
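# Illustrative usage sketch (hypothetical helper, not part of this module): `ranger`
# returns a `Lookahead`-wrapped `RAdam`, so it can be passed anywhere an `opt_func`
# is expected (e.g. `Learner(..., opt_func=ranger)`).
def _example_ranger_usage():
    "Minimal sketch: a manual step with the ranger optimizer."
    import torch
    from torch import nn
    model = nn.Linear(4, 2)
    opt = ranger(model.parameters(), lr=1e-3)
    model(torch.randn(8, 4)).sum().backward()
    opt.step()       # RAdam step; slow weights are synced every `k`=6 steps
    opt.zero_grad()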
# Cell
def detuplify_pg(d):
res = {}
for k,v in d.items():
if k == 'params': continue
if is_listy(v): res.update(**{f'{k}__{i}': v_ for i,v_ in enumerate(v)})
else: res[k] = v
return res
# Cell
def set_item_pg(pg, k, v):
if '__' not in k: pg[k] = v
else:
name,idx = k.split('__')
pg[name] = tuple(v if i==int(idx) else pg[name][i] for i in range_of(pg[name]))
return pg
# Cell
pytorch_hp_map = {'momentum': 'mom', 'weight_decay': 'wd', 'alpha': 'sqr_mom', 'betas__0': 'mom', 'betas__1': 'sqr_mom'}
# Cell
class OptimWrapper(_BaseOptimizer, GetAttr):
_xtra=['zero_grad', 'step', 'state_dict', 'load_state_dict']
_default='opt'
def __init__(self, opt, hp_map=None):
self.opt = opt
if hp_map is None: hp_map = pytorch_hp_map
self.fwd_map = {k: hp_map[k] if k in hp_map else k for k in detuplify_pg(opt.param_groups[0]).keys()}
self.bwd_map = {v:k for k,v in self.fwd_map.items()}
self.state = defaultdict(dict, {})
self.frozen_idx = 0
@property
def hypers(self):
return [{self.fwd_map[k]:v for k,v in detuplify_pg(pg).items() if k != 'params'} for pg in self.opt.param_groups]
def _set_hyper(self, k, v):
for pg,v_ in zip(self.opt.param_groups,v): pg = set_item_pg(pg, self.bwd_map[k], v_)
def clear_state(self): self.opt.state = defaultdict(dict, {})
@property
def param_lists(self): return [pg['params'] for pg in self.opt.param_groups]
@param_lists.setter
def param_lists(self, v):
for pg,v_ in zip(self.opt.param_groups,v): pg['params'] = v_
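# Illustrative usage sketch (hypothetical helper, not part of this module): `OptimWrapper`
# exposes a stock PyTorch optimizer through the fastai hyper-parameter API, remapping
# names such as `momentum`->`mom` and `weight_decay`->`wd` via `pytorch_hp_map`.
def _example_optimwrapper_usage():
    "Minimal sketch: wrap `torch.optim.SGD` and drive it with `set_hypers`."
    import torch
    from torch import nn
    model = nn.Linear(4, 2)
    opt = OptimWrapper(torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9))
    opt.set_hypers(lr=0.01, mom=0.95)   # written back to the wrapped param groups
    return opt.hypers                   # fastai-style view of the PyTorch param groups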
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/optimizer.py
|
optimizer.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13a_learner.ipynb (unless otherwise specified).
__all__ = ['CancelStepException', 'CancelFitException', 'CancelEpochException', 'CancelTrainException',
'CancelValidException', 'CancelBatchException', 'replacing_yield', 'mk_metric', 'save_model', 'load_model',
'Learner', 'before_batch_cb', 'load_learner', 'to_detach_from_dl', 'Metric', 'AvgMetric', 'AvgLoss',
'AvgSmoothLoss', 'ValueMetric', 'Recorder']
# Cell
from .data.all import *
from .optimizer import *
from .callback.core import *
import pickle
# Cell
#nbdev_comment _all_ = ['CancelStepException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# Cell
defaults.lr = 1e-3
# Cell
def replacing_yield(o, attr, val):
"Context manager to temporarily replace an attribute"
old = getattr(o,attr)
try: yield setattr(o,attr,val)
finally: setattr(o,attr,old)
# Cell
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
if isinstance(m,type): m = m()
return m if isinstance(m, Metric) else AvgMetric(m)
# Cell
def save_model(file, model, opt, with_opt=True, pickle_protocol=2):
"Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
if rank_distrib(): return # don't save if child proc
if opt is None: with_opt=False
state = get_model(model).state_dict()
if with_opt: state = {'model': state, 'opt':opt.state_dict()}
torch.save(state, file, pickle_protocol=pickle_protocol)
# Cell
def load_model(file, model, opt, with_opt=True, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(model_state, strict=strict)
if hasopt and with_opt:
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
# Cell
_before_epoch = [event.before_fit, event.before_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# Cell
class _ConstantFunc():
"Returns a function that returns `o`"
def __init__(self, o): self.o = o
def __call__(self, *args, **kwargs): return self.o
# Cell
_loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train',
'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step',
'after_step', 'after_cancel_batch', 'after_batch','End Batch Loop','End Train',
'after_cancel_train', 'after_train', 'Start Valid', 'before_validate','Start Batch Loop',
'**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate',
'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit',
'after_cancel_fit', 'after_fit']
# Cell
class Learner(GetAttr):
_default='model'
def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
moms=(0.95,0.85,0.95)):
path = Path(path) if path is not None else getattr(dls, 'path', Path('.'))
if loss_func is None:
loss_func = getattr(dls.train_ds, 'loss_func', None)
assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
self.dls,self.model = dls,model
store_attr(but='dls,model,cbs')
self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
self.add_cbs(L(defaults.callbacks)+L(cbs))
self("after_create")
@property
def metrics(self): return self._metrics
@metrics.setter
def metrics(self,v): self._metrics = L(v).map(mk_metric)
def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls))
def add_cbs(self, cbs):
L(cbs).map(self.add_cb)
return self
def remove_cbs(self, cbs):
L(cbs).map(self.remove_cb)
return self
def add_cb(self, cb):
if isinstance(cb, type): cb = cb()
cb.learn = self
setattr(self, cb.name, cb)
self.cbs.append(cb)
return self
def remove_cb(self, cb):
if isinstance(cb, type): self.remove_cbs(self._grab_cbs(cb))
else:
cb.learn = None
if hasattr(self, cb.name): delattr(self, cb.name)
if cb in self.cbs: self.cbs.remove(cb)
return self
@contextmanager
def added_cbs(self, cbs):
self.add_cbs(cbs)
try: yield
finally: self.remove_cbs(cbs)
@contextmanager
def removed_cbs(self, cbs):
self.remove_cbs(cbs)
try: yield self
finally: self.add_cbs(cbs)
def ordered_cbs(self, event): return [cb for cb in self.cbs.sorted('order') if hasattr(cb, event)]
def __call__(self, event_name): L(event_name).map(self._call_one)
def _call_one(self, event_name):
if not hasattr(event, event_name): raise Exception(f'missing {event_name}')
for cb in self.cbs.sorted('order'): cb(event_name)
def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
def create_opt(self):
self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
if not self.wd_bn_bias:
for p in self._bn_bias_state(True ): p['do_wd'] = False
if self.train_bn:
for p in self._bn_bias_state(False): p['force_train'] = True
def _split(self, b):
i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
self.xb,self.yb = b[:i],b[i:]
def _with_events(self, f, event_type, ex, final=noop):
try: self(f'before_{event_type}'); f()
except ex: self(f'after_cancel_{event_type}')
self(f'after_{event_type}'); final()
def all_batches(self):
self.n_iter = len(self.dl)
for o in enumerate(self.dl): self.one_batch(*o)
def _do_one_batch(self):
self.pred = self.model(*self.xb)
self('after_pred')
if len(self.yb):
self.loss_grad = self.loss_func(self.pred, *self.yb)
self.loss = self.loss_grad.clone()
self('after_loss')
if not self.training or not len(self.yb): return
self('before_backward')
self.loss_grad.backward()
self._with_events(self.opt.step, 'step', CancelStepException)
self.opt.zero_grad()
def one_batch(self, i, b):
self.iter = i
self._split(b)
self._with_events(self._do_one_batch, 'batch', CancelBatchException)
def _do_epoch_train(self):
self.dl = self.dls.train
self._with_events(self.all_batches, 'train', CancelTrainException)
def _do_epoch_validate(self, ds_idx=1, dl=None):
if dl is None: dl = self.dls[ds_idx]
self.dl = dl
with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException)
def _do_epoch(self):
self._do_epoch_train()
self._do_epoch_validate()
def _do_fit(self):
for epoch in range(self.n_epoch):
self.epoch=epoch
self._with_events(self._do_epoch, 'epoch', CancelEpochException)
def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
with self.added_cbs(cbs):
if reset_opt or not self.opt: self.create_opt()
if wd is None: wd = self.wd
if wd is not None: self.opt.set_hypers(wd=wd)
self.opt.set_hypers(lr=self.lr if lr is None else lr)
self.n_epoch = n_epoch
self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
def __enter__(self): self(_before_epoch); return self
def __exit__(self, exc_type, exc_value, tb): self(_after_epoch)
def validation_context(self, cbs=None, inner=False):
cms = [self.no_logging(),self.no_mbar()]
if cbs: cms.append(self.added_cbs(cbs))
if not inner: cms.append(self)
return ContextManagers(cms)
def validate(self, ds_idx=1, dl=None, cbs=None):
if dl is None: dl = self.dls[ds_idx]
with self.validation_context(cbs=cbs): self._do_epoch_validate(ds_idx, dl)
return getattr(self, 'final_record', None)
@delegates(GatherPredsCallback.__init__)
def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
inner=False, reorder=True, cbs=None, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
else:
try: len(dl)
except TypeError as e:
raise TypeError("`dl` is something other than a single `DataLoader` object")
if reorder and hasattr(dl, 'get_idxs'):
idxs = dl.get_idxs()
dl = dl.new(get_idxs = _ConstantFunc(idxs))
cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
ctx_mgrs = self.validation_context(cbs=L(cbs)+[cb], inner=inner)
if with_loss: ctx_mgrs.append(self.loss_not_reduced())
        try:
            with ContextManagers(ctx_mgrs):
                self._do_epoch_validate(dl=dl)
                if act is None: act = getattr(self.loss_func, 'activation', noop)
                res = cb.all_tensors()
                pred_i = 1 if with_input else 0
                if res[pred_i] is not None:
                    res[pred_i] = act(res[pred_i])
                    if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
                if reorder and hasattr(dl, 'get_idxs'): res = nested_reorder(res, tensor(idxs).argsort())
                return tuple(res)
        finally: self._end_cleanup()
def predict(self, item, rm_type_tfms=None, with_input=False):
dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
i = getattr(self.dls, 'n_inp', -1)
inp = (inp,) if i==1 else tuplify(inp)
dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0]
dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
res = dec_targ,dec_preds[0],preds[0]
if with_input: res = (dec_inp,) + res
return res
def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
b = dl.one_batch()
_,_,preds = self.get_preds(dl=[b], with_decoded=True)
self.dls.show_results(b, preds, max_n=max_n, **kwargs)
def show_training_loop(self):
indent = 0
for s in _loop:
if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
@contextmanager
def no_logging(self): return replacing_yield(self, 'logger', noop)
@contextmanager
def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
@contextmanager
def loss_not_reduced(self):
if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
def to_detach(self,b,cpu=True,gather=True):
return self.dl.to_detach(b,cpu,gather) if hasattr(getattr(self,'dl',None),'to_detach') else to_detach(b,cpu,gather)
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
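# Illustrative usage sketch (hypothetical helper and synthetic data, not part of this
# module): the smallest possible `Learner` needs a `DataLoaders`, a model and a loss
# function; a plain `func(pred, targ)` metric is wrapped by `mk_metric` into `AvgMetric`.
def _example_learner_usage():
    "Minimal sketch: fit a linear model on synthetic data for two epochs."
    import torch
    from torch import nn
    x = torch.randn(64, 1); y = 2*x + 1
    train, valid = list(zip(x[:48], y[:48])), list(zip(x[48:], y[48:]))
    dls = DataLoaders.from_dsets(train, valid, bs=16)
    def mae_fn(pred, targ): return (pred - targ).abs().mean()
    learn = Learner(dls, nn.Linear(1, 1), loss_func=nn.MSELoss(), metrics=mae_fn)
    learn.fit(2, lr=1e-2)
    return learn.recorder.values  # one [train_loss, valid_loss, mae_fn] record per epoch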
# Cell
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
added_cbs="Context manage that temporarily adds `cbs`",
removed_cbs="Context manage that temporarily removes `cbs`",
ordered_cbs="List of `Callback`s, in order, for an `event` in the training loop",
create_opt="Create an optimizer with default hyper-parameters",
one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
all_batches="Train or evaluate `self.model` on all the batches of `self.dl`",
fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
validate="Validate on `dl` with potential new `cbs`.",
get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
predict="Prediction on `item`, fully decoded, loss function decoded and probabilities",
validation_context="A `ContextManagers` suitable for validation, with optional `cbs`",
show_results="Show some predictions on `ds_idx`-th dataset or `dl`",
show_training_loop="Show each step in the training loop",
no_logging="Context manager to temporarily remove `logger`",
no_mbar="Context manager to temporarily prevent the master progress bar from being created",
loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
to_detach="Calls `to_detach` if `self.dl` provides a `.to_detach` function otherwise calls global `to_detach`",
__call__="Call `event_name` for all `Callback`s in `self.cbs`"
)
# Cell
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# Cell
def _before_batch_cb(f, self):
xb,yb = f(self, self.xb, self.yb)
self.learn.xb,self.learn.yb = xb,yb
# Cell
def before_batch_cb(f):
"Shortcut for creating a Callback on the `before_batch` event, which takes and returns `xb,yb`"
return Callback(before_batch=partial(_before_batch_cb, f))
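# Illustrative usage sketch (hypothetical helper, not part of this module): the decorated
# function receives and returns `xb,yb`; the resulting `Callback` can be passed via `cbs`.
def _example_before_batch_cb():
    "Minimal sketch: add a little Gaussian noise to the inputs of every batch."
    import torch
    @before_batch_cb
    def add_noise(self, xb, yb):
        return tuple(x + 0.01*torch.randn_like(x) for x in xb), yb
    return add_noise   # e.g. learn.fit(1, cbs=add_noise)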
# Cell
@patch
@delegates(save_model)
def save(self:Learner, file, **kwargs):
"Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`"
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
save_model(file, self.model, getattr(self,'opt',None), **kwargs)
return file
# Cell
@patch
@delegates(load_model)
def load(self:Learner, file, device=None, **kwargs):
"Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
if device is None and hasattr(self.dls, 'device'): device = self.dls.device
if self.opt is None: self.create_opt()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model(file, self.model, self.opt, device=device, **kwargs)
return self
# Cell
@patch
def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2):
"Export the content of `self` without the items and the optimizer state for inference"
if rank_distrib(): return # don't export if child proc
self._end_cleanup()
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict() if self.opt is not None else None
self.opt = None
with warnings.catch_warnings():
        #To avoid the warning that comes from PyTorch about the model not being checked
warnings.simplefilter("ignore")
torch.save(self, self.path/fname, pickle_module=pickle_module, pickle_protocol=pickle_protocol)
self.create_opt()
if state is not None: self.opt.load_state_dict(state)
self.dls = old_dbunch
# Cell
def load_learner(fname, cpu=True, pickle_module=pickle):
"Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
distrib_barrier()
res = torch.load(fname, map_location='cpu' if cpu else None, pickle_module=pickle_module)
if hasattr(res, 'to_fp32'): res = res.to_fp32()
if cpu: res.dls.cpu()
return res
# Cell
def to_detach_from_dl(learn:(Learner,NoneType),b:object,cpu:bool=True,gather:bool=True):
return learn.dl.to_detach(b,cpu,gather) if hasattr(getattr(learn,'dl',None),'to_detach') else to_detach(b,cpu,gather)
# Cell
@docs
class Metric():
"Blueprint for defining a metric"
def reset(self): pass
def accumulate(self, learn): pass
@property
def value(self): raise NotImplementedError
@property
def name(self): return class2attr(self, 'Metric')
_docs = dict(
reset="Reset inner state to prepare for new computation",
name="Name of the `Metric`, camel-cased and with Metric removed",
accumulate="Use `learn` to update the state with new results",
value="The value of the metric")
# Cell
def _maybe_reduce(val):
if num_distrib()>1:
val = val.clone()
torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
val /= num_distrib()
return val
# Cell
class AvgMetric(Metric):
"Average the values of `func` taking into account potential different batch sizes"
def __init__(self, func): self.func = func
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
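# Illustrative usage sketch (hypothetical helper, not part of this module): any plain
# `func(pred, targ)` passed to `Learner(metrics=...)` is wrapped like this by `mk_metric`,
# and its per-batch values are averaged weighted by batch size.
def _example_avg_metric():
    "Minimal sketch: wrap a plain function metric and inspect its reported name."
    def my_mae(pred, targ): return (pred - targ).abs().mean()
    met = mk_metric(my_mae)
    return type(met).__name__, met.name   # ('AvgMetric', 'my_mae')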
# Cell
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
# Cell
class AvgSmoothLoss(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean(), gather=False), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
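# Worked form of the smoothing (editorial note, not original source): `accumulate` computes
# `val <- beta*val + (1-beta)*loss` (which is what `torch.lerp(loss, val, beta)` does), and
# `value` divides by `1 - beta**count` to undo the bias from initialising `val` at 0,
# mirroring the `debias` correction used in the optimizer module.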
# Cell
class ValueMetric(Metric):
"Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`"
def __init__(self, func, metric_name=None): store_attr('func, metric_name')
@property
def value(self): return self.func()
@property
def name(self): return self.metric_name if self.metric_name else self.func.__name__
# Cell
from fastprogress.fastprogress import format_time
# Cell
def _maybe_item(t):
t = t.value
try: return t.item()
except: return t
# Cell
class Recorder(Callback):
"Callback that registers statistics (lr, loss and metrics) during training"
_stateattrs=('lrs','iters','losses','values')
remove_on_fetch,order = True,50
def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
store_attr('add_time,train_metrics,valid_metrics')
self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
def before_fit(self):
"Prepare state for training"
self.lrs,self.iters,self.losses,self.values = [],[],[],[]
names = self.metrics.attrgot('name')
if self.train_metrics and self.valid_metrics:
names = L('loss') + names
names = names.map('train_{}') + names.map('valid_{}')
elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
else: names = L('train_loss') + names
if self.add_time: names.append('time')
self.metric_names = 'epoch'+names
self.smooth_loss.reset()
def after_batch(self):
"Update all metrics and records lr and smooth loss in training"
if len(self.yb) == 0: return
mets = self._train_mets if self.training else self._valid_mets
for met in mets: met.accumulate(self.learn)
if not self.training: return
self.lrs.append(self.opt.hypers[-1]['lr'])
self.losses.append(self.smooth_loss.value)
self.learn.smooth_loss = self.smooth_loss.value
def before_epoch(self):
"Set timer if `self.add_time=True`"
self.cancel_train,self.cancel_valid = False,False
if self.add_time: self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0))
def before_train (self): self._train_mets[1:].map(Self.reset())
def before_validate(self): self._valid_mets.map(Self.reset())
def after_train (self): self.log += self._train_mets.map(_maybe_item)
def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
def after_cancel_train(self): self.cancel_train = True
def after_cancel_validate(self): self.cancel_valid = True
def after_epoch(self):
"Store and log the loss/metric values"
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
self.logger(self.log)
self.iters.append(self.smooth_loss.count)
@property
def _train_mets(self):
if getattr(self, 'cancel_train', False): return L()
return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
@property
def _valid_mets(self):
if getattr(self, 'cancel_valid', False): return L()
return (L(self.loss) + self.metrics if self.valid_metrics else L())
def plot_loss(self, skip_start=5, with_valid=True):
plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
if with_valid:
idx = (np.array(self.iters)<skip_start).sum()
plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
plt.legend()
# Cell
add_docs(Recorder,
before_train = "Reset loss and metrics state",
after_train = "Log loss and metric values on the training set (if `self.training_metrics=True`)",
before_validate = "Reset loss and metrics state",
after_validate = "Log loss and metric values on the validation set",
after_cancel_train = "Ignore training metrics for this epoch",
after_cancel_validate = "Ignore validation metrics for this epoch",
plot_loss = "Plot the losses from `skip_start` and onward")
if Recorder not in defaults.callbacks: defaults.callbacks.append(Recorder)
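# Illustrative usage sketch (hypothetical helper, not part of this module): after a call
# to `learn.fit(...)`, the `Recorder` instance at `learn.recorder` holds the history.
def _example_recorder_usage(learn):
    "Minimal sketch: inspect what the Recorder collected during training."
    print(learn.recorder.metric_names)  # e.g. ['epoch', 'train_loss', 'valid_loss', ..., 'time']
    print(learn.recorder.values[-1])    # last epoch's record, same as `learn.final_record`
    learn.recorder.plot_loss()          # per-batch train losses, per-epoch valid losses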
# Cell
@patch
def freeze_to(self:Learner, n):
if self.opt is None: self.create_opt()
self.opt.freeze_to(n)
self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
freeze_to="Freeze parameter groups up to `n`",
freeze="Freeze up to last parameter group",
unfreeze="Unfreeze the entire model")
# Cell
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
"Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
if dl is None: dl = self.dls[ds_idx]
if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
try:
self(_before_epoch)
with dl.dataset.set_split_idx(0), self.no_mbar():
if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
aug_preds = []
for i in self.progress.mbar if hasattr(self,'progress') else range(n):
self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
aug_preds.append(self.get_preds(dl=dl, inner=True)[0][None])
aug_preds = torch.cat(aug_preds)
aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
self.epoch = n
with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(dl=dl, inner=True)
finally: self(event.after_fit)
if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
return preds,targs
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/learner.py
|
learner.py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"subplots": "00_torch_core.ipynb",
"show_image": "00_torch_core.ipynb",
"show_titled_image": "00_torch_core.ipynb",
"show_images": "00_torch_core.ipynb",
"ArrayBase": "00_torch_core.ipynb",
"ArrayImageBase": "00_torch_core.ipynb",
"ArrayImage": "00_torch_core.ipynb",
"ArrayImageBW": "00_torch_core.ipynb",
"ArrayMask": "00_torch_core.ipynb",
"Tensor.__array_eq__": "00_torch_core.ipynb",
"tensor": "00_torch_core.ipynb",
"set_seed": "00_torch_core.ipynb",
"get_random_states": "00_torch_core.ipynb",
"set_random_states": "00_torch_core.ipynb",
"no_random": "00_torch_core.ipynb",
"unsqueeze": "00_torch_core.ipynb",
"unsqueeze_": "00_torch_core.ipynb",
"apply": "00_torch_core.ipynb",
"maybe_gather": "00_torch_core.ipynb",
"to_detach": "00_torch_core.ipynb",
"to_half": "00_torch_core.ipynb",
"to_float": "00_torch_core.ipynb",
"defaults.use_cuda": "00_torch_core.ipynb",
"default_device": "00_torch_core.ipynb",
"to_device": "00_torch_core.ipynb",
"to_cpu": "00_torch_core.ipynb",
"to_np": "00_torch_core.ipynb",
"to_concat": "00_torch_core.ipynb",
"Tensor.set_meta": "00_torch_core.ipynb",
"Tensor.as_subclass": "00_torch_core.ipynb",
"TensorBase": "00_torch_core.ipynb",
"TensorImageBase": "00_torch_core.ipynb",
"TensorImage": "00_torch_core.ipynb",
"TensorImageBW": "00_torch_core.ipynb",
"TensorMask": "00_torch_core.ipynb",
"TensorFlowField": "00_torch_core.ipynb",
"TensorCategory": "00_torch_core.ipynb",
"TensorMultiCategory": "00_torch_core.ipynb",
"TitledTensorScalar": "00_torch_core.ipynb",
"L.tensored": "00_torch_core.ipynb",
"L.stack": "00_torch_core.ipynb",
"L.cat": "00_torch_core.ipynb",
"concat": "00_torch_core.ipynb",
"Chunks": "00_torch_core.ipynb",
"show_title": "00_torch_core.ipynb",
"ShowTitle": "00_torch_core.ipynb",
"TitledInt": "00_torch_core.ipynb",
"TitledFloat": "00_torch_core.ipynb",
"TitledStr": "00_torch_core.ipynb",
"TitledTuple": "00_torch_core.ipynb",
"TitledStr.truncate": "00_torch_core.ipynb",
"pd.DataFrame.__init__": "00_torch_core.ipynb",
"get_empty_df": "00_torch_core.ipynb",
"display_df": "00_torch_core.ipynb",
"get_first": "00_torch_core.ipynb",
"one_param": "00_torch_core.ipynb",
"item_find": "00_torch_core.ipynb",
"find_device": "00_torch_core.ipynb",
"find_bs": "00_torch_core.ipynb",
"np_func": "00_torch_core.ipynb",
"Module": "00_torch_core.ipynb",
"get_model": "00_torch_core.ipynb",
"one_hot": "00_torch_core.ipynb",
"one_hot_decode": "00_torch_core.ipynb",
"params": "00_torch_core.ipynb",
"trainable_params": "00_torch_core.ipynb",
"norm_types": "00_torch_core.ipynb",
"norm_bias_params": "00_torch_core.ipynb",
"batch_to_samples": "00_torch_core.ipynb",
"Tensor.interp_1d": "00_torch_core.ipynb",
"Tensor.pca": "00_torch_core.ipynb",
"logit": "00_torch_core.ipynb",
"num_distrib": "00_torch_core.ipynb",
"rank_distrib": "00_torch_core.ipynb",
"distrib_barrier": "00_torch_core.ipynb",
"Path.save_array": "00_torch_core.ipynb",
"Path.load_array": "00_torch_core.ipynb",
"base_doc": "00_torch_core.ipynb",
"doc": "00_torch_core.ipynb",
"nested_reorder": "00_torch_core.ipynb",
"make_cross_image": "00_torch_core.ipynb",
"show_image_batch": "00_torch_core.ipynb",
"requires_grad": "00_torch_core.ipynb",
"init_default": "01_layers.ipynb",
"cond_init": "00_torch_core.ipynb",
"apply_leaf": "00_torch_core.ipynb",
"apply_init": "00_torch_core.ipynb",
"script_use_ctx": "00_torch_core.ipynb",
"script_save_ctx": "00_torch_core.ipynb",
"script_fwd": "00_torch_core.ipynb",
"script_bwd": "00_torch_core.ipynb",
"grad_module": "00_torch_core.ipynb",
"module": "01_layers.ipynb",
"Identity": "01_layers.ipynb",
"Lambda": "01_layers.ipynb",
"PartialLambda": "01_layers.ipynb",
"Flatten": "01_layers.ipynb",
"View": "01_layers.ipynb",
"ResizeBatch": "01_layers.ipynb",
"Debugger": "01_layers.ipynb",
"sigmoid_range": "01_layers.ipynb",
"SigmoidRange": "01_layers.ipynb",
"AdaptiveConcatPool1d": "01_layers.ipynb",
"AdaptiveConcatPool2d": "01_layers.ipynb",
"PoolType": "01_layers.ipynb",
"adaptive_pool": "01_layers.ipynb",
"PoolFlatten": "01_layers.ipynb",
"NormType": "01_layers.ipynb",
"BatchNorm": "01_layers.ipynb",
"InstanceNorm": "01_layers.ipynb",
"BatchNorm1dFlat": "01_layers.ipynb",
"LinBnDrop": "01_layers.ipynb",
"sigmoid": "01_layers.ipynb",
"sigmoid_": "01_layers.ipynb",
"vleaky_relu": "01_layers.ipynb",
"init_linear": "01_layers.ipynb",
"defaults.activation": "01_layers.ipynb",
"ConvLayer": "01_layers.ipynb",
"AdaptiveAvgPool": "01_layers.ipynb",
"MaxPool": "01_layers.ipynb",
"AvgPool": "01_layers.ipynb",
"trunc_normal_": "01_layers.ipynb",
"Embedding": "01_layers.ipynb",
"SelfAttention": "01_layers.ipynb",
"PooledSelfAttention2d": "01_layers.ipynb",
"SimpleSelfAttention": "01_layers.ipynb",
"icnr_init": "01_layers.ipynb",
"PixelShuffle_ICNR": "01_layers.ipynb",
"sequential": "01_layers.ipynb",
"SequentialEx": "01_layers.ipynb",
"MergeLayer": "01_layers.ipynb",
"Cat": "01_layers.ipynb",
"SimpleCNN": "01_layers.ipynb",
"ProdLayer": "01_layers.ipynb",
"inplace_relu": "01_layers.ipynb",
"SEModule": "01_layers.ipynb",
"ResBlock": "01_layers.ipynb",
"SEBlock": "01_layers.ipynb",
"SEResNeXtBlock": "01_layers.ipynb",
"SeparableBlock": "01_layers.ipynb",
"swish": "01_layers.ipynb",
"Swish": "01_layers.ipynb",
"MishJitAutoFn": "01_layers.ipynb",
"mish": "01_layers.ipynb",
"Mish": "01_layers.ipynb",
"ParameterModule": "01_layers.ipynb",
"children_and_parameters": "01_layers.ipynb",
"has_children": "01_layers.ipynb",
"flatten_model": "01_layers.ipynb",
"NoneReduce": "01_layers.ipynb",
"in_channels": "01_layers.ipynb",
"BaseLoss": "01a_losses.ipynb",
"CrossEntropyLossFlat": "01a_losses.ipynb",
"BCEWithLogitsLossFlat": "01a_losses.ipynb",
"BCELossFlat": "01a_losses.ipynb",
"MSELossFlat": "01a_losses.ipynb",
"L1LossFlat": "01a_losses.ipynb",
"LabelSmoothingCrossEntropy": "01a_losses.ipynb",
"LabelSmoothingCrossEntropyFlat": "01a_losses.ipynb",
"fa_collate": "02_data.load.ipynb",
"fa_convert": "02_data.load.ipynb",
"SkipItemException": "02_data.load.ipynb",
"DataLoader": "02_data.load.ipynb",
"TfmdDL": "03_data.core.ipynb",
"DataLoaders": "03_data.core.ipynb",
"FilteredBase": "03_data.core.ipynb",
"TfmdLists": "03_data.core.ipynb",
"decode_at": "03_data.core.ipynb",
"show_at": "03_data.core.ipynb",
"Datasets": "03_data.core.ipynb",
"test_set": "03_data.core.ipynb",
"DataLoaders.test_dl": "03_data.core.ipynb",
"Config": "04_data.external.ipynb",
"URLs": "04_data.external.ipynb",
"download_url": "04_data.external.ipynb",
"download_data": "04_data.external.ipynb",
"file_extract": "04_data.external.ipynb",
"newest_folder": "04_data.external.ipynb",
"rename_extracted": "04_data.external.ipynb",
"untar_data": "04_data.external.ipynb",
"get_files": "05_data.transforms.ipynb",
"FileGetter": "05_data.transforms.ipynb",
"image_extensions": "05_data.transforms.ipynb",
"get_image_files": "05_data.transforms.ipynb",
"ImageGetter": "05_data.transforms.ipynb",
"get_text_files": "05_data.transforms.ipynb",
"ItemGetter": "05_data.transforms.ipynb",
"AttrGetter": "05_data.transforms.ipynb",
"RandomSplitter": "05_data.transforms.ipynb",
"TrainTestSplitter": "05_data.transforms.ipynb",
"IndexSplitter": "05_data.transforms.ipynb",
"GrandparentSplitter": "05_data.transforms.ipynb",
"FuncSplitter": "05_data.transforms.ipynb",
"MaskSplitter": "05_data.transforms.ipynb",
"FileSplitter": "05_data.transforms.ipynb",
"ColSplitter": "05_data.transforms.ipynb",
"RandomSubsetSplitter": "05_data.transforms.ipynb",
"parent_label": "05_data.transforms.ipynb",
"RegexLabeller": "05_data.transforms.ipynb",
"ColReader": "05_data.transforms.ipynb",
"CategoryMap": "05_data.transforms.ipynb",
"Categorize": "05_data.transforms.ipynb",
"Category": "05_data.transforms.ipynb",
"MultiCategorize": "05_data.transforms.ipynb",
"MultiCategory": "05_data.transforms.ipynb",
"OneHotEncode": "05_data.transforms.ipynb",
"EncodedMultiCategorize": "05_data.transforms.ipynb",
"RegressionSetup": "05_data.transforms.ipynb",
"get_c": "05_data.transforms.ipynb",
"ToTensor": "05_data.transforms.ipynb",
"IntToFloatTensor": "05_data.transforms.ipynb",
"broadcast_vec": "05_data.transforms.ipynb",
"Normalize": "05_data.transforms.ipynb",
"TransformBlock": "06_data.block.ipynb",
"CategoryBlock": "06_data.block.ipynb",
"MultiCategoryBlock": "06_data.block.ipynb",
"RegressionBlock": "06_data.block.ipynb",
"DataBlock": "06_data.block.ipynb",
"DataBlock.summary": "06_data.block.ipynb",
"imagenet_stats": "07_vision.core.ipynb",
"cifar_stats": "07_vision.core.ipynb",
"mnist_stats": "07_vision.core.ipynb",
"n_px": "07_vision.core.ipynb",
"shape": "60_medical.imaging.ipynb",
"aspect": "07_vision.core.ipynb",
"Image.Image.reshape": "07_vision.core.ipynb",
"Image.Image.to_bytes_format": "07_vision.core.ipynb",
"Image.Image.to_thumb": "07_vision.core.ipynb",
"Image.Image.resize_max": "07_vision.core.ipynb",
"to_image": "07_vision.core.ipynb",
"load_image": "07_vision.core.ipynb",
"image2tensor": "07_vision.core.ipynb",
"PILBase": "07_vision.core.ipynb",
"PILImage": "07_vision.core.ipynb",
"PILImageBW": "07_vision.core.ipynb",
"PILMask": "07_vision.core.ipynb",
"OpenMask": "07_vision.core.ipynb",
"OpenMask.loss_func": "07_vision.core.ipynb",
"PILMask.create": "07_vision.core.ipynb",
"AddMaskCodes": "07_vision.core.ipynb",
"TensorPoint": "07_vision.core.ipynb",
"TensorPointCreate": "07_vision.core.ipynb",
"TensorPointCreate.loss_func": "07_vision.core.ipynb",
"TensorPoint.create": "07_vision.core.ipynb",
"get_annotations": "07_vision.core.ipynb",
"TensorBBox": "07_vision.core.ipynb",
"LabeledBBox": "07_vision.core.ipynb",
"encodes": "40_tabular.core.ipynb",
"PointScaler": "07_vision.core.ipynb",
"BBoxLabeler": "07_vision.core.ipynb",
"decodes": "40_tabular.core.ipynb",
"get_grid": "08_vision.data.ipynb",
"clip_remove_empty": "08_vision.data.ipynb",
"bb_pad": "08_vision.data.ipynb",
"ImageBlock": "08_vision.data.ipynb",
"MaskBlock": "08_vision.data.ipynb",
"PointBlock": "08_vision.data.ipynb",
"BBoxBlock": "08_vision.data.ipynb",
"PointBlock.__doc__": "08_vision.data.ipynb",
"BBoxBlock.__doc__": "08_vision.data.ipynb",
"BBoxLblBlock": "08_vision.data.ipynb",
"ImageDataLoaders": "08_vision.data.ipynb",
"ImageDataLoaders.from_csv": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_func": "08_vision.data.ipynb",
"ImageDataLoaders.from_path_re": "08_vision.data.ipynb",
"ImageDataLoaders.from_name_re": "08_vision.data.ipynb",
"SegmentationDataLoaders": "08_vision.data.ipynb",
"RandTransform": "09_vision.augment.ipynb",
"TensorTypes": "09_vision.augment.ipynb",
"Image.Image.flip_lr": "09_vision.augment.ipynb",
"TensorImageBase.flip_lr": "09_vision.augment.ipynb",
"TensorPoint.flip_lr": "09_vision.augment.ipynb",
"TensorBBox.flip_lr": "09_vision.augment.ipynb",
"FlipItem": "09_vision.augment.ipynb",
"PILImage.dihedral": "09_vision.augment.ipynb",
"TensorImage.dihedral": "09_vision.augment.ipynb",
"TensorPoint.dihedral": "09_vision.augment.ipynb",
"TensorBBox.dihedral": "09_vision.augment.ipynb",
"DihedralItem": "09_vision.augment.ipynb",
"TensorBBox.crop_pad": "09_vision.augment.ipynb",
"TensorPoint.crop_pad": "09_vision.augment.ipynb",
"Image.Image.crop_pad": "09_vision.augment.ipynb",
"CropPad": "09_vision.augment.ipynb",
"RandomCrop": "09_vision.augment.ipynb",
"OldRandomCrop": "09_vision.augment.ipynb",
"Resize": "09_vision.augment.ipynb",
"RandomResizedCrop": "09_vision.augment.ipynb",
"RatioResize": "09_vision.augment.ipynb",
"affine_grid": "09_vision.augment.ipynb",
"TensorImage.affine_coord": "09_vision.augment.ipynb",
"TensorMask.affine_coord": "09_vision.augment.ipynb",
"TensorPoint.affine_coord": "09_vision.augment.ipynb",
"TensorBBox.affine_coord": "09_vision.augment.ipynb",
"AffineCoordTfm": "09_vision.augment.ipynb",
"RandomResizedCropGPU": "09_vision.augment.ipynb",
"mask_tensor": "09_vision.augment.ipynb",
"affine_mat": "09_vision.augment.ipynb",
"flip_mat": "09_vision.augment.ipynb",
"TensorImage.flip_batch": "09_vision.augment.ipynb",
"TensorMask.flip_batch": "09_vision.augment.ipynb",
"TensorPoint.flip_batch": "09_vision.augment.ipynb",
"TensorBBox.flip_batch": "09_vision.augment.ipynb",
"Flip": "09_vision.augment.ipynb",
"DeterministicDraw": "09_vision.augment.ipynb",
"DeterministicFlip": "09_vision.augment.ipynb",
"dihedral_mat": "09_vision.augment.ipynb",
"TensorImage.dihedral_batch": "09_vision.augment.ipynb",
"TensorMask.dihedral_batch": "09_vision.augment.ipynb",
"TensorPoint.dihedral_batch": "09_vision.augment.ipynb",
"TensorBBox.dihedral_batch": "09_vision.augment.ipynb",
"Dihedral": "09_vision.augment.ipynb",
"DeterministicDihedral": "09_vision.augment.ipynb",
"rotate_mat": "09_vision.augment.ipynb",
"TensorImage.rotate": "09_vision.augment.ipynb",
"TensorMask.rotate": "09_vision.augment.ipynb",
"TensorPoint.rotate": "09_vision.augment.ipynb",
"TensorBBox.rotate": "09_vision.augment.ipynb",
"Rotate": "09_vision.augment.ipynb",
"zoom_mat": "09_vision.augment.ipynb",
"TensorImage.zoom": "09_vision.augment.ipynb",
"TensorMask.zoom": "09_vision.augment.ipynb",
"TensorPoint.zoom": "09_vision.augment.ipynb",
"TensorBBox.zoom": "09_vision.augment.ipynb",
"Zoom": "09_vision.augment.ipynb",
"find_coeffs": "09_vision.augment.ipynb",
"apply_perspective": "09_vision.augment.ipynb",
"TensorImage.warp": "09_vision.augment.ipynb",
"TensorMask.warp": "09_vision.augment.ipynb",
"TensorPoint.warp": "09_vision.augment.ipynb",
"TensorBBox.warp": "09_vision.augment.ipynb",
"Warp": "09_vision.augment.ipynb",
"TensorImage.lighting": "09_vision.augment.ipynb",
"SpaceTfm": "09_vision.augment.ipynb",
"LightingTfm": "09_vision.augment.ipynb",
"TensorImage.brightness": "09_vision.augment.ipynb",
"Brightness": "09_vision.augment.ipynb",
"TensorImage.contrast": "09_vision.augment.ipynb",
"Contrast": "09_vision.augment.ipynb",
"grayscale": "09_vision.augment.ipynb",
"TensorImage.saturation": "09_vision.augment.ipynb",
"Saturation": "09_vision.augment.ipynb",
"rgb2hsv": "09_vision.augment.ipynb",
"hsv2rgb": "09_vision.augment.ipynb",
"TensorImage.hsv": "09_vision.augment.ipynb",
"HSVTfm": "09_vision.augment.ipynb",
"TensorImage.hue": "09_vision.augment.ipynb",
"Hue": "09_vision.augment.ipynb",
"cutout_gaussian": "09_vision.augment.ipynb",
"norm_apply_denorm": "09_vision.augment.ipynb",
"RandomErasing": "09_vision.augment.ipynb",
"setup_aug_tfms": "09_vision.augment.ipynb",
"aug_transforms": "09_vision.augment.ipynb",
"download_images": "09b_vision.utils.ipynb",
"resize_to": "09b_vision.utils.ipynb",
"verify_image": "09b_vision.utils.ipynb",
"verify_images": "09b_vision.utils.ipynb",
"resize_image": "09b_vision.utils.ipynb",
"resize_images": "09b_vision.utils.ipynb",
"Box.__getitem__": "09c_vision.widgets.ipynb",
"widget": "09c_vision.widgets.ipynb",
"carousel": "09c_vision.widgets.ipynb",
"ImagesCleaner": "09c_vision.widgets.ipynb",
"ImageClassifierCleaner": "09c_vision.widgets.ipynb",
"init_cnn": "11_vision.models.xresnet.ipynb",
"XResNet": "11_vision.models.xresnet.ipynb",
"xresnet18": "11_vision.models.xresnet.ipynb",
"xresnet34": "11_vision.models.xresnet.ipynb",
"xresnet50": "11_vision.models.xresnet.ipynb",
"xresnet101": "11_vision.models.xresnet.ipynb",
"xresnet152": "11_vision.models.xresnet.ipynb",
"xresnet18_deep": "11_vision.models.xresnet.ipynb",
"xresnet34_deep": "11_vision.models.xresnet.ipynb",
"xresnet50_deep": "11_vision.models.xresnet.ipynb",
"xresnet18_deeper": "11_vision.models.xresnet.ipynb",
"xresnet34_deeper": "11_vision.models.xresnet.ipynb",
"xresnet50_deeper": "11_vision.models.xresnet.ipynb",
"se_kwargs1": "11_vision.models.xresnet.ipynb",
"se_kwargs2": "11_vision.models.xresnet.ipynb",
"se_kwargs3": "11_vision.models.xresnet.ipynb",
"g0": "11_vision.models.xresnet.ipynb",
"g1": "11_vision.models.xresnet.ipynb",
"g2": "11_vision.models.xresnet.ipynb",
"g3": "11_vision.models.xresnet.ipynb",
"xse_resnet18": "11_vision.models.xresnet.ipynb",
"xse_resnext18": "11_vision.models.xresnet.ipynb",
"xresnext18": "11_vision.models.xresnet.ipynb",
"xse_resnet34": "11_vision.models.xresnet.ipynb",
"xse_resnext34": "11_vision.models.xresnet.ipynb",
"xresnext34": "11_vision.models.xresnet.ipynb",
"xse_resnet50": "11_vision.models.xresnet.ipynb",
"xse_resnext50": "11_vision.models.xresnet.ipynb",
"xresnext50": "11_vision.models.xresnet.ipynb",
"xse_resnet101": "11_vision.models.xresnet.ipynb",
"xse_resnext101": "11_vision.models.xresnet.ipynb",
"xresnext101": "11_vision.models.xresnet.ipynb",
"xse_resnet152": "11_vision.models.xresnet.ipynb",
"xsenet154": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deep": "11_vision.models.xresnet.ipynb",
"xse_resnext18_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext34_deeper": "11_vision.models.xresnet.ipynb",
"xse_resnext50_deeper": "11_vision.models.xresnet.ipynb",
"Optimizer": "12_optimizer.ipynb",
"sgd_step": "12_optimizer.ipynb",
"weight_decay": "12_optimizer.ipynb",
"weight_decay.defaults": "12_optimizer.ipynb",
"l2_reg": "12_optimizer.ipynb",
"l2_reg.defaults": "12_optimizer.ipynb",
"average_grad": "12_optimizer.ipynb",
"average_grad.defaults": "12_optimizer.ipynb",
"average_sqr_grad": "12_optimizer.ipynb",
"average_sqr_grad.defaults": "12_optimizer.ipynb",
"momentum_step": "12_optimizer.ipynb",
"SGD": "12_optimizer.ipynb",
"rms_prop_step": "12_optimizer.ipynb",
"rms_prop_step.defaults": "12_optimizer.ipynb",
"RMSProp": "12_optimizer.ipynb",
"step_stat": "12_optimizer.ipynb",
"debias": "12_optimizer.ipynb",
"adam_step": "12_optimizer.ipynb",
"Adam": "12_optimizer.ipynb",
"radam_step": "12_optimizer.ipynb",
"RAdam": "12_optimizer.ipynb",
"qhadam_step": "12_optimizer.ipynb",
"QHAdam": "12_optimizer.ipynb",
"larc_layer_lr": "12_optimizer.ipynb",
"larc_layer_lr.defaults": "12_optimizer.ipynb",
"larc_step": "12_optimizer.ipynb",
"Larc": "12_optimizer.ipynb",
"lamb_step": "12_optimizer.ipynb",
"Lamb": "12_optimizer.ipynb",
"Lookahead": "12_optimizer.ipynb",
"ranger": "12_optimizer.ipynb",
"detuplify_pg": "12_optimizer.ipynb",
"set_item_pg": "12_optimizer.ipynb",
"pytorch_hp_map": "12_optimizer.ipynb",
"OptimWrapper": "12_optimizer.ipynb",
"Callback": "13_callback.core.ipynb",
"TrainEvalCallback": "13_callback.core.ipynb",
"GatherPredsCallback": "13_callback.core.ipynb",
"FetchPredsCallback": "13_callback.core.ipynb",
"defaults.lr": "13a_learner.ipynb",
"replacing_yield": "13a_learner.ipynb",
"mk_metric": "13a_learner.ipynb",
"save_model": "13a_learner.ipynb",
"load_model": "13a_learner.ipynb",
"Learner": "13a_learner.ipynb",
"before_batch_cb": "13a_learner.ipynb",
"Learner.save": "13a_learner.ipynb",
"Learner.load": "13a_learner.ipynb",
"Learner.export": "13a_learner.ipynb",
"load_learner": "13a_learner.ipynb",
"to_detach_from_dl": "13a_learner.ipynb",
"Metric": "13a_learner.ipynb",
"AvgMetric": "13a_learner.ipynb",
"AvgLoss": "13a_learner.ipynb",
"AvgSmoothLoss": "13a_learner.ipynb",
"ValueMetric": "13a_learner.ipynb",
"Recorder": "13a_learner.ipynb",
"Learner.freeze_to": "13a_learner.ipynb",
"Learner.freeze": "13a_learner.ipynb",
"Learner.unfreeze": "13a_learner.ipynb",
"Learner.tta": "13a_learner.ipynb",
"flatten_check": "13b_metrics.ipynb",
"AccumMetric": "13b_metrics.ipynb",
"skm_to_fastai": "13b_metrics.ipynb",
"optim_metric": "13b_metrics.ipynb",
"accuracy": "13b_metrics.ipynb",
"error_rate": "13b_metrics.ipynb",
"top_k_accuracy": "13b_metrics.ipynb",
"APScoreBinary": "13b_metrics.ipynb",
"BalancedAccuracy": "13b_metrics.ipynb",
"BrierScore": "13b_metrics.ipynb",
"CohenKappa": "13b_metrics.ipynb",
"F1Score": "13b_metrics.ipynb",
"FBeta": "13b_metrics.ipynb",
"HammingLoss": "13b_metrics.ipynb",
"Jaccard": "13b_metrics.ipynb",
"Precision": "13b_metrics.ipynb",
"Recall": "13b_metrics.ipynb",
"RocAuc": "13b_metrics.ipynb",
"RocAucBinary": "13b_metrics.ipynb",
"MatthewsCorrCoef": "13b_metrics.ipynb",
"Perplexity": "13b_metrics.ipynb",
"perplexity": "13b_metrics.ipynb",
"accuracy_multi": "13b_metrics.ipynb",
"APScoreMulti": "13b_metrics.ipynb",
"BrierScoreMulti": "13b_metrics.ipynb",
"F1ScoreMulti": "13b_metrics.ipynb",
"FBetaMulti": "13b_metrics.ipynb",
"HammingLossMulti": "13b_metrics.ipynb",
"JaccardMulti": "13b_metrics.ipynb",
"MatthewsCorrCoefMulti": "13b_metrics.ipynb",
"PrecisionMulti": "13b_metrics.ipynb",
"RecallMulti": "13b_metrics.ipynb",
"RocAucMulti": "13b_metrics.ipynb",
"mse": "13b_metrics.ipynb",
"rmse": "13b_metrics.ipynb",
"rmse.__doc__": "13b_metrics.ipynb",
"mae": "13b_metrics.ipynb",
"msle": "13b_metrics.ipynb",
"exp_rmspe": "13b_metrics.ipynb",
"exp_rmspe.__doc__": "13b_metrics.ipynb",
"ExplainedVariance": "13b_metrics.ipynb",
"R2Score": "13b_metrics.ipynb",
"PearsonCorrCoef": "13b_metrics.ipynb",
"SpearmanCorrCoef": "13b_metrics.ipynb",
"foreground_acc": "13b_metrics.ipynb",
"Dice": "13b_metrics.ipynb",
"DiceMulti": "13b_metrics.ipynb",
"JaccardCoeff": "13b_metrics.ipynb",
"CorpusBLEUMetric": "13b_metrics.ipynb",
"LossMetric": "13b_metrics.ipynb",
"LossMetrics": "13b_metrics.ipynb",
"annealer": "14_callback.schedule.ipynb",
"sched_lin": "14_callback.schedule.ipynb",
"sched_cos": "14_callback.schedule.ipynb",
"sched_no": "14_callback.schedule.ipynb",
"sched_exp": "14_callback.schedule.ipynb",
"SchedLin": "14_callback.schedule.ipynb",
"SchedCos": "14_callback.schedule.ipynb",
"SchedNo": "14_callback.schedule.ipynb",
"SchedExp": "14_callback.schedule.ipynb",
"SchedLin.__doc__": "14_callback.schedule.ipynb",
"SchedCos.__doc__": "14_callback.schedule.ipynb",
"SchedExp.__doc__": "14_callback.schedule.ipynb",
"SchedPoly": "14_callback.schedule.ipynb",
"combine_scheds": "14_callback.schedule.ipynb",
"combined_cos": "14_callback.schedule.ipynb",
"ParamScheduler": "14_callback.schedule.ipynb",
"Learner.fit_one_cycle": "14_callback.schedule.ipynb",
"Recorder.plot_sched": "14_callback.schedule.ipynb",
"Learner.fit_flat_cos": "14_callback.schedule.ipynb",
"Learner.fit_sgdr": "14_callback.schedule.ipynb",
"Learner.fine_tune": "14_callback.schedule.ipynb",
"LRFinder": "14_callback.schedule.ipynb",
"Recorder.plot_lr_find": "14_callback.schedule.ipynb",
"SuggestedLRs": "14_callback.schedule.ipynb",
"Learner.lr_find": "14_callback.schedule.ipynb",
"CollectDataCallback": "14a_callback.data.ipynb",
"CudaCallback": "14a_callback.data.ipynb",
"WeightedDL": "14a_callback.data.ipynb",
"Datasets.weighted_dataloaders": "14a_callback.data.ipynb",
"PartialDL": "14a_callback.data.ipynb",
"FilteredBase.partial_dataloaders": "14a_callback.data.ipynb",
"Hook": "15_callback.hook.ipynb",
"hook_output": "15_callback.hook.ipynb",
"Hooks": "15_callback.hook.ipynb",
"hook_outputs": "15_callback.hook.ipynb",
"dummy_eval": "15_callback.hook.ipynb",
"model_sizes": "15_callback.hook.ipynb",
"num_features_model": "15_callback.hook.ipynb",
"has_params": "15_callback.hook.ipynb",
"HookCallback": "15_callback.hook.ipynb",
"total_params": "15_callback.hook.ipynb",
"layer_info": "15_callback.hook.ipynb",
"module_summary": "15_callback.hook.ipynb",
"Learner.summary": "15_callback.hook.ipynb",
"ActivationStats": "15_callback.hook.ipynb",
"UnetBlock": "15a_vision.models.unet.ipynb",
"ResizeToOrig": "15a_vision.models.unet.ipynb",
"DynamicUnet": "15a_vision.models.unet.ipynb",
"ProgressCallback": "16_callback.progress.ipynb",
"Learner.no_bar": "16_callback.progress.ipynb",
"ShowGraphCallback": "16_callback.progress.ipynb",
"CSVLogger": "16_callback.progress.ipynb",
"TerminateOnNaNCallback": "17_callback.tracker.ipynb",
"TrackerCallback": "17_callback.tracker.ipynb",
"EarlyStoppingCallback": "17_callback.tracker.ipynb",
"SaveModelCallback": "17_callback.tracker.ipynb",
"ReduceLROnPlateau": "17_callback.tracker.ipynb",
"MixedPrecision": "18_callback.fp16.ipynb",
"FP16TestCallback": "18_callback.fp16.ipynb",
"Learner.to_fp16": "18_callback.fp16.ipynb",
"Learner.to_fp32": "18_callback.fp16.ipynb",
"get_master": "18_callback.fp16.ipynb",
"to_master_grads": "18_callback.fp16.ipynb",
"to_model_params": "18_callback.fp16.ipynb",
"test_overflow": "18_callback.fp16.ipynb",
"grad_overflow": "18_callback.fp16.ipynb",
"copy_clone": "18_callback.fp16.ipynb",
"ModelToHalf": "18_callback.fp16.ipynb",
"NonNativeMixedPrecision": "18_callback.fp16.ipynb",
"Learner.to_to_non_native_fp16": "18_callback.fp16.ipynb",
"Learner.to_non_native_fp32": "18_callback.fp16.ipynb",
"ShortEpochCallback": "18a_callback.training.ipynb",
"GradientAccumulation": "18a_callback.training.ipynb",
"GradientClip": "18a_callback.training.ipynb",
"set_bn_eval": "18a_callback.training.ipynb",
"BnFreeze": "18a_callback.training.ipynb",
"bn_types": "18a_callback.training.ipynb",
"MCDropoutCallback": "18b_callback.preds.ipynb",
"reduce_loss": "19_callback.mixup.ipynb",
"MixHandler": "19_callback.mixup.ipynb",
"MixUp": "19_callback.mixup.ipynb",
"CutMix": "19_callback.mixup.ipynb",
"Interpretation": "20_interpret.ipynb",
"ClassificationInterpretation": "20_interpret.ipynb",
"DataParallel.reset": "20a_distributed.ipynb",
"ParallelTrainer": "20a_distributed.ipynb",
"Learner.to_parallel": "20a_distributed.ipynb",
"Learner.detach_parallel": "20a_distributed.ipynb",
"Learner.parallel_ctx": "20a_distributed.ipynb",
"DistributedDataParallel.reset": "20a_distributed.ipynb",
"setup_distrib": "20a_distributed.ipynb",
"teardown_distrib": "20a_distributed.ipynb",
"DistributedDL": "20a_distributed.ipynb",
"DistributedTrainer": "20a_distributed.ipynb",
"Learner.to_distributed": "20a_distributed.ipynb",
"Learner.detach_distributed": "20a_distributed.ipynb",
"Learner.distrib_ctx": "20a_distributed.ipynb",
"rank0_first": "20a_distributed.ipynb",
"has_pool_type": "21_vision.learner.ipynb",
"create_body": "21_vision.learner.ipynb",
"create_head": "21_vision.learner.ipynb",
"default_split": "21_vision.learner.ipynb",
"model_meta": "21_vision.learner.ipynb",
"create_cnn_model": "21_vision.learner.ipynb",
"cnn_learner": "21_vision.learner.ipynb",
"create_unet_model": "21_vision.learner.ipynb",
"unet_learner": "21_vision.learner.ipynb",
"GANModule": "24_vision.gan.ipynb",
"basic_critic": "24_vision.gan.ipynb",
"AddChannels": "24_vision.gan.ipynb",
"basic_generator": "24_vision.gan.ipynb",
"DenseResBlock": "24_vision.gan.ipynb",
"gan_critic": "24_vision.gan.ipynb",
"GANLoss": "24_vision.gan.ipynb",
"AdaptiveLoss": "24_vision.gan.ipynb",
"accuracy_thresh_expand": "24_vision.gan.ipynb",
"set_freeze_model": "24_vision.gan.ipynb",
"GANTrainer": "24_vision.gan.ipynb",
"FixedGANSwitcher": "24_vision.gan.ipynb",
"AdaptiveGANSwitcher": "24_vision.gan.ipynb",
"GANDiscriminativeLR": "24_vision.gan.ipynb",
"InvisibleTensor": "24_vision.gan.ipynb",
"generate_noise": "24_vision.gan.ipynb",
"gan_loss_from_func": "24_vision.gan.ipynb",
"GANLearner": "24_vision.gan.ipynb",
"GANLearner.from_learners": "24_vision.gan.ipynb",
"GANLearner.wgan": "24_vision.gan.ipynb",
"spec_add_spaces": "30_text.core.ipynb",
"rm_useless_spaces": "30_text.core.ipynb",
"replace_rep": "30_text.core.ipynb",
"replace_wrep": "30_text.core.ipynb",
"fix_html": "30_text.core.ipynb",
"replace_all_caps": "30_text.core.ipynb",
"replace_maj": "30_text.core.ipynb",
"lowercase": "30_text.core.ipynb",
"replace_space": "30_text.core.ipynb",
"defaults.text_spec_tok": "30_text.core.ipynb",
"defaults.text_proc_rules": "30_text.core.ipynb",
"defaults.text_postproc_rules": "30_text.core.ipynb",
"BaseTokenizer": "30_text.core.ipynb",
"SpacyTokenizer": "30_text.core.ipynb",
"WordTokenizer": "30_text.core.ipynb",
"TokenizeWithRules": "30_text.core.ipynb",
"tokenize1": "30_text.core.ipynb",
"parallel_tokenize": "30_text.core.ipynb",
"fn_counter_pkl": "30_text.core.ipynb",
"fn_lengths_pkl": "30_text.core.ipynb",
"tokenize_folder": "30_text.core.ipynb",
"tokenize_files": "30_text.core.ipynb",
"tokenize_texts": "30_text.core.ipynb",
"tokenize_df": "30_text.core.ipynb",
"tokenize_csv": "30_text.core.ipynb",
"load_tokenized_csv": "30_text.core.ipynb",
"Tokenizer": "30_text.core.ipynb",
"eu_langs": "30_text.core.ipynb",
"SentencePieceTokenizer": "30_text.core.ipynb",
"SubwordTokenizer": "30_text.core.ipynb",
"reverse_text": "31_text.data.ipynb",
"make_vocab": "31_text.data.ipynb",
"TensorText": "31_text.data.ipynb",
"LMTensorText": "31_text.data.ipynb",
"TensorText.__doc__": "31_text.data.ipynb",
"LMTensorText.__doc__": "31_text.data.ipynb",
"Numericalize": "31_text.data.ipynb",
"LMDataLoader": "31_text.data.ipynb",
"Pad_Input": "31_text.data.ipynb",
"pad_input": "31_text.data.ipynb",
"pad_chunk": "31_text.data.ipynb",
"pad_input_chunk": "31_text.data.ipynb",
"Pad_Chunk": "31_text.data.ipynb",
"SortedDL": "31_text.data.ipynb",
"TextBlock": "31_text.data.ipynb",
"TextDataLoaders": "31_text.data.ipynb",
"TextDataLoaders.from_csv": "31_text.data.ipynb",
"dropout_mask": "32_text.models.awdlstm.ipynb",
"RNNDropout": "32_text.models.awdlstm.ipynb",
"WeightDropout": "32_text.models.awdlstm.ipynb",
"EmbeddingDropout": "32_text.models.awdlstm.ipynb",
"AWD_LSTM": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_lm_config": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_split": "32_text.models.awdlstm.ipynb",
"awd_lstm_clas_config": "32_text.models.awdlstm.ipynb",
"AWD_QRNN": "32_text.models.awdlstm.ipynb",
"awd_qrnn_lm_config": "32_text.models.awdlstm.ipynb",
"awd_qrnn_clas_config": "32_text.models.awdlstm.ipynb",
"LinearDecoder": "33_text.models.core.ipynb",
"SequentialRNN": "33_text.models.core.ipynb",
"get_language_model": "33_text.models.core.ipynb",
"SentenceEncoder": "33_text.models.core.ipynb",
"masked_concat_pool": "33_text.models.core.ipynb",
"PoolingLinearClassifier": "33_text.models.core.ipynb",
"get_text_classifier": "33_text.models.core.ipynb",
"ModelResetter": "34_callback.rnn.ipynb",
"RNNCallback": "34_callback.rnn.ipynb",
"RNNRegularizer": "34_callback.rnn.ipynb",
"rnn_cbs": "34_callback.rnn.ipynb",
"load_cpp": "36_text.models.qrnn.ipynb",
"forget_mult_cuda": "36_text.models.qrnn.ipynb",
"bwd_forget_mult_cuda": "36_text.models.qrnn.ipynb",
"dispatch_cuda": "36_text.models.qrnn.ipynb",
"forget_mult_CPU": "36_text.models.qrnn.ipynb",
"ForgetMultGPU": "36_text.models.qrnn.ipynb",
"QRNNLayer": "36_text.models.qrnn.ipynb",
"QRNN": "36_text.models.qrnn.ipynb",
"match_embeds": "37_text.learner.ipynb",
"load_ignore_keys": "37_text.learner.ipynb",
"clean_raw_keys": "37_text.learner.ipynb",
"load_model_text": "37_text.learner.ipynb",
"TextLearner": "37_text.learner.ipynb",
"decode_spec_tokens": "37_text.learner.ipynb",
"LMLearner": "37_text.learner.ipynb",
"language_model_learner": "37_text.learner.ipynb",
"text_classifier_learner": "37_text.learner.ipynb",
"make_date": "40_tabular.core.ipynb",
"add_datepart": "40_tabular.core.ipynb",
"add_elapsed_times": "40_tabular.core.ipynb",
"cont_cat_split": "40_tabular.core.ipynb",
"df_shrink_dtypes": "40_tabular.core.ipynb",
"df_shrink": "40_tabular.core.ipynb",
"Tabular": "40_tabular.core.ipynb",
"TabularPandas": "40_tabular.core.ipynb",
"TabularProc": "40_tabular.core.ipynb",
"Categorify": "40_tabular.core.ipynb",
"setups": "40_tabular.core.ipynb",
"FillStrategy": "40_tabular.core.ipynb",
"FillMissing": "40_tabular.core.ipynb",
"ReadTabBatch": "40_tabular.core.ipynb",
"TabDataLoader": "40_tabular.core.ipynb",
"TabularDataLoaders": "41_tabular.data.ipynb",
"TabularDataLoaders.from_csv": "41_tabular.data.ipynb",
"emb_sz_rule": "42_tabular.model.ipynb",
"get_emb_sz": "42_tabular.model.ipynb",
"TabularModel": "42_tabular.model.ipynb",
"tabular_config": "42_tabular.model.ipynb",
"TabularLearner": "43_tabular.learner.ipynb",
"tabular_learner": "43_tabular.learner.ipynb",
"TabularCollab": "45_collab.ipynb",
"CollabDataLoaders": "45_collab.ipynb",
"CollabDataLoaders.from_csv": "45_collab.ipynb",
"EmbeddingDotBias": "45_collab.ipynb",
"EmbeddingNN": "45_collab.ipynb",
"collab_learner": "45_collab.ipynb",
"get_dicom_files": "60_medical.imaging.ipynb",
"Path.dcmread": "60_medical.imaging.ipynb",
"TensorDicom": "60_medical.imaging.ipynb",
"PILDicom": "60_medical.imaging.ipynb",
"Path.png16read": "60_medical.imaging.ipynb",
"pixels": "60_medical.imaging.ipynb",
"scaled_px": "60_medical.imaging.ipynb",
"array_freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.freqhist_bins": "60_medical.imaging.ipynb",
"Tensor.hist_scaled_pt": "60_medical.imaging.ipynb",
"Tensor.hist_scaled": "60_medical.imaging.ipynb",
"DcmDataset.hist_scaled": "60_medical.imaging.ipynb",
"Tensor.windowed": "60_medical.imaging.ipynb",
"DcmDataset.windowed": "60_medical.imaging.ipynb",
"dicom_windows": "60_medical.imaging.ipynb",
"TensorCTScan": "60_medical.imaging.ipynb",
"PILCTScan": "60_medical.imaging.ipynb",
"DcmDataset.show": "60_medical.imaging.ipynb",
"DcmDataset.pct_in_window": "60_medical.imaging.ipynb",
"uniform_blur2d": "60_medical.imaging.ipynb",
"gauss_blur2d": "60_medical.imaging.ipynb",
"Tensor.mask_from_blur": "60_medical.imaging.ipynb",
"DcmDataset.mask_from_blur": "60_medical.imaging.ipynb",
"mask2bbox": "60_medical.imaging.ipynb",
"crop_resize": "60_medical.imaging.ipynb",
"Tensor.to_nchan": "60_medical.imaging.ipynb",
"DcmDataset.to_nchan": "60_medical.imaging.ipynb",
"Tensor.to_3chan": "60_medical.imaging.ipynb",
"DcmDataset.to_3chan": "60_medical.imaging.ipynb",
"Tensor.save_jpg": "60_medical.imaging.ipynb",
"DcmDataset.save_jpg": "60_medical.imaging.ipynb",
"Tensor.to_uint16": "60_medical.imaging.ipynb",
"DcmDataset.to_uint16": "60_medical.imaging.ipynb",
"Tensor.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.set_pixels": "60_medical.imaging.ipynb",
"DcmDataset.pixel_array": "60_medical.imaging.ipynb",
"DcmDataset.zoom": "60_medical.imaging.ipynb",
"DcmDataset.zoom_to": "60_medical.imaging.ipynb",
"DcmDataset.as_dict": "60_medical.imaging.ipynb",
"pd.DataFrame.from_dicoms": "60_medical.imaging.ipynb",
"DicomSegmentationDataLoaders": "60_medical.imaging.ipynb",
"WandbCallback": "70_callback.wandb.ipynb",
"Learner.gather_args": "70_callback.wandb.ipynb",
"log_dataset": "70_callback.wandb.ipynb",
"log_model": "70_callback.wandb.ipynb",
"TensorBoardBaseCallback": "71_callback.tensorboard.ipynb",
"TensorBoardCallback": "71_callback.tensorboard.ipynb",
"TensorBoardProjectorCallback": "71_callback.tensorboard.ipynb",
"projector_word_embeddings": "71_callback.tensorboard.ipynb",
"NeptuneCallback": "72_callback.neptune.ipynb",
"json_clean": "73_callback.captum.ipynb",
"jsonutil.json_clean": "73_callback.captum.ipynb",
"CaptumInterpretation": "73_callback.captum.ipynb",
"CaptumInterpretation.insights": "73_callback.captum.ipynb",
"synth_dbunch": "97_test_utils.ipynb",
"RegModel": "97_test_utils.ipynb",
"synth_learner": "97_test_utils.ipynb",
"VerboseCallback": "97_test_utils.ipynb",
"get_env": "97_test_utils.ipynb",
"try_import": "97_test_utils.ipynb",
"nvidia_smi": "97_test_utils.ipynb",
"nvidia_mem": "97_test_utils.ipynb",
"show_install": "97_test_utils.ipynb",
"PYTORCH_URL": "99_pytorch_doc.ipynb",
"pytorch_doc_link": "99_pytorch_doc.ipynb"}
modules = ["torch_core.py",
"layers.py",
"losses.py",
"data/load.py",
"data/core.py",
"data/external.py",
"data/transforms.py",
"data/block.py",
"vision/core.py",
"vision/data.py",
"vision/augment.py",
"vision/utils.py",
"vision/widgets.py",
"vision/models/xresnet.py",
"optimizer.py",
"callback/core.py",
"learner.py",
"metrics.py",
"callback/schedule.py",
"callback/data.py",
"callback/hook.py",
"vision/models/unet.py",
"callback/progress.py",
"callback/tracker.py",
"callback/fp16.py",
"callback/training.py",
"callback/preds.py",
"callback/mixup.py",
"interpret.py",
"distributed.py",
"vision/learner.py",
"vision/gan.py",
"text/core.py",
"text/data.py",
"text/models/awdlstm.py",
"text/models/core.py",
"callback/rnn.py",
"text/models/qrnn.py",
"text/learner.py",
"tabular/core.py",
"tabular/data.py",
"tabular/model.py",
"tabular/learner.py",
"collab.py",
"medical/imaging.py",
"medical/text.py",
"callback/wandb.py",
"callback/tensorboard.py",
"callback/neptune.py",
"callback/captum.py",
"test_utils.py",
"_pytorch_doc.py"]
doc_url = "https://docs.fast.ai/"
git_url = "https://github.com/fastai/fastai/tree/master/"
def custom_doc_links(name):
from nbdev.showdoc import try_external_doc_link
return try_external_doc_link(name, ['fastcore', 'nbdev'])
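# Illustrative sketch (not part of the generated file): one plausible way to turn the
# name -> notebook mapping above into a documentation URL. It assumes the mapping is bound
# to `index` (the usual nbdev convention) and that each docs page is the notebook name with
# its numeric prefix and ".ipynb" suffix stripped; the real nbdev logic may differ.
def _guess_doc_link(name):
    import re
    nb = index.get(name)
    if nb is None: return None
    page = re.sub(r'^\d+[a-z]?_', '', nb).replace('.ipynb', '.html')
    return f'{doc_url}{page}#{name}'
# e.g. _guess_doc_link('Learner.fit_one_cycle') -> 'https://docs.fast.ai/callback.schedule.html#Learner.fit_one_cycle'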
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/_nbdev.py
|
_nbdev.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/20_interpret.ipynb (unless otherwise specified).
__all__ = ['plot_top_losses', 'Interpretation', 'ClassificationInterpretation']
# Cell
from .data.all import *
from .optimizer import *
from .learner import *
import sklearn.metrics as skm
# Cell
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
# Cell
#nbdev_comment _all_ = ["plot_top_losses"]
# Cell
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, dl, inputs, preds, targs, decoded, losses):
store_attr("dl,inputs,preds,targs,decoded,losses")
@classmethod
def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
"Construct interpretation object from a learner"
if dl is None: dl = learn.dls[ds_idx]
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))
def top_losses(self, k=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
def plot_top_losses(self, k, largest=True, **kwargs):
losses,idx = self.top_losses(k, largest)
if not isinstance(self.inputs, tuple): self.inputs = (self.inputs,)
if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
x,y,its = self.dl._pre_show_batch(b, max_n=k)
b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
if its is not None:
plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), self.preds[idx], losses, **kwargs)
#TODO: figure out if this is needed
#its None means that a batch knows how to show itself as a whole, so we pass x, x1
#else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
# Cell
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, dl, inputs, preds, targs, decoded, losses):
super().__init__(dl, inputs, preds, targs, decoded, losses)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
d,t = flatten_check(self.decoded, self.targs)
cm = ((d==x[:,None]) & (t==x[:,None,None])).long().sum(2)
return to_np(cm)
def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
plot_txt=True, **kwargs):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
def print_classification_report(self):
"Print scikit-learn classification report"
d,t = flatten_check(self.decoded, self.targs)
print(skm.classification_report(t, d, labels=list(self.vocab.o2i.values()), target_names=[str(v) for v in self.vocab]))
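# Illustrative usage sketch (comment only; assumes a trained classification `learn`,
# e.g. from `cnn_learner`):
#
#     interp = ClassificationInterpretation.from_learner(learn)
#     interp.plot_confusion_matrix(figsize=(6,6))
#     interp.most_confused(min_val=2)   # worst (actual, predicted, count) confusions
#     interp.plot_top_losses(9)         # items the model got most wrong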
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/interpret.py
|
interpret.py
|
from .data.all import *
from .optimizer import *
from .callback.core import *
from .learner import *
from .metrics import *
from .interpret import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/basics.py
|
basics.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/20a_distributed.ipynb (unless otherwise specified).
__all__ = ['ParallelTrainer', 'setup_distrib', 'teardown_distrib', 'DistributedDL', 'DistributedTrainer', 'rank0_first']
# Cell
from .basics import *
from .callback.progress import ProgressCallback
from torch.nn.parallel import DistributedDataParallel, DataParallel
from .data.load import _FakeLoader,_loaders
# Cell
@patch
def reset(self: DataParallel):
"Patch required `reset` call into `DataParallel`"
if hasattr(self.module, 'reset'): self.module.reset()
# Cell
class ParallelTrainer(Callback):
"Wrap a model `DataParallel` automatically"
run_after,run_before = TrainEvalCallback,Recorder
def __init__(self, device_ids): self.device_ids = device_ids
def before_fit(self): self.learn.model = DataParallel(self.learn.model, device_ids=self.device_ids)
def after_fit(self): self.learn.model = self.learn.model.module
# Cell
@patch
def to_parallel(self: Learner, device_ids=None):
"Add `ParallelTrainer` callback to a `Learner`"
self.add_cb(ParallelTrainer(device_ids))
return self
# Cell
@patch
def detach_parallel(self: Learner):
"Remove `ParallelTrainer` callback from a Learner"
self.remove_cb(ParallelTrainer)
return self
# Cell
@patch
@contextmanager
def parallel_ctx(self: Learner, device_ids=None):
"A context manager to adapt a learner to train in data parallel mode."
try:
self.to_parallel(device_ids)
yield self
finally: self.detach_parallel()
# Cell
@patch
def reset(self: DistributedDataParallel):
"Patch required `reset` call into `DistributedDataParallel`"
if hasattr(self.module, 'reset'): self.module.reset()
# Cell
def setup_distrib(gpu=None):
"Setup this process to participate in distributed training"
if gpu is None: return gpu
gpu = int(gpu)
torch.cuda.set_device(int(gpu))
if num_distrib() > 0: torch.distributed.init_process_group(backend='nccl', init_method='env://')
return gpu
# Cell
def teardown_distrib():
"Free distributed training resources"
if torch.distributed.is_initialized(): torch.distributed.destroy_process_group()
# Cell
def _round_to_multiple(number,multiple): return int(math.ceil(number/multiple)*multiple)
# Cell
class DistributedDL(TfmdDL):
"A `TfmdDL` which splits a batch into equal size pieces for each worker"
def __init__(self,dl,rank=None,world_size=None):
if rank is None: rank=rank_distrib()
if world_size is None: world_size=num_distrib()
store_attr()
self.bs,self.device,self.drop_last,self.dataset,fake = attrgetter('bs','device','drop_last','dataset','fake_l')(dl)
self.fake_l = _FakeLoader(self, fake.pin_memory, fake.num_workers, fake.timeout, persistent_workers=fake.persistent_workers)
def _broadcast(self,t,rank):
"Broadcasts t from rank `rank` to all other ranks. Returns t so t is same for all ranks after call."
t = LongTensor(t).cuda() # nccl only works with cuda tensors
torch.distributed.broadcast(t,rank)
return t.cpu().tolist()
def _to_detach(self,b,cpu=True,gather=True): return to_detach(b,cpu,gather) # member func so we can override for test
def __len__(self): return _round_to_multiple(len(self.dl),self.world_size)//self.world_size
def get_idxs(self):
idxs = list(self.dl.get_idxs()) # compute get_idxs in all ranks (we'll only use rank 0 but size must be consistent)
idxs = self._broadcast(idxs,0) # broadcast and receive it from rank 0 to all
self.n = len(idxs) # we assumed n was dl.n but we really care about number of idxs
# add extra samples to make it evenly divisible
self.n_padded = _round_to_multiple(self.n,self.world_size)
idxs += (idxs * (self.n_padded//self.n))[:self.n_padded-self.n] # idx needs to be repeated when n_padded>>n
# slice padded idxs so that each rank gets self.n_padded//self.world_size tensors
return idxs[self.rank*self.n_padded//self.world_size:(self.rank+1)*self.n_padded//self.world_size]
def before_iter(self):
self.i = 0
self.dl.before_iter()
def randomize(self): self.dl.randomize()
def after_batch(self,b):
self.i += find_bs(b)
return self.dl.after_batch(b)
def after_iter(self): self.dl.after_iter()
def create_batches(self,samps): return self.dl.create_batches(samps)
def to_detach(self,b, cpu=True, gather=True):
b = self._to_detach(b, cpu, gather)
def _inner(b):
if b.ndim>0:
# for each rank, compute overflow of read idxs vs self.n and accumulate them to unpad totals after gathering
n = sum([min(0,max(-len(b)//self.world_size,
self.n-(self.i+r*self.n_padded//self.world_size))) for r in range(self.world_size)])
b = b[:n or None]
return b
return apply(_inner,b) if gather and all(hasattr(self,o) for o in ('i','n','n_padded')) else b
# Cell
class DistributedTrainer(Callback):
"Wrap `model` in `DistributedDataParallel` and `dls` in `DistributedDL`"
fup = None
def __init__(self, cuda_id=0,sync_bn=True): store_attr()
def before_fit(self):
opt_kwargs = { 'find_unused_parameters' : DistributedTrainer.fup } if DistributedTrainer.fup is not None else {}
self.learn.model = DistributedDataParallel(
nn.SyncBatchNorm.convert_sync_batchnorm(self.model) if self.sync_bn else self.model,
device_ids=[self.cuda_id], output_device=self.cuda_id, **opt_kwargs)
self.old_dls = list(self.dls)
self.learn.dls.loaders = [self._wrap_dl(dl) for dl in self.dls]
if rank_distrib(): self.learn.logger=noop
def _wrap_dl(self, dl): return dl if isinstance(dl,DistributedDL) else DistributedDL(dl)
def before_train(self): self.learn.dl = self._wrap_dl(self.learn.dl)
def before_validate(self): self.learn.dl = self._wrap_dl(self.learn.dl)
def after_fit(self): self.learn.model,self.learn.dls.loaders = self.learn.model.module,self.old_dls
# Cell
@patch
def to_distributed(self: Learner, cuda_id, sync_bn=True):
"Add `DistributedTrainer` to a learner"
self.add_cb(DistributedTrainer(cuda_id,sync_bn))
if rank_distrib(): self.remove_cb(ProgressCallback)
return self
# Cell
@patch
def detach_distributed(self: Learner):
"Remove `DistributedTrainer` from a learner"
if num_distrib() <=1: return self
self.remove_cb(DistributedTrainer)
if rank_distrib() and not hasattr(self, 'progress'): self.add_cb(ProgressCallback())
return self
# Cell
@patch
@contextmanager
def distrib_ctx(self: Learner, cuda_id=None,sync_bn=True):
"A context manager to adapt a learner to train in distributed data parallel mode."
# Figure out the GPU to use from rank. Create a dpg if none exists yet.
if cuda_id is None: cuda_id = rank_distrib()
if not torch.distributed.is_initialized():
setup_distrib(cuda_id)
cleanup_dpg = torch.distributed.is_initialized()
else: cleanup_dpg = False
# Adapt self to DistributedDataParallel, yield, and cleanup afterwards.
try:
if num_distrib(): self.to_distributed(cuda_id,sync_bn)
yield self
finally:
self.detach_distributed()
if cleanup_dpg: teardown_distrib()
# Cell
def rank0_first(func, *args, **kwargs):
"Execute `func` in the Rank-0 process first, then in other ranks in parallel."
if args or kwargs: func = partial(func, *args, **kwargs)
dummy_l = Learner(DataLoaders(device='cpu'), nn.Linear(1,1), loss_func=lambda: 0)
with dummy_l.distrib_ctx():
if not rank_distrib(): res = func()
distrib_barrier()
if rank_distrib(): res = func()
return res
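# Illustrative usage sketch (comment only; assumes the script is launched once per GPU,
# e.g. via `python -m fastai.launch train.py`, and that `learn` already exists):
#
#     with learn.distrib_ctx():      # wraps the model in DistributedDataParallel
#         learn.fit_one_cycle(5)
#
#     # do one-time work (e.g. a dataset download) on rank 0 before other ranks proceed:
#     path = rank0_first(untar_data, URLs.IMAGEWOOF)   # untar_data/URLs from fastai.data.external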
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/distributed.py
|
distributed.py
|
from torch import multiprocessing
import platform,os
if platform.system()=='Darwin':
    # Python 3.8 changed the default start method to 'spawn' on macOS, but that doesn't work with PyTorch DataLoader when num_workers>0
multiprocessing.set_start_method('fork', force=True)
# workaround "OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized"
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
from .imports import *
from .torch_imports import *
from .torch_core import *
from .layers import *
from .losses import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/torch_basics.py
|
torch_basics.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/99_pytorch_doc.ipynb (unless otherwise specified).
__all__ = ['PYTORCH_URL', 'pytorch_doc_link']
# Cell
PYTORCH_URL = 'https://pytorch.org/docs/stable/'
# Cell
def _mod2page(mod):
if mod == Tensor: return 'tensors.html'
name = mod.__name__
name = name.replace('torch.', '').replace('utils.', '')
if name.startswith('nn.modules'): return 'nn.html'
return f'{name}.html'
# Cell
import importlib
# Cell
def pytorch_doc_link(name):
if name.startswith('F'): name = 'torch.nn.functional' + name[1:]
if not name.startswith('torch.'): name = 'torch.' + name
if name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html'
try:
mod = importlib.import_module(name)
return f'{PYTORCH_URL}{_mod2page(mod)}'
except: pass
splits = name.split('.')
mod_name,fname = '.'.join(splits[:-1]),splits[-1]
if mod_name == 'torch.Tensor': return f'{PYTORCH_URL}tensors.html#{name}'
try:
mod = importlib.import_module(mod_name)
page = _mod2page(mod)
return f'{PYTORCH_URL}{page}#{name}'
except: return None
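# Illustrative sketch (comment only) of the link scheme above:
#
#     pytorch_doc_link('Tensor.view')
#     # -> 'https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view'
#     pytorch_doc_link('nn.Linear')
#     # -> a '#torch.nn.Linear' anchor on the nn.html page (when torch is importable)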
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/_pytorch_doc.py
|
_pytorch_doc.py
|
import numpy as np
import io,operator,sys,os,re,mimetypes,csv,itertools,json,shutil,glob,pickle,tarfile,collections
import hashlib,itertools,types,inspect,functools,random,time,math,bz2,typing,numbers,string
import multiprocessing,threading,urllib,tempfile,concurrent.futures,matplotlib,warnings,zipfile
from concurrent.futures import as_completed
from functools import partial,reduce
from itertools import starmap,dropwhile,takewhile,zip_longest
from copy import copy,deepcopy
from multiprocessing import Lock,Process,Queue,queues
from datetime import datetime
from contextlib import redirect_stdout,contextmanager
from collections.abc import Iterable,Iterator,Generator,Sequence
from typing import Union,Optional
from types import SimpleNamespace
from pathlib import Path
from collections import OrderedDict,defaultdict,Counter,namedtuple
from enum import Enum,IntEnum
from textwrap import TextWrapper
from operator import itemgetter,attrgetter,methodcaller
from urllib.request import urlopen
# External modules
import requests,yaml,matplotlib.pyplot as plt,pandas as pd,scipy
from pandas.api.types import is_categorical_dtype,is_numeric_dtype
from numpy import array,ndarray
from scipy import ndimage
from pdb import set_trace
from fastcore.all import *
from fastprogress.fastprogress import progress_bar,master_bar
try:
from types import WrapperDescriptorType,MethodWrapperType,MethodDescriptorType
except ImportError:
WrapperDescriptorType = type(object.__init__)
MethodWrapperType = type(object().__str__)
MethodDescriptorType = type(str.join)
from types import BuiltinFunctionType,BuiltinMethodType,MethodType,FunctionType,LambdaType
pd.options.display.max_colwidth = 600
NoneType = type(None)
string_classes = (str,bytes)
mimetypes.init()
# PyTorch warnings
warnings.filterwarnings("ignore", message='.*nonzero.*', category=UserWarning)
warnings.filterwarnings("ignore", message='.*grid_sample.*', category=UserWarning)
warnings.filterwarnings("ignore", message='.*Distutils.*', category=UserWarning)
def is_iter(o):
"Test whether `o` can be used in a `for` loop"
#Rank 0 tensors in PyTorch are not really iterable
return isinstance(o, (Iterable,Generator)) and getattr(o,'ndim',1)
def is_coll(o):
"Test whether `o` is a collection (i.e. has a usable `len`)"
#Rank 0 tensors in PyTorch do not have working `len`
return hasattr(o, '__len__') and getattr(o,'ndim',1)
def all_equal(a,b):
"Compares whether `a` and `b` are the same length and have the same contents"
if not is_iter(b): return False
return all(equals(a_,b_) for a_,b_ in itertools.zip_longest(a,b))
def noop (x=None, *args, **kwargs):
"Do nothing"
return x
def noops(self, x=None, *args, **kwargs):
"Do nothing (method)"
return x
def one_is_instance(a, b, t): return isinstance(a,t) or isinstance(b,t)
def equals(a,b):
"Compares `a` and `b` for equality; supports sublists, tensors and arrays too"
if one_is_instance(a,b,type): return a==b
if hasattr(a, '__array_eq__'): return a.__array_eq__(b)
if hasattr(b, '__array_eq__'): return b.__array_eq__(a)
cmp = (np.array_equal if one_is_instance(a, b, ndarray ) else
operator.eq if one_is_instance(a, b, (str,dict,set)) else
all_equal if is_iter(a) or is_iter(b) else
operator.eq)
return cmp(a,b)
def pv(text, verbose):
if verbose: print(text)
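# Illustrative sketch of the helpers above (safe to run as a script; uses only names
# defined in this module):
if __name__ == '__main__':
    assert is_iter([1, 2, 3]) and not is_iter(1)
    assert equals([1, np.array([2, 3])], [1, np.array([2, 3])])  # nested, element-wise comparison
    assert noop(42) == 42
    pv("printed only because verbose=True", verbose=True)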
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/imports.py
|
imports.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_torch_core.ipynb (unless otherwise specified).
__all__ = ['progress_bar', 'master_bar', 'subplots', 'show_image', 'show_titled_image', 'show_images', 'ArrayBase',
'ArrayImageBase', 'ArrayImage', 'ArrayImageBW', 'ArrayMask', 'tensor', 'set_seed', 'get_random_states',
'set_random_states', 'no_random', 'unsqueeze', 'unsqueeze_', 'apply', 'maybe_gather', 'to_detach', 'to_half',
'to_float', 'default_device', 'to_device', 'to_cpu', 'to_np', 'to_concat', 'TensorBase', 'TensorImageBase',
'TensorImage', 'TensorImageBW', 'TensorMask', 'TensorFlowField', 'TensorCategory', 'TensorMultiCategory',
'TitledTensorScalar', 'concat', 'Chunks', 'show_title', 'ShowTitle', 'TitledInt', 'TitledFloat', 'TitledStr',
'TitledTuple', 'get_empty_df', 'display_df', 'get_first', 'one_param', 'item_find', 'find_device', 'find_bs',
'np_func', 'Module', 'get_model', 'one_hot', 'one_hot_decode', 'params', 'trainable_params', 'norm_types',
'norm_bias_params', 'batch_to_samples', 'logit', 'num_distrib', 'rank_distrib', 'distrib_barrier',
'base_doc', 'doc', 'nested_reorder', 'make_cross_image', 'show_image_batch', 'requires_grad', 'init_default',
'cond_init', 'apply_leaf', 'apply_init', 'script_use_ctx', 'script_save_ctx', 'script_fwd', 'script_bwd',
'grad_module', 'flatten_check']
# Cell
from .imports import *
from .torch_imports import *
# Cell
#nbdev_comment _all_ = ['progress_bar','master_bar']
# Cell
if torch.cuda.is_available():
if torch.cuda.current_device()==0:
def_gpu = int(os.environ.get('DEFAULT_GPU') or 0)
if torch.cuda.device_count()>=def_gpu: torch.cuda.set_device(def_gpu)
torch.backends.cudnn.benchmark = True
# Cell
@delegates(plt.subplots, keep=True)
def subplots(nrows=1, ncols=1, figsize=None, imsize=3,suptitle=None, **kwargs):
if figsize is None:
h=nrows*imsize if suptitle is None or imsize>2 else nrows*imsize+0.6 #https://github.com/matplotlib/matplotlib/issues/5355
figsize=(ncols*imsize, h)
fig,ax = plt.subplots(nrows, ncols, figsize=figsize, **kwargs)
if suptitle is not None: fig.suptitle(suptitle)
if nrows*ncols==1: ax = array([ax])
return fig,ax
# Cell
def _fig_bounds(x):
r = x//32
return min(5, max(1,r))
# Cell
@delegates(plt.Axes.imshow, keep=True, but=['shape', 'imlim'])
def show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):
"Show a PIL or PyTorch image on `ax`."
# Handle pytorch axis order
if hasattrs(im, ('data','cpu','permute')):
im = im.data.cpu()
if im.shape[0]<5: im=im.permute(1,2,0)
elif not isinstance(im,np.ndarray): im=array(im)
# Handle 1-channel images
if im.shape[-1]==1: im=im[...,0]
ax = ifnone(ax,ctx)
if figsize is None: figsize = (_fig_bounds(im.shape[0]), _fig_bounds(im.shape[1]))
if ax is None: _,ax = plt.subplots(figsize=figsize)
ax.imshow(im, **kwargs)
if title is not None: ax.set_title(title)
ax.axis('off')
return ax
# Cell
@delegates(show_image, keep=True)
def show_titled_image(o, **kwargs):
"Call `show_image` destructuring `o` to `(img,title)`"
show_image(o[0], title=str(o[1]), **kwargs)
# Cell
@delegates(subplots)
def show_images(ims, nrows=1, ncols=None, titles=None, **kwargs):
"Show all images `ims` as subplots with `rows` using `titles`."
if ncols is None: ncols = int(math.ceil(len(ims)/nrows))
if titles is None: titles = [None]*len(ims)
axs = subplots(nrows, ncols, **kwargs)[1].flat
for im,t,ax in zip(ims, titles, axs): show_image(im, ax=ax, title=t)
# Cell
class ArrayBase(ndarray):
"An `ndarray` that can modify casting behavior"
@classmethod
def _before_cast(cls, x): return x if isinstance(x,ndarray) else array(x)
# Cell
class ArrayImageBase(ArrayBase):
"Base class for arrays representing images"
_show_args = {'cmap':'viridis'}
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class ArrayImage(ArrayImageBase):
"An array representing an image"
pass
# Cell
class ArrayImageBW(ArrayImage):
"An array representing an image"
_show_args = {'cmap':'Greys'}
# Cell
class ArrayMask(ArrayImageBase):
"An array representing an image mask"
_show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
# Cell
@patch
def __array_eq__(self:Tensor,b):
return torch.equal(self,b) if self.dim() else self==b
# Cell
def _array2tensor(x):
if x.dtype==np.uint16: x = x.astype(np.float32)
return torch.from_numpy(x)
# Cell
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
"Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
if len(rest): x = (x,)+rest
# There was a Pytorch bug in dataloader using num_workers>0. Haven't confirmed if fixed
# if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
res = (x if isinstance(x, Tensor)
else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list))
else _array2tensor(x) if isinstance(x, ndarray)
else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
else as_tensor(x, **kwargs) if hasattr(x, '__array__') or is_iter(x)
else _array2tensor(array(x), **kwargs))
if res.dtype is torch.float64: return res.float()
return res
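# Illustrative examples of the dispatch above (comment only, to keep this autogenerated
# module free of import-time side effects):
#     tensor(1, 2, 3)              # several scalars -> tensor([1, 2, 3])
#     tensor(np.ones(3))           # ndarrays go through _array2tensor/torch.from_numpy
#     tensor(pd.Series([1., 2.]))  # pandas objects are converted via their .values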
# Cell
def set_seed(s, reproducible=False):
"Set random seed for `random`, `torch`, and `numpy` (where available)"
try: torch.manual_seed(s)
except NameError: pass
try: torch.cuda.manual_seed_all(s)
except NameError: pass
try: np.random.seed(s%(2**32-1))
except NameError: pass
random.seed(s)
if reproducible:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Cell
def get_random_states():
"Gets states for `random`, `torch`, and `numpy` random number generators"
return {'random_state':random.getstate(),
'numpy_state':np.random.get_state(),
'torch_state':torch.get_rng_state(),
'torch_cuda_state':torch.cuda.get_rng_state_all(),
'torch_deterministic':torch.backends.cudnn.deterministic,
'torch_benchmark':torch.backends.cudnn.benchmark}
# Cell
def set_random_states(random_state,numpy_state,torch_state,torch_cuda_state,torch_deterministic,torch_benchmark):
"Set states for `random`, `torch`, and `numpy` random number generators"
random.setstate(random_state)
np.random.set_state(numpy_state)
torch.set_rng_state(torch_state)
torch.cuda.set_rng_state_all(torch_cuda_state)
torch.backends.cudnn.deterministic=torch_deterministic
torch.backends.cudnn.benchmark=torch_benchmark
# Cell
@contextmanager
def no_random(seed=42,reproducible=True):
"Stores and retrieves state of random number generators. Sets random seed for `random`, `torch`, and `numpy`."
states = get_random_states()
set_seed(seed,reproducible=reproducible)
try:
yield #we are managing global variables
finally:
set_random_states(**states)
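# Illustrative sketch of the intended usage (comment only):
#     with no_random(seed=42): t1 = torch.randn(3)
#     with no_random(seed=42): t2 = torch.randn(3)
#     # t1 and t2 are identical, and the previous RNG state is restored on exit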
# Cell
def unsqueeze(x, dim=-1, n=1):
"Same as `torch.unsqueeze` but can add `n` dims"
for _ in range(n): x = x.unsqueeze(dim)
return x
# Cell
def unsqueeze_(x, dim=-1, n=1):
"Same as `torch.unsqueeze_` but can add `n` dims"
for _ in range(n): x.unsqueeze_(dim)
return x
# Cell
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
# Cell
def apply(func, x, *args, **kwargs):
"Apply `func` recursively to `x`, passing on args"
if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
if isinstance(x,dict): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
res = func(x, *args, **kwargs)
return res if x is None else retain_type(res, x)
# Cell
def maybe_gather(x, axis=0):
"Gather copies of `x` on `axis` (if training is distributed)"
if num_distrib()<=1: return x
ndim = x.ndim
res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
# Cell
def to_detach(b, cpu=True, gather=True):
"Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
def _inner(x, cpu=True, gather=True):
if not isinstance(x,Tensor): return x
x = x.detach()
if gather: x = maybe_gather(x)
return x.cpu() if cpu else x
return apply(_inner, b, cpu=cpu, gather=gather)
# Cell
def to_half(b):
"Recursively map lists of tensors in `b ` to FP16."
return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)
# Cell
def to_float(b):
"Recursively map lists of int tensors in `b ` to float."
return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)
# Cell
# None: True if available; True: error if not available; False: use CPU
defaults.use_cuda = None
# Cell
def default_device(use_cuda=-1):
"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not available; False - CPU"
if use_cuda != -1: defaults.use_cuda=use_cuda
use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None)
assert torch.cuda.is_available() or not use
return torch.device(torch.cuda.current_device()) if use else torch.device('cpu')
# Cell
def to_device(b, device=None):
"Recursively put `b` on `device`."
if defaults.use_cuda==False: device='cpu'
elif device is None: device=default_device()
def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o.to_device(device) if hasattr(o, "to_device") else o
return apply(_inner, b)
# Cell
def to_cpu(b):
"Recursively map lists of tensors in `b ` to the cpu."
return to_device(b,'cpu')
# Cell
def to_np(x):
"Convert a tensor to a numpy array."
return apply(lambda o: o.data.cpu().numpy(), x)
# Cell
def to_concat(xs, dim=0):
"Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
if not xs: return xs
if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
if isinstance(xs[0],dict): return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
#We may receive xs that are not concatenable (inputs of a text classifier for instance),
# in this case we return a big list
try: return retain_type(torch.cat(xs, dim=dim), xs[0])
except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
for i in range_of(o_)) for o_ in xs], L())
# Cell
@patch
def set_meta(self:Tensor, x, as_copy=False):
"Set all metadata in `__dict__`"
if not hasattr(x,'__dict__'): return
# XXX: change to `deepcopy` once PyTorch 1.7.1 is out, and check nb 23 segmentation fit works
self.__dict__ = copy(x.__dict__) if as_copy else x.__dict__
# Cell
if not hasattr(torch,'as_subclass'): torch.as_subclass = torch.Tensor.as_subclass
# Cell
@patch
def as_subclass(self:Tensor, typ):
"Cast to `typ` and include `__dict__` and meta"
return retain_meta(self, torch.as_subclass(self, typ))
# Cell
def _torch_handled(args, opt, func):
if func not in opt: return False
for oks in opt[func]:
if all(isinstance(arg,ok) for arg,ok in zip(args,oks) if ok): return True
# Cell
class TensorBase(Tensor):
"A `Tensor` which support subclass pickling, and maintains metadata when casting or after methods"
debug,_opt = False,defaultdict(list)
def __new__(cls, x, **kwargs):
res = cast(tensor(x), cls)
for k,v in kwargs.items(): setattr(res, k, v)
return res
@classmethod
def _before_cast(cls, x): return tensor(x)
def __repr__(self): return re.sub('tensor', self.__class__.__name__, super().__repr__())
def __reduce_ex__(self,proto):
torch.utils.hooks.warn_if_has_hooks(self)
args = (type(self), self.storage(), self.storage_offset(), tuple(self.size()), self.stride())
if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())
f = _fa_rebuild_qtensor if self.is_quantized else _fa_rebuild_tensor
return (f, args + (self.requires_grad, OrderedDict()))
@classmethod
def register_func(cls, func, *oks): cls._opt[func].append(oks)
def __torch_function__(self, func, types, args=(), kwargs=None):
if self.debug and func.__name__ not in ('__str__','__repr__'): print(func, types, args, kwargs)
convert=False
if _torch_handled(args, self._opt, func): convert,types = type(self),(torch.Tensor,)
res = super().__torch_function__(func, types, args=args, kwargs=kwargs)
if convert: res = convert(res)
if isinstance(res, TensorBase): res.set_meta(self, as_copy=True)
return res
def new_tensor(self, size, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new_ones(self, data, dtype=None, device=None, requires_grad=False):
cls = type(self)
return self.as_subclass(Tensor).new_ones(data, dtype=dtype, device=device, requires_grad=requires_grad).as_subclass(cls)
def new(self, x=None):
cls = type(self)
res = self.as_subclass(Tensor).new() if x is None else self.as_subclass(Tensor).new(x)
return res.as_subclass(cls)
# Cell
class TensorImageBase(TensorBase):
_show_args = ArrayImageBase._show_args
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# Cell
class TensorImage(TensorImageBase): pass
# Cell
class TensorImageBW(TensorImage): _show_args = ArrayImageBW._show_args
# Cell
class TensorMask(TensorImageBase):
_show_args = ArrayMask._show_args
def show(self, ctx=None, **kwargs):
codes = getattr(self, 'codes', None)
if codes is not None: kwargs = merge({'vmin': 1, 'vmax': len(codes)}, kwargs)
return super().show(ctx=ctx, **kwargs)
# Cell
for o in Tensor.add,Tensor.sub,Tensor.mul,Tensor.div,Tensor.__rsub__,Tensor.__radd__,Tensor.matmul,Tensor.bmm:
TensorBase.register_func(o, TensorMask, TensorImageBase)
TensorBase.register_func(o, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorImageBase, TensorMask)
TensorMask.register_func(torch.einsum, str, TensorMask, TensorImageBase)
# Cell
class TensorFlowField(TensorBase): pass
TensorImage.register_func(F.grid_sample, TensorImageBase, TensorFlowField)
# Cell
class TensorCategory(TensorBase): pass
# Cell
class TensorMultiCategory(TensorCategory): pass
# Cell
class TitledTensorScalar(TensorBase):
"A tensor containing a scalar that has a `show` method"
def show(self, **kwargs): show_title(self.item(), **kwargs)
# Cell
@patch
def tensored(self:L):
"`mapped(tensor)`"
return self.map(tensor)
@patch
def stack(self:L, dim=0):
"Same as `torch.stack`"
return torch.stack(list(self.tensored()), dim=dim)
@patch
def cat (self:L, dim=0):
"Same as `torch.cat`"
return torch.cat (list(self.tensored()), dim=dim)
# Cell
def concat(*ls):
"Concatenate tensors, arrays, lists, or tuples"
if not len(ls): return []
it = ls[0]
if isinstance(it,torch.Tensor): res = torch.cat(ls)
elif isinstance(it,ndarray): res = np.concatenate(ls)
else:
res = itertools.chain.from_iterable(map(L,ls))
if isinstance(it,(tuple,list)): res = type(it)(res)
else: res = L(res)
return retain_type(res, it)
# Cell
class Chunks:
"Slice and int indexing into a list of lists"
def __init__(self, chunks, lens=None):
self.chunks = chunks
self.lens = L(map(len,self.chunks) if lens is None else lens)
self.cumlens = np.cumsum(0+self.lens)
self.totlen = self.cumlens[-1]
def __getitem__(self,i):
if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
di,idx = self.doc_idx(i)
return retain_type(self.chunks[di][idx], old=self.chunks[0])
def getslice(self, i):
st_d,st_i = self.doc_idx(ifnone(i.start,0))
en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
for b in range(st_d+1,en_d): res.append(self.chunks[b])
if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
return concat(*res)
def doc_idx(self, i):
if i<0: i=self.totlen+i # count from end
docidx = np.searchsorted(self.cumlens, i+1)-1
cl = self.cumlens[docidx]
return docidx,i-cl
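# Illustrative sketch (comment only): `Chunks` indexes a list of lists as one flat sequence.
#     c = Chunks([[0, 1, 2], [3, 4], [5, 6, 7, 8]])
#     c[4]    # -> 4, the second element of the second chunk
#     c[2:6]  # -> [2, 3, 4, 5], a slice spanning chunk boundaries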
# Cell
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
"Set title of `ax` to `o`, or print `o` if `ax` is `None`"
ax = ifnone(ax,ctx)
if ax is None: print(o)
elif hasattr(ax, 'set_title'):
t = ax.title.get_text()
if len(t) > 0: o = t+'\n'+str(o)
ax.set_title(o, color=color)
elif isinstance(ax, pd.Series):
while label in ax: label += '_'
ax = ax.append(pd.Series({label: o}))
return ax
# Cell
class ShowTitle:
"Base class that adds a simple `show`"
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledInt(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledFloat(Float, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledStr(Str, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
class TitledTuple(fastuple, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
add_docs(TitledInt, "An `int` with `show`"); add_docs(TitledStr, "A `str` with `show`");
add_docs(TitledFloat, "A `float` with `show`"); add_docs(TitledTuple, "A `fastuple` with `show`")
# Cell
@patch
def truncate(self:TitledStr, n):
"Truncate self to `n`"
words = self.split(' ')[:n]
return TitledStr(' '.join(words))
# Cell
if not hasattr(pd.DataFrame,'_old_init'): pd.DataFrame._old_init = pd.DataFrame.__init__
# Cell
@patch
def __init__(self:pd.DataFrame, data=None, index=None, columns=None, dtype=None, copy=False):
if data is not None and isinstance(data, Tensor): data = to_np(data)
self._old_init(data, index=index, columns=columns, dtype=dtype, copy=copy)
# Cell
def get_empty_df(n):
"Return `n` empty rows of a dataframe"
df = pd.DataFrame(index = range(n))
return [df.iloc[i] for i in range(n)]
# Cell
def display_df(df):
"Display `df` in a notebook or defaults to print"
try: from IPython.display import display, HTML
except: return print(df)
display(HTML(df.to_html()))
# Cell
def get_first(c):
"Get the first element of c, even if c is a dataframe"
return getattr(c, 'iloc', c)[0]
# Cell
def one_param(m):
"First parameter in `m`"
return first(m.parameters())
# Cell
def item_find(x, idx=0):
"Recursively takes the `idx`-th element of `x`"
if is_listy(x): return item_find(x[idx])
if isinstance(x,dict):
key = list(x.keys())[idx] if isinstance(idx, int) else idx
return item_find(x[key])
return x
# Cell
def find_device(b):
"Recursively search the device of `b`."
return item_find(b).device
# Cell
def find_bs(b):
"Recursively search the batch size of `b`."
return item_find(b).shape[0]
# Cell
def np_func(f):
"Convert a function taking and returning numpy arrays to one taking and returning tensors"
def _inner(*args, **kwargs):
nargs = [to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]
return tensor(f(*nargs, **kwargs))
functools.update_wrapper(_inner, f)
return _inner
# Cell
class Module(nn.Module, metaclass=PrePostInitMeta):
"Same as `nn.Module`, but no need for subclasses to call `super().__init__`"
def __pre_init__(self, *args, **kwargs): super().__init__()
def __init__(self): pass
# Cell
from torch.nn.parallel import DistributedDataParallel
# Cell
def get_model(model):
"Return the model maybe wrapped inside `model`."
return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
# Cell
def one_hot(x, c):
"One-hot encode `x` with `c` classes."
res = torch.zeros(c, dtype=torch.uint8)
if isinstance(x, Tensor) and x.numel()>0: res[x] = 1.
else: res[list(L(x, use_list=None))] = 1.
return res
# Cell
def one_hot_decode(x, vocab=None):
return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)
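# Illustrative examples (comment only):
#     one_hot([0, 2], 5)                                        # -> tensor([1, 0, 1, 0, 0], dtype=torch.uint8)
#     one_hot_decode(one_hot([0, 2], 5))                        # -> the hot indices [0, 2] (as an `L`)
#     one_hot_decode(one_hot([0, 2], 5), vocab=list('abcde'))   # -> ['a', 'c']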
# Cell
def params(m):
"Return all parameters of `m`"
return [p for p in m.parameters()]
# Cell
def trainable_params(m):
"Return all trainable parameters of `m`"
return [p for p in m.parameters() if p.requires_grad]
# Cell
norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm)
# Cell
def norm_bias_params(m, with_bias=True):
"Return all bias and BatchNorm parameters"
if isinstance(m, norm_types): return L(m.parameters())
res = L(m.children()).map(norm_bias_params, with_bias=with_bias).concat()
if with_bias and getattr(m, 'bias', None) is not None: res.append(m.bias)
return res
# Cell
def batch_to_samples(b, max_n=10):
"'Transposes' a batch to (at most `max_n`) samples"
if isinstance(b, Tensor): return retain_types(list(b[:max_n]), [b])
else:
res = L(b).map(partial(batch_to_samples,max_n=max_n))
return retain_types(res.zip(), [b])
# Cell
@patch
def interp_1d(x:Tensor, xp, fp):
"Same as `np.interp`"
slopes = (fp[1:]-fp[:-1])/(xp[1:]-xp[:-1])
incx = fp[:-1] - (slopes*xp[:-1])
locs = (x[:,None]>=xp[None,:]).long().sum(1)-1
locs = locs.clamp(0,len(slopes)-1)
return slopes[locs]*x + incx[locs]
# Cell
@patch
def pca(x:Tensor, k=2):
"Compute PCA of `x` with `k` dimensions."
x = x-torch.mean(x,0)
U,S,V = torch.svd(x.t())
return torch.mm(x,U[:,:k])
# Cell
def logit(x):
"Logit of `x`, clamped to avoid inf."
x = x.clamp(1e-7, 1-1e-7)
return -(1/x-1).log()
# Cell
def num_distrib():
"Return the number of processes in distributed training (if applicable)."
return int(os.environ.get('WORLD_SIZE', 0))
# Cell
def rank_distrib():
"Return the distributed rank of this process (if applicable)."
return int(os.environ.get('RANK', 0))
# Cell
def distrib_barrier():
"Place a synchronization barrier in distributed training"
if num_distrib() > 1 and torch.distributed.is_initialized(): torch.distributed.barrier()
# Cell
# Saving arrays requires pytables - optional dependency
try: import tables
except: pass
# Cell
def _comp_filter(lib='lz4',lvl=3): return tables.Filters(complib=f'blosc:{lib}', complevel=lvl)
# Cell
@patch
def save_array(p:Path, o, complib='lz4', lvl=3):
"Save numpy array to a compressed `pytables` file, using compression level `lvl`"
if isinstance(o,Tensor): o = to_np(o)
with tables.open_file(p, mode='w', filters=_comp_filter(lib=complib,lvl=lvl)) as f: f.create_carray('/', 'data', obj=o)
# Cell
@patch
def load_array(p:Path):
"Save numpy array to a `pytables` file"
with tables.open_file(p, 'r') as f: return f.root.data.read()
# Cell
def base_doc(elt):
"Print a base documentation of `elt`"
name = getattr(elt, '__qualname__', getattr(elt, '__name__', ''))
print(f'{name}{inspect.signature(elt)}\n{inspect.getdoc(elt)}\n')
print('To get a prettier result with hyperlinks to source code and documentation, install nbdev: pip install nbdev')
# Cell
def doc(elt):
"Try to use doc form nbdev and fall back to `base_doc`"
try:
from nbdev.showdoc import doc
doc(elt)
except: base_doc(elt)
# Cell
def nested_reorder(t, idxs):
"Reorder all tensors in `t` using `idxs`"
if isinstance(t, (Tensor,L)): return t[idxs]
elif is_listy(t): return type(t)(nested_reorder(t_, idxs) for t_ in t)
if t is None: return t
raise TypeError(f"Expected tensor, tuple, list or L but got {type(t)}")
# Cell
def make_cross_image(bw=True):
"Create a tensor containing a cross image, either `bw` (True) or color"
if bw:
im = torch.zeros(5,5)
im[2,:] = 1.
im[:,2] = 1.
else:
im = torch.zeros(3,5,5)
im[0,2,:] = 1.
im[1,:,2] = 1.
return im
# Cell
def show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):
"Display batch `b` in a grid of size `items` with `cols` width"
if items<cols: cols=items
rows = (items+cols-1) // cols
if figsize is None: figsize = (cols*3, rows*3)
fig,axs = plt.subplots(rows, cols, figsize=figsize)
for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)
# Cell
def requires_grad(m):
"Check if the first parameter of `m` requires grad or not"
ps = list(m.parameters())
return ps[0].requires_grad if len(ps)>0 else False
# Cell
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func:
if hasattr(m, 'weight'): func(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
return m
# Cell
def cond_init(m, func):
"Apply `init_default` to `m` unless it's a batchnorm module"
if (not isinstance(m, norm_types)) and requires_grad(m): init_default(m, func)
# Cell
def apply_leaf(m, f):
"Apply `f` to children of `m`."
c = m.children()
if isinstance(m, nn.Module): f(m)
for l in c: apply_leaf(l,f)
# Cell
def apply_init(m, func=nn.init.kaiming_normal_):
"Initialize all non-batchnorm layers of `m` with `func`."
apply_leaf(m, partial(cond_init, func=func))
# Cell
def script_use_ctx(f):
"Decorator: create jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs): return sf(*args, *ctx.saved_variables, **kwargs)
return update_wrapper(_f,f)
# Cell
def script_save_ctx(static, *argidx):
"Decorator: create jit script and save args with indices `argidx` using `ctx.save_for_backward`"
def _dec(f):
sf = torch.jit.script(f)
def _f(ctx, *args, **kwargs):
if argidx:
save = [args[o] for o in argidx]
ctx.save_for_backward(*save)
            if not argidx: args = [ctx]+list(args)
return sf(*args, **kwargs)
if static: _f = staticmethod(_f)
return update_wrapper(_f,f)
return _dec
# Cell
def script_fwd(*argidx):
"Decorator: create static jit script and save args with indices `argidx` using `ctx.save_for_backward`"
return script_save_ctx(True, *argidx)
# Cell
def script_bwd(f):
"Decorator: create static jit script and pass everything in `ctx.saved_variables to `f`, after `*args`"
return staticmethod(script_use_ctx(f))
# Cell
def grad_module(cls):
"Decorator: convert `cls` into an autograd function"
class _c(nn.Module):
def forward(self, *args, **kwargs): return cls.apply(*args, **kwargs)
return _c
# Comes from 13b_metrics.ipynb, cell
def flatten_check(inp, targ):
"Check that `out` and `targ` have the same number of elements and flatten them."
inp,targ = TensorBase(inp.contiguous()).view(-1),TensorBase(targ.contiguous()).view(-1)
test_eq(len(inp), len(targ))
return inp,targ
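# Illustrative sketch (comment only):
#     flatten_check(torch.zeros(2, 3), torch.zeros(6))   # -> two flat tensors of 6 elements each
#     flatten_check(torch.zeros(2, 3), torch.zeros(5))   # raises: element counts differ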
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/torch_core.py
|
torch_core.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/97_test_utils.ipynb (unless otherwise specified).
__all__ = ['synth_dbunch', 'RegModel', 'synth_learner', 'VerboseCallback', 'get_env', 'try_import', 'nvidia_smi',
'nvidia_mem', 'show_install']
# Cell
from .imports import *
from .data.all import *
from .optimizer import *
from .learner import *
from .callback.core import *
from torch.utils.data import TensorDataset
# Cell
from torch.utils.data import TensorDataset
# Cell
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2, cuda=False):
def get_data(n):
x = torch.randn(bs*n, 1)
return TensorDataset(x, a*x + b + 0.1*torch.randn(bs*n, 1))
train_ds = get_data(n_train)
valid_ds = get_data(n_valid)
device = default_device() if cuda else None
train_dl = TfmdDL(train_ds, bs=bs, shuffle=True, num_workers=0)
valid_dl = TfmdDL(valid_ds, bs=bs, num_workers=0)
return DataLoaders(train_dl, valid_dl, device=device)
# Cell
class RegModel(Module):
def __init__(self): self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
def forward(self, x): return x*self.a + self.b
# Cell
@delegates(Learner.__init__)
def synth_learner(n_trn=10, n_val=2, cuda=False, lr=1e-3, data=None, model=None, **kwargs):
if data is None: data=synth_dbunch(n_train=n_trn,n_valid=n_val, cuda=cuda)
if model is None: model=RegModel()
return Learner(data, model, lr=lr, loss_func=MSELossFlat(),
opt_func=partial(SGD, mom=0.9), **kwargs)
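# Illustrative sketch (comment only): the synthetic learner gives a fast end-to-end check of
# the training loop on y = a*x + b data (a=2, b=3 by default in `synth_dbunch`):
#     learn = synth_learner(lr=5e-2)
#     learn.fit(3)
#     # learn.model.a and learn.model.b should move towards 2 and 3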
# Cell
class VerboseCallback(Callback):
"Callback that prints the name of each event called"
def __call__(self, event_name):
print(event_name)
super().__call__(event_name)
# Cell
def get_env(name):
"Return env var value if it's defined and not an empty string, or return Unknown"
res = os.environ.get(name,'')
return res if len(res) else "Unknown"
# Cell
def try_import(module):
"Try to import `module`. Returns module's object on success, None on failure"
try: return importlib.import_module(module)
except: return None
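# Illustrative usage sketch: `get_env` and `try_import` are small guards used
# by `show_install` below; neither raises when the value or package is missing.
def _example_env_probe():
    conda_env = get_env('CONDA_DEFAULT_ENV')  # "Unknown" when unset or empty
    distro = try_import('distro')             # None if the package is absent
    return conda_env, distro is not None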
# Cell
def nvidia_smi(cmd = "nvidia-smi"):
try: res = run(cmd)
except OSError as e: return None
return res
# Cell
def nvidia_mem():
try: mem = run("nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader")
except: return None
return mem.strip().split('\n')
# Cell
def show_install(show_nvidia_smi:bool=False):
"Print user's setup information"
import fastai, platform, fastprogress
rep = []
opt_mods = []
rep.append(["=== Software ===", None])
rep.append(["python", platform.python_version()])
rep.append(["fastai", fastai.__version__])
rep.append(["fastprogress", fastprogress.__version__])
rep.append(["torch", torch.__version__])
# nvidia-smi
smi = nvidia_smi()
if smi:
match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
if match: rep.append(["nvidia driver", match[0]])
available = "available" if torch.cuda.is_available() else "**Not available** "
rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
# no point reporting on cudnn if cuda is not available, as it
# seems to be enabled at times even on cpu-only setups
if torch.cuda.is_available():
enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
rep.append(["\n=== Hardware ===", None])
gpu_total_mem = []
nvidia_gpu_cnt = 0
    if smi:
        mem = nvidia_mem()
        nvidia_gpu_cnt = len(ifnone(mem, []))
        gpu_total_mem = list(map(int, mem)) if mem else []
if nvidia_gpu_cnt: rep.append(["nvidia gpus", nvidia_gpu_cnt])
torch_gpu_cnt = torch.cuda.device_count()
if torch_gpu_cnt:
rep.append(["torch devices", torch_gpu_cnt])
# information for each gpu
for i in range(torch_gpu_cnt):
rep.append([f" - gpu{i}", (f"{gpu_total_mem[i]}MB | " if gpu_total_mem else "") + torch.cuda.get_device_name(i)])
else:
if nvidia_gpu_cnt:
rep.append([f"Have {nvidia_gpu_cnt} GPU(s), but torch can't use them (check nvidia driver)", None])
else:
rep.append([f"No GPUs available", None])
rep.append(["\n=== Environment ===", None])
rep.append(["platform", platform.platform()])
if platform.system() == 'Linux':
distro = try_import('distro')
if distro:
# full distro info
rep.append(["distro", ' '.join(distro.linux_distribution())])
else:
opt_mods.append('distro');
# partial distro info
rep.append(["distro", platform.uname().version])
rep.append(["conda env", get_env('CONDA_DEFAULT_ENV')])
rep.append(["python", sys.executable])
rep.append(["sys.path", "\n".join(sys.path)])
print("\n\n```text")
keylen = max([len(e[0]) for e in rep if e[1] is not None])
for e in rep:
print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
if smi:
if show_nvidia_smi: print(f"\n{smi}")
else:
if torch_gpu_cnt: print("no nvidia-smi is found")
else: print("no supported gpus found on this system")
print("```\n")
print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
if opt_mods:
print("Optional package(s) to enhance the diagnostics can be installed with:")
print(f"pip install {' '.join(opt_mods)}")
print("Once installed, re-run this utility to get the additional information")
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/test_utils.py
|
test_utils.py
|
__version__ = "2.2.0"
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/45_collab.ipynb (unless otherwise specified).
__all__ = ['TabularCollab', 'CollabDataLoaders', 'EmbeddingDotBias', 'EmbeddingNN', 'collab_learner']
# Cell
from .tabular.all import *
# Cell
class TabularCollab(TabularPandas):
"Instance of `TabularPandas` suitable for collaborative filtering (with no continuous variable)"
with_cont=False
# Cell
class CollabDataLoaders(DataLoaders):
"Base `DataLoaders` for collaborative filtering."
@delegates(DataLoaders.from_dblock)
@classmethod
def from_df(cls, ratings, valid_pct=0.2, user_name=None, item_name=None, rating_name=None, seed=None, path='.', **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `ratings`."
user_name = ifnone(user_name, ratings.columns[0])
item_name = ifnone(item_name, ratings.columns[1])
rating_name = ifnone(rating_name, ratings.columns[2])
cat_names = [user_name,item_name]
splits = RandomSplitter(valid_pct=valid_pct, seed=seed)(range_of(ratings))
to = TabularCollab(ratings, [Categorify], cat_names, y_names=[rating_name], y_block=TransformBlock(), splits=splits)
return to.dataloaders(path=path, **kwargs)
@classmethod
def from_csv(cls, csv, **kwargs):
"Create a `DataLoaders` suitable for collaborative filtering from `csv`."
return cls.from_df(pd.read_csv(csv), **kwargs)
CollabDataLoaders.from_csv = delegates(to=CollabDataLoaders.from_df)(CollabDataLoaders.from_csv)
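# Illustrative usage sketch: build collaborative-filtering DataLoaders from an
# in-memory ratings DataFrame. The column names and values are made up for the
# example; with a real dataset you would usually call `from_csv` instead.
def _example_collab_dls():
    ratings = pd.DataFrame({'user':   [0, 0, 1, 1, 2, 2, 3, 3],
                            'movie':  [0, 1, 0, 2, 1, 2, 0, 1],
                            'rating': [3., 4., 5., 2., 4., 3., 1., 5.]})
    return CollabDataLoaders.from_df(ratings, user_name='user', item_name='movie',
                                     rating_name='rating', valid_pct=0.25, bs=4)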
# Cell
class EmbeddingDotBias(Module):
"Base dot model for collaborative filtering."
def __init__(self, n_factors, n_users, n_items, y_range=None):
self.y_range = y_range
(self.u_weight, self.i_weight, self.u_bias, self.i_bias) = [Embedding(*o) for o in [
(n_users, n_factors), (n_items, n_factors), (n_users,1), (n_items,1)
]]
def forward(self, x):
users,items = x[:,0],x[:,1]
dot = self.u_weight(users)* self.i_weight(items)
res = dot.sum(1) + self.u_bias(users).squeeze() + self.i_bias(items).squeeze()
if self.y_range is None: return res
return torch.sigmoid(res) * (self.y_range[1]-self.y_range[0]) + self.y_range[0]
@classmethod
def from_classes(cls, n_factors, classes, user=None, item=None, y_range=None):
"Build a model with `n_factors` by inferring `n_users` and `n_items` from `classes`"
if user is None: user = list(classes.keys())[0]
if item is None: item = list(classes.keys())[1]
res = cls(n_factors, len(classes[user]), len(classes[item]), y_range=y_range)
res.classes,res.user,res.item = classes,user,item
return res
def _get_idx(self, arr, is_item=True):
"Fetch item or user (based on `is_item`) for all in `arr`"
assert hasattr(self, 'classes'), "Build your model with `EmbeddingDotBias.from_classes` to use this functionality."
classes = self.classes[self.item] if is_item else self.classes[self.user]
c2i = {v:k for k,v in enumerate(classes)}
try: return tensor([c2i[o] for o in arr])
except Exception as e:
print(f"""You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data.
If it was in your original data, it may have been split such that it's only in the validation set now.""")
def bias(self, arr, is_item=True):
"Bias for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_bias if is_item else self.u_bias).eval().cpu()
return to_detach(layer(idx).squeeze(),gather=False)
def weight(self, arr, is_item=True):
"Weight for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_weight if is_item else self.u_weight).eval().cpu()
return to_detach(layer(idx),gather=False)
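# Illustrative usage sketch: build the dot-product model from the classes
# recorded in the DataLoaders, then look up a couple of item biases. `dls` is
# assumed to be a `CollabDataLoaders` like the one sketched above; index 0 of
# each class list is the '#na#' placeholder, so it is skipped here.
def _example_embedding_dot_bias(dls):
    model = EmbeddingDotBias.from_classes(50, dls.classes, y_range=(0, 5.5))
    some_items = dls.classes[model.item][1:3]
    return model.bias(some_items, is_item=True)  # one (untrained) bias per item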
# Cell
class EmbeddingNN(TabularModel):
"Subclass `TabularModel` to create a NN suitable for collaborative filtering."
@delegates(TabularModel.__init__)
def __init__(self, emb_szs, layers, **kwargs):
super().__init__(emb_szs=emb_szs, n_cont=0, out_sz=1, layers=layers, **kwargs)
# Cell
@delegates(Learner.__init__)
def collab_learner(dls, n_factors=50, use_nn=False, emb_szs=None, layers=None, config=None, y_range=None, loss_func=None, **kwargs):
"Create a Learner for collaborative filtering on `dls`."
emb_szs = get_emb_sz(dls, ifnone(emb_szs, {}))
if loss_func is None: loss_func = MSELossFlat()
if config is None: config = tabular_config()
if y_range is not None: config['y_range'] = y_range
if layers is None: layers = [n_factors]
if use_nn: model = EmbeddingNN(emb_szs=emb_szs, layers=layers, **config)
else: model = EmbeddingDotBias.from_classes(n_factors, dls.classes, y_range=y_range)
return Learner(dls, model, loss_func=loss_func, **kwargs)
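# Illustrative usage sketch: the two flavours of collaborative-filtering
# Learner, dot-product (default) and neural net (`use_nn=True`). `dls` is
# assumed to be a `CollabDataLoaders` instance.
def _example_collab_learner(dls):
    learn_dot = collab_learner(dls, n_factors=50, y_range=(0, 5.5))
    learn_nn = collab_learner(dls, use_nn=True, layers=[64, 32], y_range=(0, 5.5))
    return learn_dot, learn_nn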
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/collab.py
|
collab.py
|
#Code directly taken from NVIDIA apex: https://github.com/NVIDIA/apex
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
Retained for legacy purposes. It is recommended to use FP16Model.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
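# Illustrative usage sketch: `network_to_half` casts inputs and most weights to
# fp16 while `BN_convert_float` keeps batchnorm parameters in fp32. The model
# below is made up for the example.
def _example_network_to_half():
    net = nn.Sequential(nn.Linear(8, 4), nn.BatchNorm1d(4), nn.ReLU())
    half_net = network_to_half(net)
    # half_net[0] is `tofp16`; half_net[1] is the original (now half) network.
    return half_net[1][0].weight.dtype, half_net[1][1].weight.dtype  # fp16, fp32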
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
if isinstance(module, torch.nn.RNNBase) or isinstance(module, torch.nn.modules.rnn.RNNBase):
module.flatten_parameters()
return network
class FP16Model(nn.Module):
"""
Convert model to half precision in a batchnorm-safe way.
"""
def __init__(self, network):
super(FP16Model, self).__init__()
self.network = convert_network(network, dtype=torch.half)
def forward(self, *inputs):
inputs = tuple(t.half() for t in inputs)
return self.network(*inputs)
def backwards_debug_hook(grad):
raise RuntimeError("master_params recieved a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master parameters. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
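# Illustrative sketch of one optimizer step in the classic master-weights fp16
# loop these helpers were written for. The model (already converted to half,
# e.g. with `network_to_half`), loss function, batch and loss scale are all
# made up for the example; this is not the only way to wire these pieces up.
def _example_fp16_step(model, loss_fn, batch, scale=512.0):
    model_params, master_params = prep_param_lists(model)
    optimizer = torch.optim.SGD(master_params, lr=1e-3)
    x, y = batch
    loss = loss_fn(model(x.half()), y)
    (loss * scale).backward()                              # scaled fp16 grads
    model_grads_to_master_grads(model_params, master_params)
    for p in master_params:                                # unscale in fp32
        if p.grad is not None: p.grad.data.div_(scale)
    optimizer.step()                                       # update fp32 masters
    master_params_to_model_params(model_params, master_params)
    optimizer.zero_grad(); model.zero_grad()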
# Backward compatibility fixes
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
clip_grad_norm = torch.nn.utils.clip_grad_norm_
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/fp16_utils.py
|
fp16_utils.py
|
import pandas as pd
import torch
from torch import as_tensor,Tensor,ByteTensor,LongTensor,FloatTensor,HalfTensor,DoubleTensor
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import SequentialSampler,RandomSampler,Sampler,BatchSampler
from torch.utils.data import IterableDataset,get_worker_info
from torch.utils.data._utils.collate import default_collate,default_convert
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/torch_imports.py
|
torch_imports.py
|