language: string (length 0–24)
filename: string (length 9–214)
code: string (length 99–9.93M)
wireshark/test/captures/dhcp.pcapng
M<+яяяяяяяя яя  \ѓк Љ35::яяяяяя ‚ьBE,Ё6ъ‹яяяяDCY= ‚ьBc‚Sc5= ‚ьB27*я\xѓк4‹35VV ‚ьBt­с›EHEЂАЁАЁ CD4"3=АЁ АЁ ‚ьBc‚Sc5яяя:; N36АЁяx\ѓкњ›45::яяяяяя ‚ьBE,Ё7ъЉяяяяDCџЅ= ‚ьBc‚Sc5= ‚ьB2АЁ 6АЁ7*я\xѓкЦњ45VV ‚ьBt­с›EHFЂАЁАЁ CD4ЯЫ=АЁ ‚ьBc‚Sc5:; N36АЁяяяяx
wireshark/test/captures/dtn_udpcl_bpv7_bpsec_bcb_admin.cbordiag
[_ [7, 11094, 1, [2, [26622, 12070]], [2, [5279, 7390]], [2, [4785, 1111]], [81089243, 993], 52532350140, 1646, 2047, h'55E4'], [7, 7, 175, 0, 24(h'1B000000013075CD37')], [10, 5, 89, 0, 24(h'820007')], [12, 25, 162, 0, 63(h'8101 02 01 82028219149f191cde 84 8201426869 820205 8203426869 820407 8181 8201457468657265')], [1, 1, 3, 0, 24(h'8201848482F41B000000018BA3F02382F41A3027AC8782F41B000000018DFAF97381F503820282185D18B9821A533D733D190119')] ]
wireshark/test/captures/dtn_udpcl_bpv7_bpsec_bib_admin.cbordiag
[_ [7, 11094, 1, [2, [26622, 12070]], [2, [5279, 7390]], [2, [4785, 1111]], [81089243, 993], 52532350140, 1646, 2047, h'55E4'], [7, 7, 175, 0, 24(h'1B000000013075CD37')], [10, 5, 89, 0, 24(h'820007')], [11, 25, 162, 0, 63(h'8101 01 01 82028219149f191cde 83 820105 8202426869 820307 8181 8201457468657265')], [1, 1, 3, 0, 24(h'8201848482F41B000000018BA3F02382F41A3027AC8782F41B000000018DFAF97381F503820282185D18B9821A533D733D190119')] ]
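The two .cbordiag entries above are CBOR diagnostic notation: [_ ... ] is an indefinite-length array, h'...' a hex byte string (whitespace inside is allowed), and 24(...)/63(...) are tagged items wrapping embedded CBOR. As an illustrative aside only — not part of the test suite, with a hypothetical helper name — a minimal Lua sketch for turning an h'...' literal into raw bytes:

-- Illustrative sketch: decode a CBOR-diagnostic h'..' byte-string literal,
-- ignoring the whitespace the notation allows inside it.
local function hexliteral_to_bytes(lit)                -- hypothetical helper
    local hex = lit:match("^h'(.*)'$"):gsub("%s+", "") -- strip h'..' and spaces
    return (hex:gsub("%x%x", function(b)               -- two hex digits -> one byte
        return string.char(tonumber(b, 16))
    end))
end

print(#hexliteral_to_bytes("h'8201 02 01'"))           --> 4 (bytes)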
wireshark/test/captures/mongo-zstd.pcapng
°M<+ÿÿÿÿÿÿÿÿ1AMD Ryzen 5 1600 Six-Core Processor (with SSE4.2)Linux 5.15.0-53-generic7Dumpcap (Wireshark) 4.1.0 (v4.1.0rc0-891-gade32a12f2bc)°Xenp0s3  port 27017 Linux 5.15.0-53-genericXÄÊ+ãD’¡¡RT5'@ͼE“³Í@@Ãü hƒN £i‰Œi+ò ïÏiPù‰Ã k¡lüÖÜÝI(µ/ý IIDdropcolllsididعïˆ:ŽD¬Áç2Ó¡¤$dbdbÄlVîió7Counters provided by dumpcapVî D®7Vî=ó7*l
wireshark/test/captures/protobuf_test_leading_dot.pcapng
ÌM<+ÿÿÿÿÿÿÿÿ6Intel(R) Core(TM) i5-4300U CPU @ 1.90GHz (with SSE4.2)+64-bit Windows 7 Service Pack 1, build 7601;Dumpcap (Wireshark) 3.1.1-hqx (v3.1.1rc0-470-gfb3e4ffe2bd3)Ì ÿÿ2\Device\NPF_{F034326D-3D1D-4756-B3D8-7B5413C71351}无线网络连接  +64-bit Windows 7 Service Pack 1, build 7601 pŽ”(„EOOàLþèó¸îeï‡EA$@%òÀ¨'œEOüO»-N"I am in a.b.a.b.c*2 I'm in a.b.cplŽ”÷îECounters provided by dumpcapŽ”ŒDŽ”÷îEl
wireshark/test/captures/sipmsg.log
File opened. Mar 6 13:34:22.599 UDP[3:0]10.102.131.194:5060 OPENED Mar 6 13:34:22.616 UDP[6:0]10.102.130.185:5060 OPENED Mar 6 13:34:49.416 On [6:0]10.102.130.185:5060 received from 10.102.130.150:5060 REGISTER sip:csp.noklab.net SIP/2.0 Via: SIP/2.0/UDP 192.168.1.100:5060;branch=z9hG4bK26b7a48d From: sip:[email protected] To: sip:[email protected] Call-ID: [email protected] CSeq: 144 REGISTER User-Agent: CSCO/7 Contact: <sip:[email protected]:5060> Content-Length: 0 Expires: 3600 ---------------------------------------- Mar 6 13:34:49.516 On [6:0]10.102.130.185:5060 sent to 10.102.130.150:5060 SIP/2.0 200 OK Via: SIP/2.0/UDP 192.168.1.100:5060;received=10.102.130.150;branch=z9hG4bK26b7a48d;rport=5060 From: sip:[email protected] To: sip:[email protected] Call-ID: [email protected] CSeq: 144 REGISTER Contact: <sip:[email protected]:5060>;expires=34 ---------------------------------------- Mar 6 13:39:06.100 On 127.0.0.1:5060 received from 127.0.0.1:5070 INVITE sip:[email protected]:5060;acme_realm=cpea8500 SIP/2.0 Via: SIP/2.0/UDP 127.0.0.1:5070;branch=z9hG4bKIWFuqpq6n00c0o1eckfm741;acme_irealm=public;acme_sa=192.168.109.112 Contact: "B5-2C23-052 Blu"<sip:[email protected]:5070> GenericID: 117318834600008@0008250123d0 Supported: 100rel From: "B5-2C23-052 Blu"<sip:[email protected]:5060>;tag=0000047b000ce0e0 To: <sip:[email protected]:5060> Call-ID: [email protected] CSeq: 2 INVITE P-Asserted-Identity: "B5-2C23-052 Blu"<sip:[email protected]:5060> Content-Length: 187 Content-Type: application/sdp v=0 o=IWF 10 10 IN IP4 192.168.109.113 s=H323 Call c=IN IP4 192.168.109.113 t=0 0 m=audio 29156 RTP/AVP 18 0 a=rtpmap:18 G729/8000/1 a=fmtp:18 annexb=yes a=rtpmap:0 PCMU/8000/1 ---------------------------------------- Mar 6 13:39:06.104 On 127.0.0.1:5060 sent to 127.0.0.1:5070 SIP/2.0 100 Trying Via: SIP/2.0/UDP 127.0.0.1:5070;branch=z9hG4bKIWFuqpq6n00c0o1eckfm741 From: "B5-2C23-052 Blu"<sip:[email protected]:5060>;tag=0000047b000ce0e0 To: <sip:[email protected]:5060> Call-ID: [email protected] CSeq: 2 INVITE ---------------------------------------- Mar 6 13:39:06.122 On 127.0.0.1:2945 sent to 127.0.0.1:2944 0000: ac 3e fd 01 00 07 89 d9 00 fc 10 00 00 00 02 00 .>.............. 0010: 00 00 00 00 f1 21 00 00 00 02 00 6d 30 00 6a 8c .....!.....m0.j. 0020: 00 02 20 01 80 00 06 70 75 62 6c 69 63 83 00 05 .. ....public... 0030: 24 57 45 53 54 84 00 08 63 70 65 61 38 35 30 30 $WEST...cpea8500 0040: 86 00 05 24 45 41 53 54 88 00 01 01 8b 00 01 00 ...$EAST........ 0050: 89 00 02 00 02 8a 00 04 00 00 00 00 98 00 04 00 ................ 0060: 00 00 00 99 00 04 00 00 00 00 9a 00 04 00 00 00 ................ 0070: 00 94 00 04 87 19 1f 0a 96 00 01 01 a7 00 01 00 ................ 0080: a8 00 01 00 a9 00 02 00 00 21 00 00 00 02 00 76 .........!.....v 0090: 30 00 73 8c 00 02 10 01 80 00 08 63 70 65 61 38 0.s........cpea8 00a0: 35 30 30 83 00 05 24 45 41 53 54 84 00 06 70 75 500...$EAST...pu 00b0: 62 6c 69 63 86 00 05 24 57 45 53 54 87 00 06 c0 blic...$WEST.... 00c0: a8 6d 71 71 e4 88 00 01 01 8b 00 01 01 89 00 02 .mqq............ 00d0: 00 02 8a 00 04 00 00 00 00 98 00 04 00 00 00 00 ................ 00e0: 99 00 04 00 00 00 00 9a 00 04 00 00 00 00 94 00 ................ 00f0: 04 7f 00 00 01 96 00 01 01 a7 00 01 00 a8 00 01 ................ 0100: 00 a9 00 02 00 00 ...... 
Transaction = 494041 { Context = $ { Add = $ { Flow { index=1E irealm=public idest=$WEST erealm=cpea8500 esource=$EAST media=audio trans=UDP mode=off num=2 bw=0 peakr=0 avgr=0 mbs=0 subscr=135.25.31.10 } }, Add = $ { Flow { index=1W irealm=cpea8500 idest=$EAST erealm=public esource=$WEST edest=192.168.109.113:29156 media=audio trans=UDP mode=1way num=2 bw=0 peakr=0 avgr=0 mbs=0 subscr=127.0.0.1 } } } } ---------------------------------------- Mar 6 13:39:06.127 On 127.0.0.1:5060 sent to 127.0.0.1:5070 SIP/2.0 181 Call Is Being Forwarded Via: SIP/2.0/UDP 127.0.0.1:5070;branch=z9hG4bKIWFuqpq6n00c0o1eckfm741;acme_iwf_2833_preferred=101 From: "B5-2C23-052 Blu"<sip:[email protected]:5060>;tag=0000047b000ce0e0 To: <sip:[email protected]:5060> Call-ID: [email protected] CSeq: 2 INVITE ---------------------------------------- Jun 8 14:35:50.233 UDP[3:0]10.102.131.194:5060 CLOSED Jun 8 14:35:50.233 UDP[6:0]10.102.130.185:5060 CLOSED File closed.
Text
wireshark/test/captures/text2pcap_hash_eol.txt
2015-10-01 21:16:24.317453 127.0.0.1 -> 127.0.0.1 UDP 96 Source port: 36887 Destination port: 36888 0000 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 ..............E. 0010 00 3e 3b f2 40 00 40 11 00 bb 7f 00 00 01 7f 00 .>;.@.@......... 0020 00 01 90 17 90 18 00 2a 00 00 00 00 01 00 00 01 .......*........ 0030 00 00 00 00 00 00 01 01 01 01 01 01 01 01 20 23 .............. # 0040 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 01 ................ 0050 01 01 01 01 01 01 01 01 01 01 01 01 2f cc 9c e4 ............/... #TEXT2PCAP test_directive
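text2pcap consumes hex dumps in this offset/bytes/ASCII layout (this particular file exercises a '#' character at end-of-line plus the trailing #TEXT2PCAP directive). A rough Lua sketch of pulling the data bytes out of one such dump line follows — an assumption about the fixed-width layout for illustration, not text2pcap's actual parser:

-- Illustrative only: extract up to 16 data bytes from one hex-dump line.
local function parse_dump_line(line)                 -- hypothetical helper
    local offset, rest = line:match("^(%x%x%x%x)%s+(.*)")
    if not offset then return nil end
    local bytes, n = {}, 0
    for b in rest:gmatch("(%x%x)%s") do              -- each byte pair is space-terminated
        n = n + 1
        bytes[n] = string.char(tonumber(b, 16))
        if n == 16 then break end                    -- stop before the ASCII column
    end
    return table.concat(bytes)
end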
wireshark/test/config/80211_keys.tmpl
# Keys needed for the decryption test suite "wep","1234567890" "wpa-pwd","Induction" "wpa-pwd","test0815" "wpa-pwd","12345678" "wpa-psk","a5001e18e0b3f792278825bc3abff72d7021d7c157b600470ef730e2490835d4" "wpa-psk","79258f6ceeecedd3482b92deaabdb675f09bcb4003ef5074f5ddb10a94ebe00a" "wpa-psk","23a9ee58c7810546ae3e7509fda9f97435778d689e53a54891c56d02f18ca162" "wpa-psk","ecbfe709d6151eaba6a4fd9cba94fbb570c1fc4c15506fad3185b4a0a0cfda9a" "wpa-psk","a4b0b2efa7f77d1006eccf1a814b62125c15fac5c137d9cdff8c75c43194268f" "wpa-psk","fc738f5b63ba93ebf0a45d42c5a0b1b5064649fa98f59bc062c2944de3780fe276088c95daaf672deb6780051aa13563" "msk","fc3fe399f0ab9eeb5b6e87b6e2b276d828e874de1773d4a925f5410d96565b22b1471711baffb8611b28d2a09cc1a6aaffbbfdf3cccf12db57f175c53bfe2b7b"
wireshark/test/config/80211_keys.user_tk_tmpl
# Keys needed for the 80211 user TK test cases in decryption test suite "tk","d0e57d224c1bb8806089d8c23154074c" "tk","6eaf63f4ad7997ced353723de3029f4d" "tk","fb42811bcb59b7845376246454fbdab7" "tk","4e30e8c019bea43ea5262b10853b818d" "tk","70cdbf2e5bc0ca22e53930818a5d80e4" "tk","4e6abbcf9dc0943936700b6825952218f58a47dfdf51dbb8ce9b02fd7d2d9e40" "tk","502085ca205e668f7e7c61cdf4f731336bb31e4f5b28ec91860174192e9b2190" "tk","755a9c1c9e605d5ff62849e4a17a935c" "tk","7ff30f7a8dd67950eaaf2f20a869a62d" "tk","b3dc2ff2d88d0d34c1ddc421cea17f304af3c46acbbe7b6d808b6ebf1b98ec38" "tk","a745ee2313f86515a155c4cb044bc148ae234b9c72707f772b69c2fede3e4016"
wireshark/test/config/esp_sa.tmpl
"IPv4","192.168.0.1","192.168.0.100","0x070883c2","AES-CBC [RFC3602]","0x5de1a4c2c72662c9fda7a7c78cd25623","HMAC-SHA-1-96 [RFC2404]","0x51c9213c18232f8f26c70c4dee6e0e6d56e31e8a" "IPv4","192.168.0.100","192.168.0.1","0xc254fe64","AES-CBC [RFC3602]","0x88e1dad7140af03b8d4f3d734d21be4b","HMAC-SHA-1-96 [RFC2404]","0x3e00d517c1220d4b7d2950fcc02edd4b6023d278"
wireshark/test/config/ikev1_decryption_table.tmpl
# This file is automatically generated, DO NOT MODIFY. fafaeb49382a763c,735be0cb62f82675c4f7bf8fbab9b56834ba76d6ab4fa240 ab3e0f1f8bc3be8e,928392441e00ece5e8af3bc545416e86afa1291e7c013271514738b70ef25d22 29f6006212625def,c26a81baec6070d289f0b4f13c2fb182e3d5406d11c29ebc2fc179502541325b
wireshark/test/config/ikev2_decryption_table.tmpl
# This file is automatically generated, DO NOT MODIFY. 1234567890123456,0987654321098765,,,"NULL [RFC2410]",,,"NONE [RFC4306]" 19ab98963486359f,78f13157ccd3b3d8,2e0e194070fc658c2bfbfdbf8b956be4b2eaa33d02a43cca,219f3080e631774b8d5836d3a675b099b1e271c9bdcf6e15,"3DES [RFC2451]",c96f5bad08aebbff60509c7495f11c183818b916,e742ac415cdfdd709c9de92769a169e0a5224f79,"HMAC_SHA1_160 [RFC4595]" ea684d21597afd36,d9fe2ab22dac23ac,be83fe15f6a9976941870830fe26c014b863b3,79e0f4476861a76e64329e787b1c4ff38d732f,"AES-CCM-128 with 12 octet ICV [RFC5282]",,,"NONE [RFC4306]" a2926ae833c6f138,5464c57d0dc5e272,5daf82e6fd7e57d5fbf76cd5af73fd46035db0bc,68848f4a7602b20c7d033cc998b0c097032ac38a,"AES-CTR-128 [RFC5930]",,,"ANY 96-bits of Authentication [No Checking]" 81f24c0acd8fa55c,192383172724c706,aaa06839eaf0959d486eeecda7a48b23080963b5fd7217928e8fbf58,92dc96ec87076caa84e26b3621c7c469427e2e4bcc1b962362a3dde3,"AES-CTR-192 [RFC5930]",65777be31ee2137f31cf23fa0ee834dfede11cc0adc9a84541026642c09df2bf96056a2036e97a67ce7d3c5b6f37e17e8fe64f4ef23e14f5997fb7671df3adaf,42abd4709f1b94fd8c2270c74aae3fbe61c0b9c109c55f3e3b9ed7e480bc75c3985c15234caa623c8ca0606c303921d7cfd44861df1e798370b2ee95fe712e52,"HMAC_SHA2_512_256 [RFC4868]" 191ccd371a7a1f7b,bc123d15e4af593f,9096ddd2933620e8f48122c53a3f562cb0222c1cf97ce41fcc874ea2582a89ac,6718c6b2bbef2f234eac4c13832f885d87b574afd2af0111161e99b5dc61b4d4,"AES-CBC-256 [RFC3602]",12d532c3e83c757906af548dfe1ccf223ca5507af77898454e2d55c8ace57a17,30c4ead18c93024b58a86c1e3db60f550221801026853170b4cb0248d3a95329,"HMAC_SHA2_256_128 [RFC4868]" cd7ae76304b277e2,74f6080ed799d463,daa0a85a81e6adda7b8c568f1c4cfaa6e9f9edb242e9895f012caaa642eacf4d004903,e02281ba4bb8ed20321faff956b95ce7f841b3039984dad4ed4625e77743fce4a04f32,"AES-CCM-256 with 16 octet ICV [RFC5282]",,,"NONE [RFC4306]" 5d48bfeeb7d574da,bbb73016c0503640,91b817d036d97db3ace64475cd8d1cbeab186295020211a9cf0c16cec10b92b453ecd24e,d04516586721974d970627d85f7d031433b6558c0ec6faecf9217e5445e17e7eeee6bc68,"AES-GCM-256 with 8 octet ICV [RFC5282]",,,"NONE [RFC4306]" 0158b8fb90b7623d,13514610cea16160,647075bf167447a1c8683e8dbe4794b4cfe73799cc6bec34905441159ce13705c8dfb3a9,15c9eae6f94631d63068bf44bb69999abc07b3d15e915fd8f0ed99ad481efd75deb02a5e,"AES-GCM-256 with 16 octet ICV [RFC5282]",,,"NONE [RFC4306]"
wireshark/test/config/ssl_keys.tmpl
"127.0.0.1","443","http","TEST_KEYS_DIRrsasnakeoil2.key","" "127.0.0.1","9131","http","TEST_KEYS_DIRkey.p12","WebAS"
wireshark/test/keys/dhe1_keylog.dat
# the client's random number and the master secret for test/captures/dhe1.pcapng.gz CLIENT_RANDOM 531f88d114fcf9ce9729b5458f73e1807324459029ee4bea43f8ee4ce06c77c0 3CC9E5068E674393C10E540430F60AB794C028B277CAD9C708758400B803AD4FC81D6796AFD14D8952F7CD9E4268B4DB
wireshark/test/keys/http2-data-reassembly.keys
CLIENT_RANDOM 59b4b71f50e71bff50f88388679c0156714d158bf10edd29f1d45fb4fffb3010 750fc27332a9cc17802defc48bd5693c9278d68680ae64d9dffa1e638ebd17a7902ad69501c413571b7c63dc23a0918b
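The .keys files in this directory use the NSS key log format: a label, the hex ClientRandom from the handshake, and the hex secret, one tuple per line, with '#' comment lines. A decryptor indexes the secrets by label and ClientRandom; a sketch of that lookup table in illustrative Lua (not Wireshark's actual implementation):

-- Illustrative only: index key log lines by (label, client_random).
local function parse_keylog(text)                    -- hypothetical helper
    local secrets = {}
    for label, random, secret in
            text:gmatch("(%u[%u%d_]+) (%x+) (%x+)") do  -- '#' comment lines never match
        secrets[label .. ":" .. random] = secret
    end
    return secrets
end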
wireshark/test/keys/rsa-p-lt-q.key
-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDhP+fIyQK5qfcXejONrbSZPrW8xuVOO6R34/cBVTKDz1D+f/TH 5fTxgZuxnZxsVv//cEK6YRI66q8MmrLejgTucm9Q6LSwJfBXnDenicUkzDz0tdPA Ng+dpF3WaIRYJ/Dow4Yt8EdPS+NrwtcO4TyThyCLsz28sC2roagex/zmswIDAQAB AoGBAIBJ82tKAQFZql92vBH/UxLMwpln9oXeNkgKUE40BfdIhmrM2c9YS2+ZT+GT UNnrmxYTDA425zkjHNvi5+kVnfxW9TsvJmJF2MDx/mtjx8GtMkOiFrqkYGdPRIoz vg8UZZPKZ+YQY+2AvIFPYSHHvzwJMc+dBUushD091O8aLifRAkEA5tgQKCWrCQTi qWEx2zzeH4WfZuQ1eufp77vWYG8XkRX6wW2Eb2vSl/avxNN7Ej5KBBB1dtSmAjGM 9oNBYqaqlwJBAPnLxjc9rqc8F/E8uVikd9tzRxFUjHqxG2wEIpGpnAmJ0eb3Xids NzjGDVkzkKzRPfkZN4H2jAscKhEdZXlv9EUCQCMzy7Lzm5NyhUYjJkEylQTlkZtV LbqiZxBB6r0l88gSO/0HQGzlWmYGHmO7hEcR7KOWBvOqFe67s61b8rqig90CQB+H aO1wC6twGlWIpJxbpgU896toUJLr59oqa3KXRequSqAseOXg8tdnqCeqKoiloHzg gfEVfXephmXCoBxD1UECQCxYuPVRq6sl+UgnH0unPl4F/1biRkunhcYaVx6JQBtz tRZm0DCietfGzhrfI2IxmEdGzOSoHbA18HLtOqzgwSA= -----END RSA PRIVATE KEY-----
wireshark/test/keys/rsa-p-lt-q.p8
-----BEGIN PRIVATE KEY----- MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOE/58jJArmp9xd6 M42ttJk+tbzG5U47pHfj9wFVMoPPUP5/9Mfl9PGBm7GdnGxW//9wQrphEjrqrwya st6OBO5yb1DotLAl8FecN6eJxSTMPPS108A2D52kXdZohFgn8OjDhi3wR09L42vC 1w7hPJOHIIuzPbywLauhqB7H/OazAgMBAAECgYEAgEnza0oBAVmqX3a8Ef9TEszC mWf2hd42SApQTjQF90iGaszZz1hLb5lP4ZNQ2eubFhMMDjbnOSMc2+Ln6RWd/Fb1 Oy8mYkXYwPH+a2PHwa0yQ6IWuqRgZ09EijO+DxRlk8pn5hBj7YC8gU9hIce/PAkx z50FS6yEPT3U7xouJ9ECQQDm2BAoJasJBOKpYTHbPN4fhZ9m5DV65+nvu9ZgbxeR FfrBbYRva9KX9q/E03sSPkoEEHV21KYCMYz2g0FipqqXAkEA+cvGNz2upzwX8Ty5 WKR323NHEVSMerEbbAQikamcCYnR5vdeJ2w3OMYNWTOQrNE9+Rk3gfaMCxwqER1l eW/0RQJAIzPLsvObk3KFRiMmQTKVBOWRm1UtuqJnEEHqvSXzyBI7/QdAbOVaZgYe Y7uERxHso5YG86oV7ruzrVvyuqKD3QJAH4do7XALq3AaVYiknFumBTz3q2hQkuvn 2iprcpdF6q5KoCx45eDy12eoJ6oqiKWgfOCB8RV9d6mGZcKgHEPVQQJALFi49VGr qyX5SCcfS6c+XgX/VuJGS6eFxhpXHolAG3O1FmbQMKJ618bOGt8jYjGYR0bM5Kgd sDXwcu06rODBIA== -----END PRIVATE KEY-----
wireshark/test/keys/rsasnakeoil2.key
-----BEGIN RSA PRIVATE KEY----- MIICWwIBAAKBgQCkblMUCt4s42BVmvJCpq9HEi8Xzvq63E5jVjS5unNLeEQ9xmxp pCWzYQKdCQQ/cj3YJ9OwWkV3tzbkJiPMEriu3qe2OoI8fCRZCviWQ4ujKTY/kX9d xyOUKX8Kzgq9jZsvGReq1Y7sZqI36z9XUzzyqrt5GUuQfqejmf6ETInwPQIDAQAB AoGAedqEWKsBIPTTtDziYYBTDnEsUxGA/685rCX7ZtQEkx4qPDlqqBMMGVW/8Q34 hugrap+BIgSTzHcLB6I4DwiksUpR08x0hf0oxqqjMo0KykhZDfUUfxR85JHUrFZM GznurVhfSBXX4Il9Tgc/RPzD32FZ6gaz9sFumJh0LKKadeECQQDWOfP6+nIAvmyH aRINErBSlK+xv2mZ4jEKvROIQmrpyNyoOStYLG/DRPlEzAIA6oQnowGgS6gwaibg g7yVTgBpAkEAxH6dcwhIDRTILvtUdKSWB6vdhtXFGdebaU4cuUOW2kWwPpyIj4XN D+rezwfptmeOr34DCA/QKCI/BWkbFDG2tQJAVAH971nvAuOp46AMeBvwETJFg8qw Oqw81x02X6TMEEm4Xi+tE7K5UTXnGld2Ia3VjUWbCaUhm3rFLB39Af/IoQJAUn/G o5GKjtN26SLk5sRjqXzjWcVPJ/Z6bdA6Bx71q1cvFFqsi3XmDxTRz6LG4arBIbWK mEvrXa5jP2ZN1EC7MQJAYTfwPZ8/4x/USmA4vx9FKdADdDoZnA9ZSwezWaqa44My bJ0SY/WmNU+Z4ldVIkcevwwwcxqLF399hjrXWhzlBQ== -----END RSA PRIVATE KEY-----
wireshark/test/keys/snakeoil-rsa.key
-----BEGIN RSA PRIVATE KEY----- MIICWwIBAAKBgQCkblMUCt4s42BVmvJCpq9HEi8Xzvq63E5jVjS5unNLeEQ9xmxp pCWzYQKdCQQ/cj3YJ9OwWkV3tzbkJiPMEriu3qe2OoI8fCRZCviWQ4ujKTY/kX9d xyOUKX8Kzgq9jZsvGReq1Y7sZqI36z9XUzzyqrt5GUuQfqejmf6ETInwPQIDAQAB AoGAedqEWKsBIPTTtDziYYBTDnEsUxGA/685rCX7ZtQEkx4qPDlqqBMMGVW/8Q34 hugrap+BIgSTzHcLB6I4DwiksUpR08x0hf0oxqqjMo0KykhZDfUUfxR85JHUrFZM GznurVhfSBXX4Il9Tgc/RPzD32FZ6gaz9sFumJh0LKKadeECQQDWOfP6+nIAvmyH aRINErBSlK+xv2mZ4jEKvROIQmrpyNyoOStYLG/DRPlEzAIA6oQnowGgS6gwaibg g7yVTgBpAkEAxH6dcwhIDRTILvtUdKSWB6vdhtXFGdebaU4cuUOW2kWwPpyIj4XN D+rezwfptmeOr34DCA/QKCI/BWkbFDG2tQJAVAH971nvAuOp46AMeBvwETJFg8qw Oqw81x02X6TMEEm4Xi+tE7K5UTXnGld2Ia3VjUWbCaUhm3rFLB39Af/IoQJAUn/G o5GKjtN26SLk5sRjqXzjWcVPJ/Z6bdA6Bx71q1cvFFqsi3XmDxTRz6LG4arBIbWK mEvrXa5jP2ZN1EC7MQJAYTfwPZ8/4x/USmA4vx9FKdADdDoZnA9ZSwezWaqa44My bJ0SY/WmNU+Z4ldVIkcevwwwcxqLF399hjrXWhzlBQ== -----END RSA PRIVATE KEY-----
wireshark/test/keys/tls-over-tls.key
-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAuIpwxT/BdUhlQNwpm11Wrz6oEV5glXDeFBSp3CUd7SX+2O1z iXxe6cW0p2ljPQjnvVFe/NTkDcGhgaXVH+8/sk1N1s101L98ZxjLaMaMKhj+yf6Q IOvN1weaMt9qnl8JRrk5f6aNGWTr+dqLU2veE6zX1oT/lFtDTafopyAFGpEJnDRz NudARQpt1CnakRu2zkKsQ/ApMjRa9Zm4w2Kf+M23IeU+j/gi5wAU9/D39ke67tKO GDNXYfMfFcgsmDrH+a2xrU3Zf9KtXiPI5lw2dXs3UG19Sr+HRoydCrIPPJBMzUKw 9M3Hc7i0aMO2vn94GJKC6mqpOeeQIWo5Y4GMnwIDAQABAoIBAE6eLAzcbH6aqQhI wzD9QsDF4LQFkQAZZYMIipTO+0DcvwWLo30fDxBoud3Yd/64nIF6+QydZcq2gyfI jlNcibZcWJz6SpuYOFdzqLSqYWxN2b4URTLBQqApDPg/VhzCQCFxJ53KRrJa3G1F PbX8bk/TguBRKND7UGD095i7e3ElPmmTEe4RSpzqYqCsUQ9e1ndhAVRKYSMXHdbY N4wtM+P2bIJghWQjyo2P4Jb7gXF47ghBett4krduvxv96m3dIuGAM0Y7V9okclQT 7cv/8iDUXBhbWzIsB3bXG0lm9QttQ3T0doSlqg0q2DgLiYBHEhK9SiUAVt+dQftP kRYa+gECgYEA9c1tNXM2u4SOgMUyiFkU8Slr7MbywMq2jnalbea63Q8JxNWNW9VX wQOBZvKK/BEGEP9kOeS1aMQ5dUZOxzI6SN9eCfEDFzIA5VknaZcJdBW+X3kyy/wF 7awEWcZ6B9HrtRZ+vA2rQ4LyfJd2M1eGuK6+0WJa0hGjglbFBrHJNLECgYEAwDJg ls3IWhPEQXWFwXXv1SM63hnbUpGFICLqLz26+qZ64hR52ZcvuZsS3l7slO+5YSim fQNXXYZiwQm+SdtL5V5anUKvtaMWw9VlZVNOFPYyh1RyaGr5CO0j5aoC76DnD2ee cRQPv8Uodf50FR5Y9uyxijGOfA3SMj4WfLppak8CgYAnDg3VGUpP/x7ZTPvbeDQA oXE5fN7jTRI2jpl4XdnA9/u4X6oHNl2sGE9+OPlmVZoeJ0YYgMNmMw9iF9q6gbuL CpqZf8ba76H+zuyZNVtWK4JFDy/IA3I5skQ6s3N+PJdz/XADlzRoFK1MqJAqVjTc sT82a6c8i3rsYbcKekMa8QKBgBPjorc4aGlZ7k9P2B2jFMSbtrXROy8aPAqNUmq9 GqJhpAnNUKbBzICKmbNFY3ouLKLvT/tT4zCcfY+4cGa4OOxtjTcE9aX4UJzHcoy/ yC7HI4d5p7VCjK7ty28y3sbpgb/IW08cYlzYDE3ZnS2qTE6RQ/YnFrWjwILOhgk2 ST5lAoGAIYpxE2Qcz9nHHfx2Fn70Ysg8XUoyk61QBSeZEmVBPp1XhVEz7idJc4PZ tozarw3mo9aqY/sX2f+mwLhL0E5wxy6kcVICkYhE9y45bESAeWk9YIlOMUzTW84C bVJUJiciDPuTJks6XVrRzc0GayaxpblhKHeR7w+6V22+lS7wgTg= -----END RSA PRIVATE KEY-----
wireshark/test/keys/tls12-chacha20poly1305.keys
# SSL/TLS secrets log file, generated by OpenSSL CLIENT_RANDOM ab85fc5f6db67c9cf825cd8a1f34cf6c5e89ec09656a4944ec5536a36aed5728 aee53fb112659bc2b3801db7d230c25100dfb79ffb00f086256885dfc1f8fc545e526fc92fe884348a84964bb7b4bbc0 CLIENT_RANDOM 59ee68728af906f3fb825bf306f7f40ae1fb68fc13d769a00843d3f2ba5dac45 7b126413d8876316b67a35fc3d530bca6574070a5a4b829868b3f94c36b0b1f673ab0a18f213ee7a5fb47ba50676acb3 CLIENT_RANDOM 1c73db9d11c0c4fb8acf3f62ec0a50a1559eeaded9a9a54e829f9f291d592069 f38dbcf8c3eb3c30514fbcfe2208efab88b3593468b8695e8c1f55662ffe5ab4151ed4ead5fb207c8707ea073e8aaaba RSA 3e2ffbd86d10f694 0303f4d5b30f42e5df09d7d3fe363b4ff104b469bcade9e4506bf6cc621441d577cc5fbe448979fdd9be53556881058a CLIENT_RANDOM 121cdd2ae0f9f089b48897272e37c89cf41f4eb8262ac1272cc02a73fd23fa0b 2a169a4a71b6b988d525546ca3f56d185f16490eecc19659963f796bf8c6b4cf10f8739aeb0f2162451c1e46718c137d CLIENT_RANDOM db9350951d7ace9c91422b44b526a549fa3ca97b9e0d5c421c08ee3223b3d74a bd0c489a9d428510d937ae09038a0cc84bc3ed72dfe136712d94fcc001d46f54b33992dd3f2f897625cad07f7f9caeb2 CLIENT_RANDOM 55a097b1bf4bf3c0f7161a81f15be86a78f2ca05f9784b07e0af761c1f9f1d65 bdf97a381c55c50421757a63227717b6a5c840ef8fc61975e0c495cff55f4dde41080b63c896f94639351697810bfcd9 CLIENT_RANDOM f6fb54f9367adab3128073734d14e7230e6fe3c5c494caa15e130b5a955dbcb6 8409faa69c4c6ebf56cbad7b9ace4f41815de444410c2c595f409b54a80014e0acf944b8ee6f62eb0ef873714514a19f
wireshark/test/keys/tls12-dsb-1.keys
# first CLIENT_RANDOM f67a28b386b31c620d76c0026fdd9888edbe6bf0f5b715b2caca158f84ae9d66 cc38e78182b9dfd74ef3103d79bbc99cfc9b4dad209ed209062b5481e63353128da7571b13cfd4d3a5ae7d0520fb346d
wireshark/test/keys/tls12-dsb-2.keys
CLIENT_RANDOM 1e0d63b41d7c7bb639559cfc9f06ffd5c65fe4a9df31abc5af833b0d834436f4 c7f5dda54fb417181cb26e52112afaf9e1756addd77d3c479d96a609c0d3c9bb9929c8475cafb4dbad8f72e868a43e02
wireshark/test/keys/tls13-20-chacha20poly1305.keys
# SSL/TLS secrets log file, generated by OpenSSL SERVER_HANDSHAKE_TRAFFIC_SECRET 3d89529eeebe176375ef29bd146a49e02c375771628244948f6e9408457fdbc1 4e1fbe0594634161e1af3f8b6e940561ba3fc2174b6cefb5b0da3e040cfb23e4 SERVER_TRAFFIC_SECRET_0 3d89529eeebe176375ef29bd146a49e02c375771628244948f6e9408457fdbc1 1e519f7e8d3e8d45d7cf13038f50a50e1a2f6f0a9f918b9ae856f3269068ef8f CLIENT_HANDSHAKE_TRAFFIC_SECRET 3d89529eeebe176375ef29bd146a49e02c375771628244948f6e9408457fdbc1 69c6f71caf5031fc55671bf88c70a77acac0d81de20d3cf5e6b4dd7fac4ffe4d CLIENT_TRAFFIC_SECRET_0 3d89529eeebe176375ef29bd146a49e02c375771628244948f6e9408457fdbc1 292497c163345eaa5b16b0b7907faa31dbb7ce76ac9a1c35ca99ba9dafeae72c SERVER_HANDSHAKE_TRAFFIC_SECRET 4cfd639e633b0ccdedab543dd333639d2bacd023b2f12a94724eb1c20f5b8e11 976e0b13800a6c2335ed9bb124f38fb2f654c6585dfc14db72eb09cd30148a1c SERVER_TRAFFIC_SECRET_0 4cfd639e633b0ccdedab543dd333639d2bacd023b2f12a94724eb1c20f5b8e11 f7e0e34d554b2431353dadb226d9fbf248dd15e76944234bfb13b1102af43aaf CLIENT_HANDSHAKE_TRAFFIC_SECRET 4cfd639e633b0ccdedab543dd333639d2bacd023b2f12a94724eb1c20f5b8e11 d46820a69b572a15d2ef8778d32f1e00300725ab42bc9f3e8f6ede598a1ab532 CLIENT_TRAFFIC_SECRET_0 4cfd639e633b0ccdedab543dd333639d2bacd023b2f12a94724eb1c20f5b8e11 69bf451e01db2af6924af9b78759a9f3e84affab23300238dbf94c54709cdda1
wireshark/test/keys/tls13-rfc8446-noearly.keys
CLIENT_HANDSHAKE_TRAFFIC_SECRET 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 3a497c91f6e130fbc18fc9f773b92bb0d538dfedc30e964cde0676396f24d0df SERVER_HANDSHAKE_TRAFFIC_SECRET 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 1a63b313c605f90e0b3c5717ebbbc62e1da3fe8e2aa66e499409a06b89040783 CLIENT_TRAFFIC_SECRET_0 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 1ce3e54d6b980d838f79564fd33d43a7664df24ead913c316c379ca3dd349b74 SERVER_TRAFFIC_SECRET_0 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df befa80156bd5cb23899c23afadd8deb87c4117323b3e184085b57c8f4dc56760 CLIENT_HANDSHAKE_TRAFFIC_SECRET b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d f4b31725da386891edbf521b96547be8b166487ca56ac197ac8df728c303ee80 SERVER_HANDSHAKE_TRAFFIC_SECRET b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d ee3a6c64336e7f22214ab8f4b1aba29b0e7c72c84890a240d5c0c451ffceee9a CLIENT_TRAFFIC_SECRET_0 b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d ceca66e29c1452990be5d1a439805adb9e582931051e847d8ad676147fd63b13 SERVER_TRAFFIC_SECRET_0 b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d bf428b9e2e4853bab9c442f23d0dc45a9d552ab31ec96c7b9633ed16694924d0
wireshark/test/keys/tls13-rfc8446.keys
CLIENT_HANDSHAKE_TRAFFIC_SECRET 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 3a497c91f6e130fbc18fc9f773b92bb0d538dfedc30e964cde0676396f24d0df SERVER_HANDSHAKE_TRAFFIC_SECRET 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 1a63b313c605f90e0b3c5717ebbbc62e1da3fe8e2aa66e499409a06b89040783 CLIENT_TRAFFIC_SECRET_0 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df 1ce3e54d6b980d838f79564fd33d43a7664df24ead913c316c379ca3dd349b74 SERVER_TRAFFIC_SECRET_0 2635fafc16c49a3e997ef714c303806dc8dbf634a2005b0e0186521c4ad6f9df befa80156bd5cb23899c23afadd8deb87c4117323b3e184085b57c8f4dc56760 CLIENT_EARLY_TRAFFIC_SECRET b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d 42c0101fad261571cb8799c86a1eb4afe6dcef4a5f88664ac63e4c77452a77ef CLIENT_HANDSHAKE_TRAFFIC_SECRET b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d f4b31725da386891edbf521b96547be8b166487ca56ac197ac8df728c303ee80 SERVER_HANDSHAKE_TRAFFIC_SECRET b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d ee3a6c64336e7f22214ab8f4b1aba29b0e7c72c84890a240d5c0c451ffceee9a CLIENT_TRAFFIC_SECRET_0 b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d ceca66e29c1452990be5d1a439805adb9e582931051e847d8ad676147fd63b13 SERVER_TRAFFIC_SECRET_0 b67947da9d3e4b2ce8acffa975e30aa7ef90f7ec0d39de78db392f38b9a9a41d bf428b9e2e4853bab9c442f23d0dc45a9d552ab31ec96c7b9633ed16694924d0
wireshark/test/keys/udt-dtls.key
-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDg/jR4vVFNpBta s1uv4IXhqpePHIIuOoOnb2sK6G8gXdCxKm406JLecNuIqq0eUydH6oawar96vYGM lLKTfvMnN3RgL3wNG6G+x+zYPjMaS6G+X8SPAG/XtlIxzDFdzliJT4WpKw1nNa7V sryYTYLZ4aX0hbbETuvXhdZTqCUYX8t2TYyTjeTBybWLyXcWZReGShHPlThn55b0 rtUXkUQoTl3iv330jvJX8MN8haRLwr4+s2jYIGhsINdvMziGo89Eh1FYJXeC4en0 FTEzIx9XA8FiVA8Rf1EEJv6syIvE11GGBCrX0Hu1brg7n6Y0TvkQGcL8tbrFDKlE W7HlQk9TAgMBAAECggEBALfNk8orRxc5gItJSRbWQilH9saYEJV5ggIv2G+x0M7N NWb2dc/NS+ZipkXwwLqsTcPHiT7oBgNce1AATh6GsFeSSwUk5Z/DuhAkPY2uyoqp zLm8fNQiFDxSGrXJzW6H2vZZu6Smoi11wp2bhcyaTdJ3L98huVyH6M1J7fyruZo9 Z8GssyCSujfze7cIc7Py1wEEC0+LzGijkc70HDgsxwj6TXKx/ifXi+5tCCesA3z1 6AIe0zMTcwpXRbdEZGO9fvBxGoK9hwiVJ6/sPmxFXWpBfu6TMLJjLcwwvnoNuWdH 83+PfD+sdE7zo6CUEGDHPvJ/Fyq5lIGjEP0ErruGKPkCgYEA9PyUvC3nHkGN+5t9 JNDaNZWEaiBW7w5yQN5MnQCwK3I8ajE+EDoPl0YDhJW4HFS/iwu8WZeb4pN0WLkT C2K7Tfb+9G7ebLoQ997l0zEcnHqiKA1CqGpu/g0mcVlGFExthvgv9rmt+1QLTXHJ F/YirWCTyyIgw11iY2ap8u7kSK0CgYEA6xuHHFTtH2pjRXxuOBhwsvA+8oNd/4A8 phNfJgS7t9LYDMqzyer0ixdkae1vH4peZ1USn6326Vx53hYulH0A69iNM7zrNWM7 JI+y0ftyb/Ji7CwpQAd3JVjUjvoQljrpPqrArm8fGhTucqS8E1fY1u98b3N85RrY u3uAcwGb9/8CgYAys08ovqs1FMYIiz5T7zEpo77ao8S6BphYmmjqmSjcZPDh3T0F 6K4vVVsHBmEq49McOJqLRBgLxQ5wCiVJ1u4CjZpoBcXcZIl8ctHHakOMksiaV1wz NIux4hDRpnMdYk/MffKXMggymksYhPLkFZlJnlIX2QFEzT++aJHFZ/EwpQKBgCs8 QLiBFao1UlQw8cP3GqKNc8X9Sof1+TFBVroTHMJNT9XqYO28+4OopZqlQ041j+7I wkgDIekATJj+00oTQtwcUrs0/rwup22tz2C2MPFNTcvIwz03Ij4H++7fJbW617Hi jNSHMt0FBGSozr1v5jyAhg2o20r2iOzRZWnA3gHZAoGAXkvhtQxD5sdypzUhs6uS d3PgKTbQK8PLU5KGl7DZ2oaemQ2QUQw0J9tlEQRItTxB+MDf21FddD8n6c1a3zJa gay7xiarE8JN0pHoQ1qCqZBwWXQRSPiNu8Bxu2oPpUi2iQdBVQG1bACQOToVUDpv wCW2aisjPbg71ZkZEvhk2cg= -----END PRIVATE KEY-----
Lua
wireshark/test/lua/acme_file.lua
------------------------------------------ -- acme_file_reader.lua -- Author: Hadriel Kaplan (hadrielk at yahoo dot com) -- version = 1.0 -- date = 3/3/2014 ------------------------------------------ --[[ This is a Wireshark Lua-based capture file reader. This "capture file" reader reads message logs from Acme Packet (now Oracle) Session Border Controllers, such as sipmsg.log files. There are several variants of the log file format, as well as some changes that can happen based on how the log file is generated and retrieved; for example if it's generated through a 'tail' command, or FTP'ed by a FTP client which adds carriage-returns. This Lua file reader tries to handle such conditions. Note: this script wasn't written to be super-efficient, nor clever. When you've been writing Lua for a while you get used to writing in a different, more elegant fashion than this script is; but other people find it hard to read such Lua code, so I've tried to keep this simpler. Features: -handles sipmsg type logs, sipdns type logs, algd type logs -handles both IPv4 and IPv6, for both UDP and TCP -reads sipmsg logs from 3800, 4250, 4500, 9200, 6300 SBCs -handles logs with extra carriage-returns and linefeeds, such as from certain FTP'ed cases -handles logs generated/copied from a 'tail' command on the SBC ACLI -handles MBCD messages in logs, and puts their decoded ascii description in comments in Wireshark Issues: -for very large logs (many megabytes), it takes a long time (many minutes) -creates fake IP and UDP/TCP headers, which might be misleading -has to guess sometimes, though it hasn't guessed wrong yet as far as I know To-do: - make it use Struct.tohex/fromhex now that we have the Struct library in Wireshark - make it use a linux cooked-mode pseudo-header (see https://gitlab.com/wireshark/wireshark/-/wikis/SLL) - make it use preferences, once I write C-code for Wireshark to do that :) - rewrite some of the pattern searches to use real regex/PCRE instead? Example SIP over UDP message: Aug 26 19:25:10.685 On [5:0]2.1.1.1:5060 received from 2.1.2.115:5060 REGISTER sip:2.1.1.1:5060 SIP/2.0 Via: SIP/2.0/UDP 2.1.2.115:5060;branch=z9hG4bK6501441021660x81000412 From: <sip:[email protected]:5060>;tag=520052-7015560x81000412 To: <sip:[email protected]:5060> Call-ID: [email protected] CSeq: 247 REGISTER Contact: <sip:[email protected]:5060;transport=udp> Expires: 300 Max-Forwards: 70 Authorization: Digest username="public_115",realm="empirix.com",uri="sip:2.1.1.1",response="5d61837cc54dc27018a40f2532e622de",nonce="430f6ff09ecd8c3fdfc5430b6e7e437a4cf77057",algorithm=md5 Content-Length: 0 ---------------------------------------- Another one: 2007-03-06 13:38:48.037 OPENED 2007-03-06 13:38:48.037 OPENED 2007-03-06 13:38:48.037 OPENED Mar 6 13:38:54.959 On [1:0]135.25.29.135:5060 received from 192.168.109.138:65471 OPTIONS sip:135.25.29.135 SIP/2.0 Accept: application/sdp User-Agent: ABS GW v5.1.0 To: sip:135.25.29.135 From: sip:192.168.109.138;tag=a2a090ade36bb108da70b0c8f7ba02e9 Contact: sip:192.168.109.138 Call-ID: [email protected] CSeq: 347517161 OPTIONS Via: SIP/2.0/UDP 192.168.109.138;branch=z9hG4bK21feac80fe9a63c1cf2988baa2af0849 Max-Forwards: 70 Content-Length: 0 ---------------------------------------- Another SIP over UDP (from 9200): File opened. 
Jun 8 14:34:22.599 UDP[3:0]10.102.131.194:5060 OPENED Jun 8 14:34:22.616 UDP[6:0]10.102.130.185:5060 OPENED Jun 8 14:34:49.416 On [6:0]10.102.130.185:5060 received from 10.102.130.150:5060 REGISTER sip:csp.noklab.net SIP/2.0 Via: SIP/2.0/UDP 192.168.1.100:5060;branch=z9hG4bK26b7a48d From: sip:[email protected] To: sip:[email protected] Call-ID: [email protected] CSeq: 144 REGISTER User-Agent: CSCO/7 Contact: <sip:[email protected]:5060> Content-Length: 0 Expires: 3600 ---------------------------------------- Example SIP over TCP message (note it ends in the middle of a header name): Jan 12 00:03:54.700 On 172.25.96.200:8194 received from 172.25.32.28:5060 SIP/2.0 200 OK From: Unavailable <sip:[email protected]:5060;user=phone>;tag=1200822480 To: 24001900011 <sip:[email protected]:5060;user=phone>;tag=03c86c0b27df1b1254aeccbc000 Call-ID: [email protected] CSe ---------------------------------------- Example SIP Pre and Post-NAT messages: Post-NAT from private<realm=e911core> encoded: SIP/2.0 302 Moved Temporarily Call-ID: SD27o9f04-fcc63aa885c83e22a1be64cfc210b55e-vjvtv00 CSeq: 2 INVITE From: <sip:[email protected]:5060;user=phone;e911core=TSD5051AEPCORE-dnamt76v6nm04;CKE=BSLD-5cuduig6t52l2;e911vpn=TSD5051AEPVPN-7gdq13vt8fi59>;tag=SD27o9f04-10000000-0-1424021314 To: <sip:[email protected];user=phone;CKE=BSLD-8blt7m3dhnj17>;tag=10280004-0-1239441202 Via: SIP/2.0/UDP 127.254.254.1:5060;branch=z9hG4bK5i4ue300dgrdras7q281.1 Server: DC-SIP/1.2 Content-Length: 0 Contact: <sip:[email protected]:5060;e911core=TSD5051AEPCORE-5n86t36uuma01> ---------------------------------------- Pre-NAT to private<realm=e911core> decode: ACK sip:[email protected];user=phone;CKE=BSLD-8blt7m3dhnj17 SIP/2.0 Via: SIP/2.0/UDP 127.254.254.1:5060;branch=z9hG4bK5i4ue300dgrdras7q281.1 Call-ID: SD27o9f04-fcc63aa885c83e22a1be64cfc210b55e-vjvtv00 CSeq: 2 ACK From: <sip:[email protected]:5060;user=phone;e911core=TSD5051AEPCORE-dnamt76v6nm04;CKE=BSLD-5cuduig6t52l2;e911vpn=TSD5051AEPVPN-7gdq13vt8fi59>;tag=SD27o9f04-10000000-0-1424021314 To: <sip:[email protected];user=phone;CKE=BSLD-8blt7m3dhnj17>;tag=10280004-0-1239441202 Max-Forwards: 70 ---------------------------------------- Example DNS message: Nov 1 23:03:12.811 On 10.21.232.194:1122 received from 10.21.199.204:53 DNS Response 3916 flags=8503 q=1 ans=0 auth=1 add=0 net-ttl=0 Q:NAPTR 7.6.5.4.3.2.1.0.1.2.e164 NS:SOA e164 ttl=0 netnumber01 rname=user.netnumber01 ser=223 ref=0 retry=0 exp=0 minttl=0 0000: 0f 4c 85 03 00 01 00 00 00 01 00 00 01 37 01 36 .L...........7.6 0010: 01 35 01 34 01 33 01 32 01 31 01 30 01 31 01 32 .5.4.3.2.1.0.1.2 0020: 04 65 31 36 34 00 00 23 00 01 04 65 31 36 34 00 .e164..#...e164. 0030: 00 06 00 01 00 00 00 00 00 33 0b 6e 65 74 6e 75 .........3.netnu 0040: 6d 62 65 72 30 31 00 04 75 73 65 72 0b 6e 65 74 mber01..user.net 0050: 6e 75 6d 62 65 72 30 31 00 00 00 00 df 00 00 00 number01........ 0060: 00 00 00 00 00 00 00 00 00 00 00 00 00 ............. ---------------------------------------- Example MGCP message (note the IP/UDP headers are in the hex): Mar 1 14:37:26.683 On [0:803]172.16.84.141:2427 sent to 172.16.74.100:2427 Packet: 0000: 00 04 00 00 00 01 00 02 00 00 03 23 0a ad 00 c9 ...........#.... 0010: 45 00 00 a8 23 36 00 00 3c 11 63 fd ac 10 54 8d E...#6..<.c...T. 
0020: ac 10 4a 64 09 7b 09 7b 00 94 16 c2 32 35 30 20 ..Jd.{.{....250 250 55363 Connection Deleted P: PS=6551, OS=1048160, PR=6517, OR=1042720, PL=0, JI=1, LA=5, PC/RPS=6466, PC/ROS=1034560, PC/RPL=0, PC/RJI=0 ---------------------------------------- Example MBCD message: Mar 1 14:37:26.672 On 127.0.0.1:2946 sent to 127.0.0.1:2944 0000: ac 3e fd a8 01 01 77 36 9e 00 37 10 0c 34 4c bc .>....w6..7..4L. 0010: 00 30 23 0c 34 4c bc 00 11 33 00 0e 35 00 04 00 .0#.4L...3..5... 0020: 00 00 00 30 00 04 00 00 00 00 23 0c 34 4c bd 00 ...0......#.4L.. 0030: 11 33 00 0e 35 00 04 00 00 00 00 30 00 04 00 00 .3..5......0.... 0040: 00 00 .. Transaction = 24589982 { Context = 204754108 { Subtract = 204754108 { Audit { Stats, Flow } }, Subtract = 204754109 { Audit { Stats, Flow } } } } ---------------------------------------- ]]---------------------------------------- -- debug printer, set DEBUG to true to enable printing debug info -- set DEBUG2 to true to enable really verbose printing local DEBUG, DEBUG2 = true, false local dprint = function() end local dprint2 = function() end if DEBUG or DEBUG2 then dprint = function(...) print(table.concat({"Lua:", ...}," ")) end if DEBUG2 then dprint2 = dprint end end -- this should be done as a preference setting local ALWAYS_UDP = true local fh = FileHandler.new("Oracle Acme Packet logs", "acme", "A file reader for Oracle Acme Packet message logs such as sipmsg.log","rs") -- There are certain things we have to create fake state/data for, because they -- don't exist in the log file for example to create IP headers we have to create -- fake identification field values, and to create timestamps we have to guess the -- year (and in some cases month/day as well), and for TCP we have to create fake -- connection info, such as sequence numbers. We can't simply have a global static -- variable holding such things, because Wireshark reads the file sequentially at -- first, but then calls seek_read for random packets again and we don't want to -- re-create the fake info again because it will be wrong. So we need to create it -- for each packet and remember what we created for each packet, so that seek_read -- gets the same values. We could store the variables in a big table, keyed by the -- specific header info line for each one; but instead we'll key it off of the file -- position number, since read() sets it for Wireshark and seek_read() gets it from -- Wireshark. So we'll have a set of global statics used during read(), but the -- actual per-packet values will be stored in a table indexed/keyed by the file -- position number. A separate table holds TCP peer connection info as described -- later. -- I said above that this state is "global", but really it can't be global to this -- whole script file, because more than one file can be opened for reading at the -- same time. For example if the user presses the reload button, the capture file -- will be opened for reading before the previous (same) one is closed. So we have -- to store state per-file. The good news is Wireshark gives us a convenient way to -- do that, using the CaptureInfo.private_table attribute/member. We can save a Lua -- table with whatever contents we want, to this private_table member, and get it -- later during the other read/seek_read/cose function calls. -- So to store this per-file state, we're going to use Lua class objects. They're -- just Lua tables that have functions and meta-functions and can be treated like -- objects in terms of syntax/behavior. 
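-- [Illustrative aside, not in the original file] The metatable-based class
-- idiom the comment above describes, in minimal standalone form:
--
--   local Thing = {}
--   local Thing_mt = { __index = Thing }        -- method lookups fall back to Thing
--
--   function Thing.new(x)
--       return setmetatable({ x = x }, Thing_mt) -- instances share one metatable
--   end
--
--   function Thing:double() return self.x * 2 end
--
--   print(Thing.new(21):double())                --> 42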
local State = {} local State_mt = { __index = State } function State.new() local new_class = { -- the new instance -- stuff we need to keep track of to cerate fake info ip_ident = 0, tyear = 0, tmonth = 0, tmin = 0, tsec = 0, tmilli = 0, nstime = NSTime(), -- the following table holds per-packet info -- the key index will be a number - the file position - but it won't be an array type table (too sparse). -- Each packet's entry is a table holding the "static" variables for that packet; this sub-table will be -- an array style instead of hashmap, to reduce size/performance -- This table needs to be cleared whenever the file is closed/opened. packets = {}, -- the following local table holds TCP peer "connection" info, which is basically -- TCP control block (TCB) type information; this is needed to create and keep track -- of fake TCP sockets/headers for messages that went over TCP, for example for fake -- sequence number info. -- The key index for this is the local+remote ip:port strings concatenated. -- The value is a sub-table, array style, holding the most recent sequence numbers. -- This whole table needs to be cleared whenever the file is closed/opened. tcb = {}, } setmetatable( new_class, State_mt ) -- all instances share the same metatable return new_class end -- the indices for the State.packets{} variable sub-tables local IP_IDENT = 1 local TTIME = 2 local LOCAL_SEQ = 3 local REMOTE_SEQ = 4 -- the indices for the State.tcb{} sub-tables local TLOCAL_SEQ = 1 local TREMOTE_SEQ = 2 -- helper functions local char = string.char local floor = math.floor -- takes a Lua number and converts it into a 2-byte string binary (network order) local function dec2bin16(num) return Struct.pack(">I2",num) end -- takes a Lua number and converts it into a 4-byte string binary (network order) local function dec2bin32(num) return Struct.pack(">I4",num) end -- function to skip log info before/between/after messages local delim = "^%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-%-$" -- words that must be found to be skipped. "File ..." is found in 9200 logs) local skiplist = { " OPENED", " CLOSED", " STARTED", " STOPPED", "^File ", delim } -- pre/post NAT entries local pre_nat_header_pattern = "^Pre%-NAT to private<realm=([^>]+)> decode:\r?$" local post_nat_header_pattern = "^Post%-NAT from private<realm=([^>]+)> encoded:\r?$" local function skip_ahead(file, line, position) repeat local found = #line == 0 -- will be false unless the line is empty for i, word in ipairs(skiplist) do if line:find(word) then found = true break end end if found then position = file:seek() line = file:read() if not line then return nil end elseif line:find(pre_nat_header_pattern) or line:find(post_nat_header_pattern) then -- skip the whole message found = true repeat line = file:read() until line:find(delim) end until not found return line, position end -- following pattern grabs month, day, hour, min, sec, millisecs local header_time_pattern = "^(%u%l%l) ?(%d%d?) 
(%d%d?):(%d%d):(%d%d)%.(%d%d%d) On " -- tail'ed file has no month/day local header_tail_time_pattern = "^(%d%d):(%d%d)%.(%d%d%d) On " -- grabs local and remote IPv4:ports (not phy/vlan), and words in between (i.e., "sent to" or "received from") local header_address_pattern = "(%d%d?%d?%.%d%d?%d?%.%d%d?%d?%.%d%d?%d?):(%d+) (%l+ %l+) (%d%d?%d?%.%d%d?%d?%.%d%d?%d?%.%d%d?%d?):(%d+) ?\r?$" -- grabs local and remote IPv6:ports (not phy/vlan), and words in between (i.e., "sent to" or "received from") local header_v6address_pattern = "%[([:%x]+)%]:(%d+) (%l+ %l+) %[([:%x]+)%]:(%d+) ?\r?$" -- grabs phy/vlan info local header_phy_pattern = "%[(%d+):(%d+)%]" local SENT = 1 local RECV = 2 local function get_direction(phrase) if #phrase == 7 and phrase:find("sent to") then return SENT elseif #phrase == 13 and phrase:find("received from") then return RECV end dprint("direction phrase not found") return nil end -- monthlist table for getting month number value from 3-char name (number is table index) local monthlist = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"} -- Compute the difference in seconds between local time and UTC -- from http://lua-users.org/wiki/TimeZone local function get_timezone() local now = os.time() return os.difftime(now, os.time(os.date("!*t", now))) end local timezone = get_timezone() function State:get_timestamp(line, file_position, seeking) local i, line_pos, month, day, hour, min, sec, milli = line:find(header_time_pattern) if not month then return end if seeking then -- we've seen this packet before, just go get the saved timestamp sec = self.packets[file_position][TTIME] if not sec then dprint("failed to get saved timestamp for packet at position:", file_position) return end return sec, line_pos end -- find the month's number for index, name in ipairs(monthlist) do if month == name then month = index break end end if type(month) ~= "number" then return end day = tonumber(day) hour = tonumber(hour) min = tonumber(min) sec = tonumber(sec) milli = tonumber(milli) if not day or not hour or not min or not sec or not milli then dprint("timestamp could not be determined") return nil end -- we don't know what year the log file was created, so we have to guess -- if we guess the current system year, then a log of December loaded in January will appear wrong, -- as will a log file which lasts over new year -- so we're going to check the current system month, and if it's less than the log file's then we'll -- assume the log file started last year; if the system month is larger or equal, then we'll assume the log -- file is of this year. We only do this checking once per file. 
if self.tyear == 0 then local curr_year, curr_month = tonumber(os.date("%Y")), tonumber(os.date("%m")) if curr_month < month then -- use last year if curr_year > 0 then curr_year = curr_year - 1 end end self.tyear = curr_year -- XXX - but for purposes of testing, we just force the year to -- 2014, so that we can compare the result of this code reading -- an Acme log with the result of the pcapng reader reading a -- pcapng file with the same packets - the time stamps in -- pcapng files are times since the Epoch, so the year is known self.tyear = 2014 end -- if this message's month is less than previous message's, then year wrapped if month < self.tmonth then self.tyear = self.tyear + 1 end self.tmonth = month local timet = os.time({ ["year"] = self.tyear, ["month"] = month, ["day"] = day, ["hour"] = hour, ["min"] = min, ["sec"] = sec }) if not timet then dprint("timestamp conversion failed") end timet = timet + timezone -- make an NSTime self.nstime = NSTime(timet, milli * 1000000) self.packets[file_position][TTIME] = self.nstime timet = timet + (milli/1000) dprint2("found time of ", os.date("%c",timet), " with value=",timet) return self.nstime, line_pos end -- get_tail_time() gets a fictitious timestamp starting from 19:00:00 on Dec 31, 1969, and incrementing based -- on the minutes/secs/millisecs seen (i.e., if the minute wrapped then hour increases by 1, etc.). -- this is needed for tail'ed log files, since they don't show month/day/hour function State:get_tail_time(line, file_position, seeking) local i, line_pos, min, sec, milli = line:find(header_tail_time_pattern) if not min then return end if seeking then -- we've seen this packet before, just go get the saved timestamp sec = self.packets[file_position][TTIME] if not sec then dprint("failed to get saved timestamp for packet at position:", file_position) return end return sec, line_pos end min = tonumber(min) sec = tonumber(sec) milli = tonumber(milli) if not min or not sec or not milli then dprint("timestamp could not be determined") return nil end -- get difference in time local tmin, tsec, tmilli, nstime = self.tmin, self.tsec, self.tmilli, self.nstime local ttime = nstime.secs -- min, sec, milli are what the log says this tail'ed packet is -- tmin, tsec, tmilli are what we got from last packet -- nstime is the unix time of that, and ttime is the seconds of that unix time -- if minutes wrapped, or they're equal but seconds wrapped, then handle it as if in the next hour if (min < tmin) or (min == tmin and sec < tsec) or (min == tmin and sec == tsec and milli < tmilli) then -- something wrapped, calculate difference as if in next hour ttime = ttime + (((min * 60) + sec + 3600) - ((tmin * 60) + tsec)) else ttime = ttime + (((min * 60) + sec) - ((tmin * 60) + tsec)) end self.tmin, self.tsec, self.tmilli = min, sec, milli self.nstime = NSTime(ttime, milli * 1000000) self.packets[file_position][TTIME] = self.nstime return self.nstime, line_pos end local hexbin = { ["0"]=0, ["1"]=1, ["2"]=2, ["3"]=3, ["4"]=4, ["5"]=5, ["6"]=6, ["7"]=7, ["8"]=8, ["9"]=9, ["a"]=10, ["b"]=11, ["c"]=12, ["d"]=13, ["e"]=14, ["f"]=15, ["00"]=0, ["01"]=1, ["02"]=2, ["03"]=3, ["04"]=4, ["05"]=5, ["06"]=6, ["07"]=7, ["08"]=8, ["09"]=9, ["0a"]=10, ["0b"]=11, ["0c"]=12, ["0d"]=13, ["0e"]=14, ["0f"]=15, ["10"]=16, ["11"]=17, ["12"]=18, ["13"]=19, ["14"]=20, ["15"]=21, ["16"]=22, ["17"]=23, ["18"]=24, ["19"]=25, ["1a"]=26, ["1b"]=27, ["1c"]=28, ["1d"]=29, ["1e"]=30, ["1f"]=31, ["20"]=32, ["21"]=33, ["22"]=34, ["23"]=35, ["24"]=36, ["25"]=37, ["26"]=38, ["27"]=39, 
["28"]=40, ["29"]=41, ["2a"]=42, ["2b"]=43, ["2c"]=44, ["2d"]=45, ["2e"]=46, ["2f"]=47, ["30"]=48, ["31"]=49, ["32"]=50, ["33"]=51, ["34"]=52, ["35"]=53, ["36"]=54, ["37"]=55, ["38"]=56, ["39"]=57, ["3a"]=58, ["3b"]=59, ["3c"]=60, ["3d"]=61, ["3e"]=62, ["3f"]=63, ["40"]=64, ["41"]=65, ["42"]=66, ["43"]=67, ["44"]=68, ["45"]=69, ["46"]=70, ["47"]=71, ["48"]=72, ["49"]=73, ["4a"]=74, ["4b"]=75, ["4c"]=76, ["4d"]=77, ["4e"]=78, ["4f"]=79, ["50"]=80, ["51"]=81, ["52"]=82, ["53"]=83, ["54"]=84, ["55"]=85, ["56"]=86, ["57"]=87, ["58"]=88, ["59"]=89, ["5a"]=90, ["5b"]=91, ["5c"]=92, ["5d"]=93, ["5e"]=94, ["5f"]=95, ["60"]=96, ["61"]=97, ["62"]=98, ["63"]=99, ["64"]=100, ["65"]=101, ["66"]=102, ["67"]=103, ["68"]=104, ["69"]=105, ["6a"]=106, ["6b"]=107, ["6c"]=108, ["6d"]=109, ["6e"]=110, ["6f"]=111, ["70"]=112, ["71"]=113, ["72"]=114, ["73"]=115, ["74"]=116, ["75"]=117, ["76"]=118, ["77"]=119, ["78"]=120, ["79"]=121, ["7a"]=122, ["7b"]=123, ["7c"]=124, ["7d"]=125, ["7e"]=126, ["7f"]=127, ["80"]=128, ["81"]=129, ["82"]=130, ["83"]=131, ["84"]=132, ["85"]=133, ["86"]=134, ["87"]=135, ["88"]=136, ["89"]=137, ["8a"]=138, ["8b"]=139, ["8c"]=140, ["8d"]=141, ["8e"]=142, ["8f"]=143, ["90"]=144, ["91"]=145, ["92"]=146, ["93"]=147, ["94"]=148, ["95"]=149, ["96"]=150, ["97"]=151, ["98"]=152, ["99"]=153, ["9a"]=154, ["9b"]=155, ["9c"]=156, ["9d"]=157, ["9e"]=158, ["9f"]=159, ["a0"]=160, ["a1"]=161, ["a2"]=162, ["a3"]=163, ["a4"]=164, ["a5"]=165, ["a6"]=166, ["a7"]=167, ["a8"]=168, ["a9"]=169, ["aa"]=170, ["ab"]=171, ["ac"]=172, ["ad"]=173, ["ae"]=174, ["af"]=175, ["b0"]=176, ["b1"]=177, ["b2"]=178, ["b3"]=179, ["b4"]=180, ["b5"]=181, ["b6"]=182, ["b7"]=183, ["b8"]=184, ["b9"]=185, ["ba"]=186, ["bb"]=187, ["bc"]=188, ["bd"]=189, ["be"]=190, ["bf"]=191, ["c0"]=192, ["c1"]=193, ["c2"]=194, ["c3"]=195, ["c4"]=196, ["c5"]=197, ["c6"]=198, ["c7"]=199, ["c8"]=200, ["c9"]=201, ["ca"]=202, ["cb"]=203, ["cc"]=204, ["cd"]=205, ["ce"]=206, ["cf"]=207, ["d0"]=208, ["d1"]=209, ["d2"]=210, ["d3"]=211, ["d4"]=212, ["d5"]=213, ["d6"]=214, ["d7"]=215, ["d8"]=216, ["d9"]=217, ["da"]=218, ["db"]=219, ["dc"]=220, ["dd"]=221, ["de"]=222, ["df"]=223, ["e0"]=224, ["e1"]=225, ["e2"]=226, ["e3"]=227, ["e4"]=228, ["e5"]=229, ["e6"]=230, ["e7"]=231, ["e8"]=232, ["e9"]=233, ["ea"]=234, ["eb"]=235, ["ec"]=236, ["ed"]=237, ["ee"]=238, ["ef"]=239, ["f0"]=240, ["f1"]=241, ["f2"]=242, ["f3"]=243, ["f4"]=244, ["f5"]=245, ["f6"]=246, ["f7"]=247, ["f8"]=248, ["f9"]=249, ["fa"]=250, ["fb"]=251, ["fc"]=252, ["fd"]=253, ["fe"]=254, ["ff"]=255 } local function iptobytes(ipaddr) local bytes = { ipaddr:match("(%d+)%.(%d+)%.(%d+)%.(%d+)") } if not #bytes == 4 then dprint("failed to get ip address bytes for '", ipaddr, "'") return end local ip = "" for i, byte in ipairs(bytes) do ip = ip .. 
char(tonumber(byte)) end return ip end local function hexword2bin(word) if #word == 4 then return char(hexbin[word:sub(1,2)], hexbin[word:sub(3,4)]) elseif #word == 3 then return char(hexbin[word:sub(1,1)], hexbin[word:sub(2,3)]) elseif #word < 3 then return char(0, hexbin[word]) end return nil -- error end -- convert this 2620:0:60:8ac::102 to its 16-byte binary (=8 of 2-byte words) local NUMWORDS = 8 local function ipv6tobytes(ipaddr) -- start with all 16 bytes being zeroes local words = { "\00\00", "\00\00", "\00\00", "\00\00", "\00\00", "\00\00", "\00\00", "\00\00" } -- now walk from front of ipv6 address string replacing byte numbers above; -- if we hit a "::", then jump to end and do it in reverse local colon_s, colon_e = ipaddr:find("::%x") if colon_s then -- there's a double-colon, so split the string and do the end first, backwards -- get each chunk first local t = {} local index, wordix = 1, NUMWORDS for w in string.gmatch(ipaddr:sub(colon_e - 1), ":(%x+)") do t[index] = hexword2bin(w) index = index + 1 end for ix=index-1, 1, -1 do words[wordix] = t[ix] wordix = wordix - 1 end ipaddr = ipaddr:sub(1, colon_s) end local i = 1 for w in string.gmatch(ipaddr, "(%x+):?") do words[i] = hexword2bin(w) i = i + 1 end if not #words == NUMWORDS then dprint("failed to get IPv6 address bytes for '", ipaddr, "'") return end return table.concat(words) end -- calculates checksum as done for IP, TCP, UDP local function checksum(chunk) local sum = 0 -- take every 2-byte value and add them up for one, two in chunk:gmatch("(.)(.)") do sum = sum + (string.byte(one) * 256) + (string.byte(two)) while floor(sum / 65536) > 0 do -- add carry/overflow value sum = (sum % 65536) + (floor(sum / 65536)) end end -- now get one's complement of that sum = 65535 - sum -- and return it as a 2-byte string return dec2bin16(sum) end ---------------------------------------- -- protocol type number local PROTO_UDP = "\17" local PROTO_TCP = "\06" -- enum local IPv4 = 1 local IPv6 = 2 -- both type enums and header lengths local UDP = 8 local TCP = 20 ---------------------------------------- -- Packet creation/serialization occurs using a Lua class object model -- There's a single base class 'Packet' which has data/methods every packet type has -- 'RawPacket' and 'DataPacket' both derive from 'Packet'. -- 'RawPacket' is for packets which the log file has the raw IP/UDP headers for, -- such as ALG log messages (MGCP/NCS). Since the IP headers are in them, we use those. -- 'DataPacket' is for packets which the log file only has payload data for, and -- we need to create fake IP/UDP or IP/TCP headers for. -- 'BinPacket' and'AsciiPacket' both derive from 'DataPacket'. -- 'BinPacket' is for binary-style logged packets, such as MBCD or DNS, while -- 'AsciiPacket' is for ascii-style ones such as SIP. -- 'DnsPacket' derives from 'BinPacket', for DNS-style logs. -- Each class has a read_data() method, which reads in the packet data, builds the packet, -- and sets the Wireshark buffer. Some classes have a get_data() method which read_data() -- calls, to get the payload data before building a fake packet. -- The base Packet class has a get_hex_data() and get_ascii_data() methods, to get the payload -- in either form, and those base methods are called by get_data() or read_data() of derived -- classes. -- For performance reasons, packet data is read line-by-line into a table (called bufftbl), -- which is concatenated at the end. This avoids Lua building interim strings and garbage -- collecting them. But it makes the code uglier. 
The get_data()/get_hex_data()/get_ascii_data() -- methods read into this table they get passed, while the read_data() functions handle managing -- the table. ---------------------------------------- ---------------------------------------- -- The base Packet class, from which others derive -- all Packets have a ptype, timestamp, source and dest address:port, and data -- local Packet = {} local Packet_mt = { __index = Packet } function Packet.new(state, timestamp, direction, source_ip, source_port, dest_ip, dest_port, ptype, ttype, file_position) local new_class = { -- the new instance ["state"] = state, ["timestamp"] = timestamp, ["direction"] = direction, ["source_ip"] = source_ip, ["source_port"] = source_port, ["dest_ip"] = dest_ip, ["dest_port"] = dest_port, ["ptype"] = ptype, ["ttype"] = ttype, ["file_position"] = file_position } setmetatable( new_class, Packet_mt ) -- all instances share the same metatable return new_class end function Packet:set_comment(comment) self["comment"] = comment end function Packet:set_wslua_fields(frame) frame.time = self.timestamp frame.rec_type = wtap_rec_types.PACKET frame.flags = wtap_presence_flags.TS -- for timestamp if self.comment then frame.comment = self.comment end return true end local packet_hexline_pattern = "^ %x%x%x0: %x%x" function Packet:get_hex_data(file, line, bufftbl, index) local start = index dprint2("Packet:get_hex_data() called") repeat for word in line:gmatch("(%x%x) ") do bufftbl[index] = char(hexbin[word]) index = index + 1 if ((index - start) % 16) == 0 then break end end line = file:read() until not line or not line:find(packet_hexline_pattern) return index - start, line end function Packet:get_ascii_data(file, line, bufftbl, index, only_newline) local bufflen = 0 -- keep tally of total length of payload local found_delim = true dprint2("Packet:get_ascii_data() called") repeat bufftbl[index] = line bufflen = bufflen + #line -- sanity check if line has "\r" at end, and if so only add \n if line:find("\r",-1,true) then bufftbl[index+1] = "\n" bufflen = bufflen + 1 dprint2("Found carriage-return at end of line") elseif only_newline then -- only add a newline bufftbl[index+1] = "\n" bufflen = bufflen + 1 else bufftbl[index+1] = "\r\n" bufflen = bufflen + 2 end index = index + 2 -- read next line now line = file:read() if not line then -- hit eof? found_delim = false break end until line:find(delim) -- get rid of last \r\n, if we found a dashed delimiter, as it's not part of packet if found_delim then bufflen = bufflen - bufftbl[index-1]:len() bufftbl[index-1] = nil end dprint2("Packet:get_ascii_data() returning", bufflen) return bufflen end ---------------------------------------- -- RawPacket class, for packets that the log file contains the whole IP header for, such as algd logs -- local RawPacket = {} local RawPacket_mt = { __index = RawPacket } setmetatable( RawPacket, Packet_mt ) -- make RawPacket inherit from Packet function RawPacket.new(...) local new_class = Packet.new(...) 
-- the new instance setmetatable( new_class, RawPacket_mt ) -- all instances share the same metatable return new_class end function RawPacket:read_data(file, frame, line, seeking) local bufftbl = {} -- table to hold data bytes local index = 1 -- start at first slot in array -- need to skip "Packet:" line and first 0000: line, it's internal junk line = file:read() line = file:read() dprint2("RawPacket:read_data() getting hex from line='", line, "'") local bufflen, line = self:get_hex_data(file, line, bufftbl, index) if not bufflen or bufflen < 21 then dprint("error getting binary data") return false end -- add remainder as more packet data, but first delete overlap -- see if frag bits are set in IP header, to see if UDP/TCP header exists if self.ptype == IPv4 then -- grab byte with frag flags and first byte of offset local flag = string.byte(bufftbl[7]) -- converts binary character to number local frag_offset = flag % 32 -- masks off upper 3 bits frag_offset = (frag_offset * 256) + string.byte(bufftbl[8]) flag = floor(flag / 224) -- shift right flag = flag % 2 -- mask upper bits if flag == 1 or frag_offset > 0 then -- we have a fragmented IPv4 packet, so no proto header -- only save first 20 bytes (the IP header) for i=bufflen, 21, -1 do bufftbl[i] = nil end bufflen = 20 else -- only save first 20 + proto size bytes local save if bufftbl[10] == PROTO_UDP then save = 28 elseif bufftbl[10] == PROTO_TCP then save = 40 else dprint("failed to fix raw packet overlap") return end for i=bufflen, save+1, -1 do bufftbl[i] = nil end bufflen = save end end -- TODO: IPv6 -- now read in rest of message, if any -- first skip extra empty newline if #line == 0 then line = file:read() end bufflen = bufflen + self:get_ascii_data(file, line, bufftbl, bufflen+1, true) frame.data = table.concat(bufftbl) return true end ---------------------------------------- -- DataPacket class, for packets that the log file contains just the payload data for -- local DataPacket = {} local DataPacket_mt = { __index = DataPacket } setmetatable( DataPacket, Packet_mt ) -- make DataPacket inherit from Packet function DataPacket.new(...) local new_class = Packet.new(...) 
-- the new instance setmetatable( new_class, DataPacket_mt ) -- all instances share the same metatable return new_class end function DataPacket:set_tcbkey(key) self["tcbkey"] = key return end function DataPacket:build_ipv4_hdr(bufflen, proto, seeking) local len = bufflen + 20 -- 20 byte IPv4 header size -- figure out the ip identification value local ip_ident if seeking then ip_ident = self.state.packets[self.file_position][IP_IDENT] else -- increment ident value self.state.ip_ident = self.state.ip_ident + 1 if self.state.ip_ident == 65536 then self.state.ip_ident = 1 end ip_ident = self.state.ip_ident -- save it for future seeking self.state.packets[self.file_position][IP_IDENT] = ip_ident end -- use a table to concatenate as it's slightly faster that way local hdrtbl = { "\69\00", -- 1=ipv4 and 20 byte header length dec2bin16(len), -- 2=packet length bytes dec2bin16(ip_ident), -- 3=ident field bytes "\00\00\64", -- 4=flags/fragment offset, ttl proto, -- 5=proto "\00\00", -- 6=checksum (using zero for now) iptobytes(self.source_ip), -- 7=source ip iptobytes(self.dest_ip) -- 8=dest ip } -- calc IPv4 header checksum, and set its value hdrtbl[6] = checksum(table.concat(hdrtbl)) return table.concat(hdrtbl) end function DataPacket:build_ipv6_hdr(bufflen, proto) -- use a table to concatenate as it's slightly faster that way local hdrtbl = { "\96\00\00\00", -- 1=ipv6 version, class, label dec2bin16(bufflen), -- 2=packet length bytes proto .. "\64", -- 4=proto, ttl ipv6tobytes(self.source_ip), -- 5=source ip ipv6tobytes(self.dest_ip) -- 6=dest ip } return table.concat(hdrtbl) end -- calculates TCP/UDP header checksums with pseudo-header info function DataPacket:calc_header_checksum(bufftbl, bufflen, hdrtbl, proto) -- first create pseudo IP header if self.ptype == IPv4 then local iphdrtbl = { iptobytes(self.source_ip), -- 1=source ip iptobytes(self.dest_ip), -- 2=dest ip "\00", -- zeros proto, -- proto dec2bin16(bufflen) -- payload length bytes } bufftbl[1] = table.concat(iphdrtbl) elseif self.ptype == IPv6 then local iphdrtbl = { ipv6tobytes(self.source_ip), -- 1=source ip ipv6tobytes(self.dest_ip), -- 2=dest ip "\00\00", -- zeroes dec2bin16(bufflen), -- payload length bytes "\00\00\00", -- zeros proto -- proto } bufftbl[1] = table.concat(iphdrtbl) end -- and pseudo TCP or UDP header bufftbl[2] = table.concat(hdrtbl) -- see if payload is odd length local odd = false if bufflen % 2 == 1 then -- odd number of payload bytes, add zero byte at end odd = true -- remember to undo this bufftbl[#bufftbl+1] = "\00" end local result = checksum(table.concat(bufftbl)) -- remove pseudo-headers bufftbl[1] = nil bufftbl[2] = nil if odd then bufftbl[#bufftbl] = nil end return result end function DataPacket:build_udp_hdr(bufflen, bufftbl) local len = bufflen + 8 -- 8 for size of UDP header local hdrtbl = { dec2bin16(self.source_port), -- 1=source port bytes dec2bin16(self.dest_port), -- 2=dest port bytes dec2bin16(len), -- 3=payload length bytes "\00\00" -- 4=checksum } if bufftbl then -- calc udp checksum (only done for IPv6) hdrtbl[4] = self:calc_header_checksum(bufftbl, len, hdrtbl, PROTO_UDP) end return table.concat(hdrtbl) end function DataPacket:build_tcp_hdr(bufflen, bufftbl, seeking) local len = bufflen + 20 -- 20 for size of TCP header local local_seq, remote_seq if seeking then local_seq = self.state.packets[self.file_position][LOCAL_SEQ] remote_seq = self.state.packets[self.file_position][REMOTE_SEQ] else -- find socket/tcb info for this "stream", create if not found if not self.state.tcb[self.tcbkey] 
then
            -- create them
            self.state.tcb[self.tcbkey] = {}
            local_seq = 1
            remote_seq = 1
            self.state.packets[self.file_position][LOCAL_SEQ] = 1
            self.state.packets[self.file_position][REMOTE_SEQ] = 1
            -- set the tcb to the next sequence numbers, so that the correct "side"
            -- acknowledges receiving these bytes
            if self.direction == SENT then
                -- this packet is being sent, so the local sequence increases next time
                self.state.tcb[self.tcbkey][TLOCAL_SEQ] = bufflen+1
                self.state.tcb[self.tcbkey][TREMOTE_SEQ] = 1
            else
                -- this packet is being received, so the remote sequence increases next time,
                -- and the local side will acknowledge it next time
                self.state.tcb[self.tcbkey][TLOCAL_SEQ] = 1
                self.state.tcb[self.tcbkey][TREMOTE_SEQ] = bufflen+1
            end
        else
            -- the stream already exists, so send the current tcb seqs and update for next time
            if self.direction == SENT then
                -- this packet is being sent, so the local sequence increases next time
                local_seq = self.state.tcb[self.tcbkey][TLOCAL_SEQ]
                remote_seq = self.state.tcb[self.tcbkey][TREMOTE_SEQ]
                self.state.tcb[self.tcbkey][TLOCAL_SEQ] = local_seq + bufflen
            else
                -- this packet is being received, so the packet's "local" seq number is really the remote side's seq
                local_seq = self.state.tcb[self.tcbkey][TREMOTE_SEQ]
                remote_seq = self.state.tcb[self.tcbkey][TLOCAL_SEQ]
                -- and the remote seq needs to increase next time (remember local_seq is TREMOTE_SEQ)
                self.state.tcb[self.tcbkey][TREMOTE_SEQ] = local_seq + bufflen
            end
            self.state.packets[self.file_position][LOCAL_SEQ] = local_seq
            self.state.packets[self.file_position][REMOTE_SEQ] = remote_seq
        end
    end

    local hdrtbl = {
        dec2bin16(self.source_port),  -- 1=source port bytes
        dec2bin16(self.dest_port),    -- 2=dest port bytes
        dec2bin32(local_seq),         -- 3=sequence number
        dec2bin32(remote_seq),        -- 4=ack number
        "\80\16\255\255",             -- 5=offset, flags, window size
        "\00\00",                     -- 6=checksum
        "\00\00"                      -- 7=urgent pointer
    }

    -- calc tcp checksum
    hdrtbl[6] = self:calc_header_checksum(bufftbl, len, hdrtbl, PROTO_TCP)

    return table.concat(hdrtbl)
end

function DataPacket:build_packet(bufftbl, bufflen, seeking)
    dprint2("DataPacket:build_packet() called with ptype=",self.ptype)
    if self.ptype == IPv4 then
        if self.ttype == UDP then
            bufftbl[2] = self:build_udp_hdr(bufflen)
            bufftbl[1] = self:build_ipv4_hdr(bufflen + 8, PROTO_UDP, seeking)
        elseif self.ttype == TCP then
            bufftbl[2] = self:build_tcp_hdr(bufflen, bufftbl, seeking)
            bufftbl[1] = self:build_ipv4_hdr(bufflen + 20, PROTO_TCP, seeking)
        end
    elseif self.ptype == IPv6 then
        -- UDP for IPv6 requires checksum calculation, so we can't avoid more work
        if self.ttype == UDP then
            bufftbl[2] = self:build_udp_hdr(bufflen, bufftbl)
            bufftbl[1] = self:build_ipv6_hdr(bufflen + 8, PROTO_UDP)
        elseif self.ttype == TCP then
            bufftbl[2] = self:build_tcp_hdr(bufflen, bufftbl, seeking)
            bufftbl[1] = self:build_ipv6_hdr(bufflen + 20, PROTO_TCP)
        end
    else
        dprint("DataPacket:build_packet: invalid packet type (neither IPv4 nor IPv6)")
        return nil
    end
    return table.concat(bufftbl)
end

-- for performance, we read each line into a table and concatenate it at the end,
-- but that makes this code super ugly
function DataPacket:read_data(file, frame, line, seeking)
    local bufftbl = { "", "" }  -- 2 slots for the ip and udp/tcp headers
    local index = 3             -- start at the third slot in the array

    dprint2("DataPacket: read_data(): calling get_data")
    local bufflen = self:get_data(file, line, bufftbl, index)
    if not bufflen then
        dprint("DataPacket: error getting ascii or binary data")
        return false
    end

    local buff = self:build_packet(bufftbl, bufflen, seeking)

    frame.data =
buff
    return true
end

----------------------------------------
-- BinPacket class, for packets that the log file contains binary payload data for, such as MBCD
--
local BinPacket = {}
local BinPacket_mt = { __index = BinPacket }
setmetatable( BinPacket, DataPacket_mt ) -- make BinPacket inherit from DataPacket

function BinPacket.new(...)
    local new_class = DataPacket.new(...) -- the new instance
    setmetatable( new_class, BinPacket_mt ) -- all instances share the same metatable
    return new_class
end

function BinPacket:get_comment_data(file, line, stop_pattern)
    local comments = {}

    while line and not line:find(stop_pattern) do
        if #line > 0 then
            comments[#comments+1] = line
            comments[#comments+1] = "\r\n"
        end
        line = file:read()
    end

    if #comments > 0 then
        -- get rid of the extra trailing "\r\n"
        comments[#comments] = nil
        self:set_comment(table.concat(comments))
    end

    return line
end

function BinPacket:get_data(file, line, bufftbl, index)
    local bufflen, line = self:get_hex_data(file, line, bufftbl, index)

    -- now eat the rest of the message until the delimiter or end of file;
    -- we'll put those lines in comments
    line = self:get_comment_data(file, line, delim)

    -- return the bufflen, which is the same as the number of table entries we made
    return bufflen
end

----------------------------------------
-- DnsPacket class, for DNS packets (which are binary but with comments at top)
--
local DnsPacket = {}
local DnsPacket_mt = { __index = DnsPacket }
setmetatable( DnsPacket, BinPacket_mt ) -- make DnsPacket inherit from BinPacket

function DnsPacket.new(...)
    local new_class = BinPacket.new(...) -- the new instance
    setmetatable( new_class, DnsPacket_mt ) -- all instances share the same metatable
    return new_class
end

local binpacket_start_pattern = "^ 0000: %x%x %x%x %x%x %x%x %x%x %x%x %x%x %x%x "

function DnsPacket:get_data(file, line, bufftbl, index)
    -- it's UDP regardless of what parse_header() thinks
    self.ttype = UDP

    -- comments are at the top instead of the bottom of the message
    line = self:get_comment_data(file, line, binpacket_start_pattern)

    local bufflen, line = self:get_hex_data(file, line, bufftbl, index)

    -- now eat the rest of the message until the delimiter or end of file
    while line and not line:find(delim) do
        line = file:read()
    end

    -- return the bufflen, which is the same as the number of table entries we made
    return bufflen
end

----------------------------------------
-- AsciiPacket class, for packets that the log file contains ascii payload data for
--
local AsciiPacket = {}
local AsciiPacket_mt = { __index = AsciiPacket }
setmetatable( AsciiPacket, DataPacket_mt ) -- make AsciiPacket inherit from DataPacket

function AsciiPacket.new(...)
    local new_class = DataPacket.new(...) -- the new instance
    setmetatable( new_class, AsciiPacket_mt ) -- all instances share the same metatable
    return new_class
end

function AsciiPacket:get_data(file, line, bufftbl, index)
    return self:get_ascii_data(file, line, bufftbl, index)
end

----------------------------------------
-- To determine the packet type, we peek at the first line of 'data' following the log
-- message header. Its pattern determines the Packet object type.
-- The following are the patterns we look for; if a line doesn't match one of these,
-- then it's an AsciiPacket:
local packet_patterns = {
    { "^ 0000: %x%x %x%x %x%x %x%x %x%x %x%x %x%x %x%x ", BinPacket },
    { "^Packet:$", RawPacket },
    { "^DNS Query %d+ flags=%d+ q=%d+ ans=%d+", DnsPacket },
    { "^DNS Response %d+ flags=%d+ q=%d+ ans=%d+", DnsPacket }
}
-- indices for the above
local PP_PATTERN = 1
local PP_CLASS = 2

local function get_packet_class(line)
    for i, t in ipairs(packet_patterns) do
        if line:find(t[PP_PATTERN]) then
            dprint2("got class type=",i)
            return t[PP_CLASS]
        end
    end
    dprint2("got class type AsciiPacket")
    return AsciiPacket
end

----------------------------------------
-- parses the header line
-- returns nil on failure
-- (a simplified, standalone sketch of this matching appears after read_common() below)
-- the header lines look like this:
--   Aug 10 14:30:11.134 On [1:544]10.201.145.237:5060 received from 10.210.1.193:5060
-- this one has no phy/vlan info in brackets:
--   Mar 6 13:39:06.122 On 127.0.0.1:2945 sent to 127.0.0.1:2944
-- this one is IPv6:
--   Aug 10 14:30:11.140 On [3:0][2620:0:60:8ac::102]:5060 sent to [2620:0:60:8ab::12]:5060
-- this is from a tail'ed log output:
--   52:22.434 On [0:0]205.152.56.211:5060 received from 205.152.56.75:5060
local loopback_pattern = "^127%.0%.0%.%d+$"

local function parse_header(state, file, line, file_position, seeking)
    if seeking then
        -- verify we've seen this packet before
        if not state.packets[file_position] then
            dprint("parse_header: packet at file position ", file_position, " not saved previously")
            return
        end
    else
        -- first time through, create a sub-table for the packet
        state.packets[file_position] = {}
    end

    -- get the time info, and the position where the line match ended
    local timestamp, line_pos = state:get_timestamp(line, file_position, seeking)
    if not timestamp then
        -- see if it's a tail'ed log instead
        timestamp, line_pos = state:get_tail_time(line, file_position, seeking)
    end
    if not timestamp then
        dprint("parse_header: could not parse time portion")
        return
    end

    local ptype, ttype = IPv4, UDP

    -- get phy/vlan if present
    -- first skip past the time portion
    local phy, vlan, i, j, k
    line_pos = line_pos + 1
    i, j, phy, vlan = line:find(header_phy_pattern, line_pos)
    if i then
        phy = tonumber(phy)
        vlan = tonumber(vlan)
        line_pos = j  -- skip past this portion for the next match
    else
        -- if there's no phy/vlan info, then assume it's TCP (unless both addresses
        -- are loopback, which we check below)
        ttype = TCP
    end

    -- get the addresses and direction
    local local_ip, local_port, direction, remote_ip, remote_port = line:match(header_address_pattern, line_pos)
    if not local_ip then
        -- try IPv6
        local_ip, local_port, direction, remote_ip, remote_port = line:match(header_v6address_pattern, line_pos)
        if not local_ip then
            dprint("parse_header: could not parse address portion")
            return nil
        end
        ptype = IPv6
    end

    if local_ip:find(loopback_pattern) and remote_ip:find(loopback_pattern) then
        -- internal loopback packets never have phy/vlan but are (for all intents) always UDP messages
        ttype = UDP
    end

    -- override the above decisions based on configuration
    if ALWAYS_UDP then
        ttype = UDP
    end

    direction = get_direction(direction)
    if direction == nil then
        dprint("parse_header: failed to convert direction")
        return nil
    end

    local source_ip, source_port, dest_ip, dest_port = local_ip, local_port, remote_ip, remote_port
    if direction == RECV then
        -- swap them
        source_ip, source_port, dest_ip, dest_port = remote_ip, remote_port, local_ip, local_port
    end

    -- convert
    source_port = tonumber(source_port)
    dest_port = tonumber(dest_port)

    -- peek at the next line to determine the packet type
    local position = file:seek()
    line = file:read()
dprint2("parse_header: peeking at line='", line, "'") packet_class = get_packet_class(line) file:seek("set", position) -- go back dprint2("parse_header calling packet_class.new with:", tostring(timestamp), direction, source_ip, source_port, dest_ip, dest_port, ptype, ttype, file_position) local packet = packet_class.new(state, timestamp, direction, source_ip, source_port, dest_ip, dest_port, ptype, ttype, file_position) if not packet then dprint("parse_header: parser failed to create Packet object") end if ttype == TCP then -- if the packet is tcp type, then set the key for TCB table lookup packet:set_tcbkey(table.concat({ "[", local_ip, "]:", local_port, "->[", remote_ip, "]:", remote_port })) end return packet end ---------------------------------------- -- file handling functions for Wireshark to use -- The read_open is called by Wireshark once per file, to see if the file is this reader's type. -- It passes in (1) a File and (2) CaptureInfo object to this function -- Since there is no exact magic sequence to search for, we have to use heuristics to guess if the file -- is our type or not, which we do by parsing a message header. -- Since Wireshark uses the file cursor position for future reading of this file, we also have to seek back to the beginning -- so that our normal read() function works correctly. local function read_open(file, capture) dprint2("read_open called") -- save current position to return later local position = file:seek() local line = file:read() if not line then return false end dprint2("read_open: got this line begin:\n'", line, "'") line, position = skip_ahead(file, line, position) if not line then return false end dprint2("read_open: got this line after skip:\n'", line, "', with position=", position) local state = State.new() if parse_header(state, file, line, position) then dprint2("read_open success") file:seek("set",position) capture.time_precision = wtap_filetypes.TSPREC_MSEC -- for millisecond precision capture.encap = wtap.RAW_IP -- whole file is raw IP format capture.snapshot_length = 0 -- unknown snaplen capture.comment = "Oracle Acme Packet SBC message log" capture.os = "VxWorks or Linux" capture.hardware = "Oracle Acme Packet SBC" -- reset state variables capture.private_table = State.new() dprint2("read_open returning true") return true end dprint2("read_open returning false") return false end ---------------------------------------- -- this is used by both read() and seek_read() local function read_common(funcname, file, capture, frame, position, seeking) dprint2(funcname, "read_common called") local state = capture.private_table if not state then dprint(funcname, "error getting capture state") return false end local line = file:read() if not line then dprint(funcname, "hit end of file") return false end line, position = skip_ahead(file, line, position) if not line then if file:read(0) ~= nil then dprint(funcname, "did not hit end of file after skipping but ending anyway") else dprint2(funcname, "hit end of file after skipping") end return false end dprint2(funcname, ": parsing line='", line, "'") local phdr = parse_header(state, file, line, position, seeking) if not phdr then dprint(funcname, "failed to parse header") return false end line = file:read() dprint2(funcname,": calling class object's read_data()") phdr:read_data(file, frame, line, seeking) if not phdr:set_wslua_fields(frame) then dprint(funcname, "failed to set Wireshark packet header info") return end dprint2(funcname, "read_common returning position") return position end 
----------------------------------------
-- Wireshark/tshark calls read() for each frame/record in the file.
-- It passes in (1) a File, (2) CaptureInfo, and (3) a FrameInfo object to this function.
-- It expects in return the file offset position the record starts at,
-- or nil/false if there's an error or end-of-file is reached.
-- The offset position is used later: Wireshark remembers it and gives
-- it to seek_read() at various random times.
local function read(file, capture, frame)
    dprint2("read called")
    local position = file:seek()
    position = read_common("read", file, capture, frame, position)
    if not position then
        if file:read(0) ~= nil then
            dprint("read failed to call read_common")
        else
            dprint2("read: reached end of file")
        end
        return false
    end
    return position
end

----------------------------------------
-- Wireshark/tshark calls seek_read() for each frame/record in the file, at random times.
-- It passes in (1) a File, (2) CaptureInfo, (3) FrameInfo, and (4) the offset position number.
-- It expects in return true for successful parsing, or nil/false if there's an error.
local function seek_read(file, capture, frame, offset)
    dprint2("seek_read called")
    file:seek("set",offset)
    if not read_common("seek_read", file, capture, frame, offset, true) then
        dprint("seek_read failed to call read_common")
        return false
    end
    return true
end

----------------------------------------
-- Wireshark/tshark calls read_close() when it's closing the file completely.
-- It passes in (1) a File and (2) a CaptureInfo object to this function.
-- This is a good opportunity to clean up any state you may have created during
-- file reading.
-- In our case there *is* state to reset, but we only saved it in
-- the capture.private_table, so Wireshark will clean it up for us.
local function read_close(file, capture)
    dprint2("read_close called")
    return true
end

----------------------------------------
-- An often unused function: Wireshark calls this when the sequential walk-through is over
-- (i.e., there will be no more calls to read(), only to seek_read()).
-- It passes in (1) a File and (2) a CaptureInfo object to this function.
-- In our case there *is* some state to reset, but we only saved it in
-- the capture.private_table, so Wireshark will clean it up for us.
local function seq_read_close(file, capture)
    dprint2("seq_read_close called")
    return true
end

-- set the above functions on the FileHandler
fh.read_open = read_open
fh.read = read
fh.seek_read = seek_read
fh.read_close = read_close
fh.seq_read_close = seq_read_close
fh.extensions = "log"  -- this is just a hint

-- and finally, register the FileHandler!
register_filehandler(fh)
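
----------------------------------------
-- Illustrative aside (not part of the reader): the arithmetic RawPacket:read_data()
-- uses to pull the IPv4 MF flag and fragment offset out of header bytes 7-8
-- without a bit library. A standalone sketch with made-up byte values; note the
-- offset field is in 8-byte units on the wire.
local function demo_frag_bits(byte7, byte8)
    local frag_offset = (byte7 % 32) * 256 + byte8  -- low 5 bits of byte 7 are the offset's high bits
    local mf = math.floor(byte7 / 32) % 2           -- shift right 5 bits, then mask to get the MF bit
    return mf, frag_offset
end
-- demo_frag_bits(0x20, 0x00) --> 1, 0   (first fragment, more to come)
-- demo_frag_bits(0x00, 0xB9) --> 0, 185 (last fragment of a series)
-- demo_frag_bits(0x40, 0x00) --> 0, 0   (DF set, not fragmented)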
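
----------------------------------------
-- Illustrative aside (not part of the reader): one way to compute the RFC 1071
-- one's-complement checksum that build_ipv4_hdr() and calc_header_checksum()
-- rely on, returning it as a 2-byte string the way those callers splice it into
-- slot 6 of the header table. A sketch only; the real checksum() helper is
-- defined earlier in this script and may differ in detail.
local function demo_checksum(data)
    if #data % 2 == 1 then data = data .. "\0" end  -- pad to an even length
    local sum = 0
    for i = 1, #data, 2 do
        sum = sum + string.byte(data, i) * 256 + string.byte(data, i + 1)
    end
    while sum > 0xffff do  -- fold the carries back in
        sum = (sum % 0x10000) + math.floor(sum / 0x10000)
    end
    sum = 0xffff - sum     -- one's complement
    return string.char(math.floor(sum / 256), sum % 256)  -- network byte order
end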
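
----------------------------------------
-- Illustrative aside (not part of the reader): DataPacket:build_tcp_hdr()'s
-- sequence bookkeeping in miniature. Each direction's next sequence number
-- advances by the payload length it sends, and a packet's ack mirrors the other
-- side's next sequence. A standalone sketch with a plain table standing in for
-- a tcb entry.
local function demo_tcb_update(tcb, sent, payload_len)
    tcb.local_seq = tcb.local_seq or 1
    tcb.remote_seq = tcb.remote_seq or 1
    local seq, ack
    if sent then
        seq, ack = tcb.local_seq, tcb.remote_seq
        tcb.local_seq = tcb.local_seq + payload_len    -- our side sent these bytes
    else
        seq, ack = tcb.remote_seq, tcb.local_seq
        tcb.remote_seq = tcb.remote_seq + payload_len  -- the remote side sent these bytes
    end
    return seq, ack
end
-- local tcb = {}
-- demo_tcb_update(tcb, true, 100)  --> 1, 1    (local_seq becomes 101)
-- demo_tcb_update(tcb, false, 50)  --> 1, 101  (remote_seq becomes 51)
-- demo_tcb_update(tcb, true, 10)   --> 101, 51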
Lua
wireshark/test/lua/add_packet_field.lua
--[[
The tree:add_packet_field() method returns a value and an offset in addition to
a tree item. This file tests whether that value and offset are correct.

As for the value, its correctness is tested in several ways for a given input.

1. The returned value should match a precomputed value
2. The returned value should match the value obtained from a Field object right
   after tree:add_packet_field() is called
3. The returned value should match the value obtained from a Field object right
   after tree:add() is called with the same input as tree:add_packet_field()
4. The returned value should match the value obtained from the corresponding
   value function called on the input tvbrange

There are some incompatibilities and limitations due to handling of encodings.
Incompatibilities are noted with the text INCOMPATIBILITY in a nearby comment.
]]

local field_setup = require "field_setup"

--[[
This dissector expects a capture with at least one packet on UDP 65333.
All the actual test data is synthetic.
]]--
local myproto = Proto("test", "Test")

field_data = field_setup(myproto, "test")

function hexlify_string(s)
    local sep = ""
    local hx = ""
    for i=1,#s do
        hx = hx .. sep .. string.format("%02x", s:byte(i))
        sep = " "
    end
    return hx
end

--[[
Ensure the value is represented in a way that shows up when printed.
It is assumed the string representation is relatively short.

The test suite will report an error if we print invalid utf8 for any reason.
We work around this by passing a substitution string used when the real string
has invalid utf8. We also print the output bytes in hex after the string, and
those bytes are always faithful to the real output.
]]--
function format_value_for_print(v, substitution)
    local t = type(v)
    local s
    if t == "string" then
        local hx = hexlify_string(v)
        if substitution ~= nil then
            s = string.format("(invalid utf8) \"%s\" [%s]", substitution, hx)
        else
            s = string.format("\"%s\" [%s]", v, hx)
        end
    else
        s = tostring(v)
    end

    return string.format("(%s) %s", type(v), s)
end

function format_encoding_for_print(enc)
    local char_enc = "ASCII"
    if bit.band(enc, ENC_UTF_16) ~= 0 then
        char_enc = "UTF-16"
    end

    local end_enc = "BE"
    if bit.band(enc, ENC_LITTLE_ENDIAN) ~= 0 then
        end_enc = "LE"
    end

    if enc == ENC_ISO_8601_DATE_TIME then
        char_enc = "ISO_8601"
        end_enc = "-"
    end

    return string.format("%s %s", char_enc, end_enc)
end

function print_test_data(test_data)
    print(string.format("TEST: using field type: %s", test_data.field_type))
    if test_data.hexlify then
        print(string.format("TEST: input was hexlified from: \"%s\"", test_data.original_input))
    end
    print(string.format("TEST: using data: [%s]", test_data.input))
    print(string.format("TEST: using offset: %d", test_data.input_offset))
    print(string.format("TEST: using encoding: %s", format_encoding_for_print(test_data.encoding)))
    print()
end

function general_equality_test(a, b)
    return a == b
end

-- equal, or both nan
function float_equality_test(a, b)
    return a == b or (a ~= a and b ~= b)
end

function recent_field_value(t)
    local values = {field_data[t].value_field()}
    return values[#values].value
end

function add_packet_field_returns_precomputed_value(test_data)
    print(string.format(" EXPECT: precomputed return value: %s", format_value_for_print(test_data.expect_precomputed)))
    print(string.format(" OUTPUT: add_packet_field returned value: %s", format_value_for_print(test_data.returned_value)))

    if test_data.equality_function(test_data.returned_value, test_data.expect_precomputed) then
        print(" PASS: the return value is correct")
        print()
        return true
    end

    print(" FAIL: the
returned value is incorrect") print() return false end function add_packet_field_then_value_field_returns_expected_value(test_data) print(string.format(" EXPECT: value field value %s", format_value_for_print(test_data.expect_add_pf_field_value))) print(string.format(" OUTPUT: value field after tree:add_packet_field() returned: %s", format_value_for_print(test_data.returned_add_pf_field_value))) local incompatible = test_data.expect_add_pf_field_value ~= test_data.expect_precomputed if incompatible then print(" WARNING: the value field does not return the same value as the other implementations") end if test_data.equality_function(test_data.returned_add_pf_field_value, test_data.expect_add_pf_field_value) then print(" PASS: the value field is correct") print() return true end print(" FAIL: the value field is incorrect") print() return false end function tree_add_then_value_field_returns_expected_value(test_data) if test_data.skip_tree_add_test then print(" SKIP: " .. test_data.skip_tree_add_test_message) print() return true end print(string.format(" EXPECT: value field value %s", format_value_for_print(test_data.expect_add_field_value))) print(string.format(" OUTPUT: value field after tree:add() returned: %s", format_value_for_print(test_data.returned_add_field_value))) local incompatible = test_data.expect_add_field_value ~= test_data.expect_precomputed if incompatible then print(" WARNING: the value field does not return the same value as the other implementations") end if test_data.equality_function(test_data.returned_add_field_value, test_data.expect_add_field_value) then print(" PASS: the value field is correct") print() return true end print(" FAIL: the value field is incorrect") print() return false end --[[ The tvbrange:string() function can return invalid utf8 even when the input is valid. 
]]
function tvbrange_returns_expected_value(test_data)
    if test_data.tvbr_fn == nil then
        print(" SKIP: no tvbrange function for this field type")
        print()
        return true
    end

    local tvbr_value, tvbr_fn_printable = test_data.tvbr_fn(test_data.input_tvbrange, test_data.encoding)
    local pass = test_data.equality_function(tvbr_value, test_data.expect_tvbrange_value)
    local incompatible = test_data.expect_tvbrange_value ~= test_data.expect_precomputed

    local tvbr_value_printable = format_value_for_print(tvbr_value)
    local expect_value_printable = format_value_for_print(test_data.expect_tvbrange_value, test_data.expect_tvbrange_value_printable)
    if pass then
        -- if the outputs are equal, then the substitute is usable for both
        tvbr_value_printable = format_value_for_print(tvbr_value, test_data.expect_tvbrange_value_printable)
    end

    print(string.format(" TEST: using tvbrange function %s", tvbr_fn_printable))
    print(string.format(" EXPECT: tvbrange value %s", expect_value_printable))
    print(string.format(" OUTPUT: tvbrange returned %s", tvbr_value_printable))

    if incompatible then
        print(" WARNING: the tvbr function is not compatible with the other implementations")
    end

    if pass then
        print(" PASS: the tvbr function works as expected")
        print()
        return true
    end

    print(" FAIL: the tvbr function did not work as expected")
    print()
    return false
end

function add_packet_field_returns_correct_offset(test_data)
    print(string.format(" EXPECT: offset %d", test_data.expect_offset))
    print(string.format(" OUTPUT: add_packet_field returned offset %d", test_data.returned_offset))

    if test_data.returned_offset == test_data.expect_offset then
        print(" PASS: the returned offset is correct")
        print()
        return true
    end

    print(" FAIL: the returned offset is incorrect")
    print()
    return false
end

function add_packet_field_all_tests(tree, test_data)
    print_test_data(test_data)
    local ret = true
        and add_packet_field_returns_precomputed_value(test_data)
        and add_packet_field_then_value_field_returns_expected_value(test_data)
        and tree_add_then_value_field_returns_expected_value(test_data)
        and tvbrange_returns_expected_value(test_data)
        and add_packet_field_returns_correct_offset(test_data)
    return ret
end

function generate_test_data_for_case(tree, field_type, case, tvbr_fn, equality_function, use_offset)
    local input = case.input
    if case.hexlify then
        input = hexlify_string(case.input)
    end

    local input_byte_length = string.len(input:gsub(" ", "")) / 2

    local input_offset = 0
    if use_offset then
        input = "77 " ..
input input_offset = 1 end local input_tvb = ByteArray.new(input):tvb() local input_tvbrange if case.fake_input_length == nil then input_tvbrange = input_tvb(input_offset, input_byte_length) else input_tvbrange = input_tvb(input_offset, case.fake_input_length) end local t = field_data[field_type] local add_pf_leaf, returned_value, returned_offset = tree:add_packet_field(t.packet_field, input_tvbrange, case.encoding) local add_pf_field_value = recent_field_value(field_type) local add_leaf = nil local add_field_value = nil local skip_tree_add_test_message = nil local skip_tree_add_test = false if case.encoding == ENC_ASCII + ENC_BIG_ENDIAN then add_leaf = tree:add(t.packet_field, input_tvbrange) add_field_value = recent_field_value(field_type) elseif case.encoding == ENC_ASCII + ENC_LITTLE_ENDIAN then add_leaf = tree:add_le(t.packet_field, input_tvbrange) add_field_value = recent_field_value(field_type) else skip_tree_add_test = true skip_tree_add_test_message = "tree:add() only uses ASCII encoding" end local expect_add_pf_field_value = case.output if case.incompatible_add_pf_field then expect_add_pf_field_value = case.expect_add_pf_field_value end local expect_add_field_value = case.output if case.incompatible_add_field then expect_add_field_value = case.expect_add_field_value end local expect_tvbrange_value = case.output if case.incompatible_tvbrange then expect_tvbrange_value = case.expect_tvbrange_value end local expect_offset = input_byte_length + input_offset if case.variable_input_length then expect_offset = case.input_length + input_offset end return { field_type = field_type, hexlify = case.hexlify, original_input = case.input, input = input, input_offset = input_offset, input_tvbrange = input_tvbrange, encoding = case.encoding, returned_value = returned_value, returned_offset = returned_offset, returned_add_pf_field_value = add_pf_field_value, returned_add_field_value = add_field_value, tvbr_fn = tvbr_fn, equality_function = equality_function, expect_precomputed = case.output, expect_add_pf_field_value = expect_add_pf_field_value, expect_add_field_value = expect_add_field_value, skip_tree_add_test = skip_tree_add_test, skip_tree_add_test_message = skip_tree_add_test_message, expect_tvbrange_value = expect_tvbrange_value, expect_tvbrange_value_printable = case.expect_tvbrange_value_printable, expect_offset = expect_offset } end function run_test_cases_all_tests(tree, field_type, test_cases, tvbr_fn, equality_function) local test_data for _ , case in ipairs(test_cases) do test_data = generate_test_data_for_case(tree, field_type, case, tvbr_fn, equality_function, true) if not add_packet_field_all_tests(tree, test_data) then return false end test_data = generate_test_data_for_case(tree, field_type, case, tvbr_fn, equality_function, false) if not add_packet_field_all_tests(tree, test_data) then return false end end return true end function simple_integer_tests(tree) local uint8_test_cases = { {input = "ff", encoding = ENC_LITTLE_ENDIAN, output = 0xff}, {input = "00", encoding = ENC_LITTLE_ENDIAN, output = 0x00}, {input = "ff", encoding = ENC_BIG_ENDIAN, output = 0xff}, {input = "00", encoding = ENC_BIG_ENDIAN, output = 0x00}, } local uint16_test_cases = { {input = "ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff}, {input = "00 ff", encoding = ENC_LITTLE_ENDIAN, output = 0xff00}, {input = "ff 00", encoding = ENC_BIG_ENDIAN, output = 0xff00}, {input = "00 ff", encoding = ENC_BIG_ENDIAN, output = 0x00ff}, } local uint24_test_cases = { {input = "ff 00 00", encoding = 
ENC_LITTLE_ENDIAN, output = 0x0000ff}, {input = "00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff00}, {input = "00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = 0xff0000}, {input = "ff 00 00", encoding = ENC_BIG_ENDIAN, output = 0xff0000}, {input = "00 ff 00", encoding = ENC_BIG_ENDIAN, output = 0x00ff00}, {input = "00 00 ff", encoding = ENC_BIG_ENDIAN, output = 0x0000ff}, } local uint32_test_cases = { {input = "ff 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0x000000ff}, {input = "00 ff 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0x0000ff00}, {input = "00 00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff0000}, {input = "00 00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = 0xff000000}, {input = "ff 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0xff000000}, {input = "00 ff 00 00", encoding = ENC_BIG_ENDIAN, output = 0x00ff0000}, {input = "00 00 ff 00", encoding = ENC_BIG_ENDIAN, output = 0x0000ff00}, {input = "00 00 00 ff", encoding = ENC_BIG_ENDIAN, output = 0x000000ff}, } function tvbr_uint (tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_uint(), "le_uint()" else return tvbr:uint(), "uint()" end end local int8_test_cases = { {input = "ff", encoding = ENC_LITTLE_ENDIAN, output = -0x01}, {input = "00", encoding = ENC_LITTLE_ENDIAN, output = 0x00}, {input = "ff", encoding = ENC_BIG_ENDIAN, output = -0x01}, {input = "00", encoding = ENC_BIG_ENDIAN, output = 0x00}, } local int16_test_cases = { {input = "ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff}, {input = "00 ff", encoding = ENC_LITTLE_ENDIAN, output = -0x0100}, {input = "ff 00", encoding = ENC_BIG_ENDIAN, output = -0x0100}, {input = "00 ff", encoding = ENC_BIG_ENDIAN, output = 0x00ff}, } local int24_test_cases = { {input = "ff 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0x0000ff}, {input = "00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff00}, {input = "00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = -0x010000}, {input = "ff 00 00", encoding = ENC_BIG_ENDIAN, output = -0x010000}, {input = "00 ff 00", encoding = ENC_BIG_ENDIAN, output = 0x00ff00}, {input = "00 00 ff", encoding = ENC_BIG_ENDIAN, output = 0x0000ff}, } local int32_test_cases = { {input = "ff 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0x000000ff}, {input = "00 ff 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0x0000ff00}, {input = "00 00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = 0x00ff0000}, {input = "00 00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = -0x01000000}, {input = "ff 00 00 00", encoding = ENC_BIG_ENDIAN, output = -0x01000000}, {input = "00 ff 00 00", encoding = ENC_BIG_ENDIAN, output = 0x00ff0000}, {input = "00 00 ff 00", encoding = ENC_BIG_ENDIAN, output = 0x0000ff00}, {input = "00 00 00 ff", encoding = ENC_BIG_ENDIAN, output = 0x000000ff}, } function tvbr_int(tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_int(), "le_int()" else return tvbr:int(), "int()" end end return true and run_test_cases_all_tests(tree, "uint8", uint8_test_cases, tvbr_uint, general_equality_test) and run_test_cases_all_tests(tree, "uint16", uint16_test_cases, tvbr_uint, general_equality_test) and run_test_cases_all_tests(tree, "uint24", uint24_test_cases, tvbr_uint, general_equality_test) and run_test_cases_all_tests(tree, "uint32", uint32_test_cases, tvbr_uint, general_equality_test) and run_test_cases_all_tests(tree, "int8", int8_test_cases, tvbr_int, general_equality_test) and run_test_cases_all_tests(tree, "int16", int16_test_cases, tvbr_int, general_equality_test) and 
run_test_cases_all_tests(tree, "int24", int24_test_cases, tvbr_int, general_equality_test) and run_test_cases_all_tests(tree, "int32", int32_test_cases, tvbr_int, general_equality_test) end function integer64_tests(tree) local uint64_test_cases = { {input = "ff 00 00 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x000000ff, 0x00000000)}, {input = "00 ff 00 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x0000ff00, 0x00000000)}, {input = "00 00 ff 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x00ff0000, 0x00000000)}, {input = "00 00 00 ff 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0xff000000, 0x00000000)}, {input = "00 00 00 00 ff 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x00000000, 0x000000ff)}, {input = "00 00 00 00 00 ff 00 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x00000000, 0x0000ff00)}, {input = "00 00 00 00 00 00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x00000000, 0x00ff0000)}, {input = "00 00 00 00 00 00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = UInt64(0x00000000, 0xff000000)}, {input = "ff 00 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x00000000, 0xff000000)}, {input = "00 ff 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x00000000, 0x00ff0000)}, {input = "00 00 ff 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x00000000, 0x0000ff00)}, {input = "00 00 00 ff 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x00000000, 0x000000ff)}, {input = "00 00 00 00 ff 00 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0xff000000, 0x00000000)}, {input = "00 00 00 00 00 ff 00 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x00ff0000, 0x00000000)}, {input = "00 00 00 00 00 00 ff 00", encoding = ENC_BIG_ENDIAN, output = UInt64(0x0000ff00, 0x00000000)}, {input = "00 00 00 00 00 00 00 ff", encoding = ENC_BIG_ENDIAN, output = UInt64(0x000000ff, 0x00000000)}, } function tvbr_uint(tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_uint64(), "le_uint64()" else return tvbr:uint64(), "uint64()" end end local int64_test_cases = { {input = "ff 00 00 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x000000ff, 0x00000000)}, {input = "00 ff 00 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x0000ff00, 0x00000000)}, {input = "00 00 ff 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x00ff0000, 0x00000000)}, {input = "00 00 00 ff 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0xff000000, 0x00000000)}, {input = "00 00 00 00 ff 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x00000000, 0x000000ff)}, {input = "00 00 00 00 00 ff 00 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x00000000, 0x0000ff00)}, {input = "00 00 00 00 00 00 ff 00", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x00000000, 0x00ff0000)}, {input = "00 00 00 00 00 00 00 ff", encoding = ENC_LITTLE_ENDIAN, output = Int64(0x00000000, 0xff000000)}, {input = "ff 00 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = Int64(0x00000000, 0xff000000)}, {input = "00 ff 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = Int64(0x00000000, 0x00ff0000)}, {input = "00 00 ff 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = Int64(0x00000000, 0x0000ff00)}, {input = "00 00 00 ff 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = Int64(0x00000000, 0x000000ff)}, {input = "00 00 00 00 ff 00 00 00", encoding = ENC_BIG_ENDIAN, output = Int64(0xff000000, 0x00000000)}, {input = "00 00 00 00 00 ff 00 00", 
encoding = ENC_BIG_ENDIAN, output = Int64(0x00ff0000, 0x00000000)}, {input = "00 00 00 00 00 00 ff 00", encoding = ENC_BIG_ENDIAN, output = Int64(0x0000ff00, 0x00000000)}, {input = "00 00 00 00 00 00 00 ff", encoding = ENC_BIG_ENDIAN, output = Int64(0x000000ff, 0x00000000)}, } function tvbr_int(tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_int64(), "le_int64()" else return tvbr:int64(), "int64()" end end return true and run_test_cases_all_tests(tree, "uint64", uint64_test_cases, tvbr_uint, general_equality_test) and run_test_cases_all_tests(tree, "int64", int64_test_cases, tvbr_int, general_equality_test) end function string_tests(tree) local ABC_ascii = "41 42 43" local ABCzD_ascii = "41 42 43 00 44" local SHARK_16_little = "b5 30 e1 30" local SHARKzSA_16_little = "b5 30 e1 30 00 00 b5 30" local SHARK_16_big = "30 b5 30 e1" local SHARKzSA_16_big = "30 b5 30 e1 00 00 30 b5" local string_test_cases = { {input = ABC_ascii, encoding = ENC_ASCII, output = "ABC"}, {input = ABCzD_ascii, encoding = ENC_ASCII, output = "ABC"}, {input = SHARK_16_little, encoding = ENC_ASCII, output = "�0�0"}, {input = SHARK_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ"}, {input = SHARKzSA_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ"}, {input = SHARK_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ"}, {input = SHARKzSA_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ"}, } function tvbr_string(tvbr, encoding) return tvbr:string(encoding), string.format("string(%s)", format_encoding_for_print(encoding)) end --[[ stringz computes its own input length by looking for null the input length includes the null, which is 2 bytes for utf16 ]]-- local stringz_tests = { {input = ABCzD_ascii, encoding = ENC_ASCII, output = "ABC", variable_input_length = true, input_length = 4 }, {input = SHARKzSA_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ", variable_input_length = true, input_length = 6, }, {input = SHARKzSA_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ", variable_input_length = true, input_length = 6, }, } function tvbr_stringz(tvbr, encoding) return tvbr:stringz(encoding), string.format("stringz(%s)", format_encoding_for_print(encoding)) end local ustring_tests = { {input = SHARK_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ"}, {input = SHARKzSA_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ"}, } function tvbr_ustring(tvbr, encoding) return tvbr:ustring(), "ustring()" end local le_ustring_tests = { {input = SHARK_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ"}, {input = SHARKzSA_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ"}, } function tvbr_le_ustring(tvbr, encoding) return tvbr:le_ustring(), "le_ustring()" end local ustringz_tests = { {input = SHARKzSA_16_big, encoding = ENC_UTF_16 + ENC_BIG_ENDIAN, output = "サメ", variable_input_length = true, input_length = 6 }, } function tvbr_ustringz(tvbr, encoding) return tvbr:ustringz(), "ustringz()" end local le_ustringz_tests = { {input = SHARKzSA_16_little, encoding = ENC_UTF_16 + ENC_LITTLE_ENDIAN, output = "サメ", variable_input_length = true, input_length = 6 }, } function tvbr_le_ustringz(tvbr, encoding) return tvbr:le_ustringz(), "le_ustringz()" end return true and run_test_cases_all_tests(tree, "string", string_test_cases, tvbr_string, general_equality_test) and run_test_cases_all_tests(tree, "stringz", stringz_tests, tvbr_stringz, general_equality_test) and 
run_test_cases_all_tests(tree, "string", ustring_tests, tvbr_ustring, general_equality_test) and run_test_cases_all_tests(tree, "string", le_ustring_tests, tvbr_le_ustring, general_equality_test) and run_test_cases_all_tests(tree, "stringz", ustringz_tests, tvbr_ustringz, general_equality_test) and run_test_cases_all_tests(tree, "stringz", le_ustringz_tests, tvbr_le_ustringz, general_equality_test) end function bool_char_tests(tree) local bool_tests = { {input = "ff", encoding = ENC_BIG_ENDIAN, output = true}, {input = "00", encoding = ENC_BIG_ENDIAN, output = false}, {input = "01", encoding = ENC_BIG_ENDIAN, output = true}, {input = "ff", encoding = ENC_LITTLE_ENDIAN, output = true}, {input = "00", encoding = ENC_LITTLE_ENDIAN, output = false}, {input = "01", encoding = ENC_LITTLE_ENDIAN, output = true}, } local char_tests = { {input = "ff", encoding = ENC_BIG_ENDIAN, output = 0xff}, {input = "00", encoding = ENC_BIG_ENDIAN, output = 0x00}, {input = "30", encoding = ENC_BIG_ENDIAN, output = 0x30}, {input = "ff", encoding = ENC_LITTLE_ENDIAN, output = 0xff}, {input = "00", encoding = ENC_LITTLE_ENDIAN, output = 0x00}, {input = "30", encoding = ENC_LITTLE_ENDIAN, output = 0x30}, } return true and run_test_cases_all_tests(tree, "boolean", bool_tests, nil, general_equality_test) and run_test_cases_all_tests(tree, "char", char_tests, nil, general_equality_test) end function float_tests(tree) local be_float = { {input = "3c 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0.0078125}, {input = "bd a0 00 00", encoding = ENC_BIG_ENDIAN, output = -0.078125}, {input = "3f 48 00 00", encoding = ENC_BIG_ENDIAN, output = 0.78125}, {input = "c0 fa 00 00", encoding = ENC_BIG_ENDIAN, output = -7.8125}, {input = "42 9c 40 00", encoding = ENC_BIG_ENDIAN, output = 78.125}, {input = "c4 43 50 00", encoding = ENC_BIG_ENDIAN, output = -781.25}, {input = "45 f4 24 00", encoding = ENC_BIG_ENDIAN, output = 7812.5}, {input = "c7 98 96 80", encoding = ENC_BIG_ENDIAN, output = -78125.0}, {input = "49 3e bc 20", encoding = ENC_BIG_ENDIAN, output = 781250.0}, {input = "ca ee 6b 28", encoding = ENC_BIG_ENDIAN, output = -7812500.0}, {input = "00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0.0}, {input = "80 00 00 00", encoding = ENC_BIG_ENDIAN, output = -0.0}, {input = "7f c0 00 00", encoding = ENC_BIG_ENDIAN, output = 0/0}, {input = "7f 80 00 00", encoding = ENC_BIG_ENDIAN, output = 1/0}, {input = "ff 80 00 00", encoding = ENC_BIG_ENDIAN, output = -1/0}, } local le_float = { {input = "00 00 00 3c", encoding = ENC_LITTLE_ENDIAN, output = 0.0078125}, {input = "00 00 a0 bd", encoding = ENC_LITTLE_ENDIAN, output = -0.078125}, {input = "00 00 48 3f", encoding = ENC_LITTLE_ENDIAN, output = 0.78125}, {input = "00 00 fa c0", encoding = ENC_LITTLE_ENDIAN, output = -7.8125}, {input = "00 40 9c 42", encoding = ENC_LITTLE_ENDIAN, output = 78.125}, {input = "00 50 43 c4", encoding = ENC_LITTLE_ENDIAN, output = -781.25}, {input = "00 24 f4 45", encoding = ENC_LITTLE_ENDIAN, output = 7812.5}, {input = "80 96 98 c7", encoding = ENC_LITTLE_ENDIAN, output = -78125.0}, {input = "20 bc 3e 49", encoding = ENC_LITTLE_ENDIAN, output = 781250.0}, {input = "28 6b ee ca", encoding = ENC_LITTLE_ENDIAN, output = -7812500.0}, {input = "00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0.0}, {input = "00 00 00 80", encoding = ENC_LITTLE_ENDIAN, output = -0.0}, {input = "00 00 c0 7f", encoding = ENC_LITTLE_ENDIAN, output = 0/0}, {input = "00 00 80 7f", encoding = ENC_LITTLE_ENDIAN, output = 1/0}, {input = "00 00 80 ff", encoding = 
ENC_LITTLE_ENDIAN, output = -1/0}, } local be_double = { {input = "3f 80 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0.0078125}, {input = "bf e9 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = -0.78125}, {input = "40 88 6a 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 781.25}, {input = "c0 f3 12 d0 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = -78125.0}, {input = "41 92 a0 5f 20 00 00 00", encoding = ENC_BIG_ENDIAN, output = 78125000.0}, {input = "c1 fd 1a 94 a2 00 00 00", encoding = ENC_BIG_ENDIAN, output = -7812500000.0}, {input = "42 9c 6b f5 26 34 00 00", encoding = ENC_BIG_ENDIAN, output = 7812500000000.0}, {input = "c3 06 34 57 85 d8 a0 00", encoding = ENC_BIG_ENDIAN, output = -781250000000000.0}, {input = "43 a5 af 1d 78 b5 8c 40", encoding = ENC_BIG_ENDIAN, output = 7.8125e+17}, {input = "c4 10 f0 cf 06 4d d5 92", encoding = ENC_BIG_ENDIAN, output = -7.8125e+19}, {input = "00 00 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0.0}, {input = "80 00 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = -0.0}, {input = "7f f8 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 0/0}, {input = "7f f0 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = 1/0}, {input = "ff f0 00 00 00 00 00 00", encoding = ENC_BIG_ENDIAN, output = -1/0}, } local le_double = { {input = "00 00 00 00 00 00 80 3f", encoding = ENC_LITTLE_ENDIAN, output = 0.0078125}, {input = "00 00 00 00 00 00 e9 bf", encoding = ENC_LITTLE_ENDIAN, output = -0.78125}, {input = "00 00 00 00 00 6a 88 40", encoding = ENC_LITTLE_ENDIAN, output = 781.25}, {input = "00 00 00 00 d0 12 f3 c0", encoding = ENC_LITTLE_ENDIAN, output = -78125.0}, {input = "00 00 00 20 5f a0 92 41", encoding = ENC_LITTLE_ENDIAN, output = 78125000.0}, {input = "00 00 00 a2 94 1a fd c1", encoding = ENC_LITTLE_ENDIAN, output = -7812500000.0}, {input = "00 00 34 26 f5 6b 9c 42", encoding = ENC_LITTLE_ENDIAN, output = 7812500000000.0}, {input = "00 a0 d8 85 57 34 06 c3", encoding = ENC_LITTLE_ENDIAN, output = -781250000000000.0}, {input = "40 8c b5 78 1d af a5 43", encoding = ENC_LITTLE_ENDIAN, output = 7.8125e+17}, {input = "92 d5 4d 06 cf f0 10 c4", encoding = ENC_LITTLE_ENDIAN, output = -7.8125e+19}, {input = "00 00 00 00 00 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = 0.0}, {input = "00 00 00 00 00 00 00 80", encoding = ENC_LITTLE_ENDIAN, output = -0.0}, {input = "00 00 00 00 00 00 f8 7f", encoding = ENC_LITTLE_ENDIAN, output = 0/0}, {input = "00 00 00 00 00 00 f0 7f", encoding = ENC_LITTLE_ENDIAN, output = 1/0}, {input = "00 00 00 00 00 00 f0 ff", encoding = ENC_LITTLE_ENDIAN, output = -1/0}, } function tvbr_float(tvbr, encoding) return tvbr:float(), "float()" end function tvbr_le_float(tvbr, encoding) return tvbr:le_float(), "le_float()" end return true and run_test_cases_all_tests(tree, "float", be_float, tvbr_float, float_equality_test) and run_test_cases_all_tests(tree, "double", be_double, tvbr_float, float_equality_test) and run_test_cases_all_tests(tree, "float", le_float, tvbr_le_float, float_equality_test) and run_test_cases_all_tests(tree, "double", le_double, tvbr_le_float, float_equality_test) end function address_tests(tree) --INCOMPATIBILITY: value fields always assume big-endian encoding for IPv4 addresses local ipv4_test_cases = { {input = "01 00 00 00", encoding = ENC_LITTLE_ENDIAN, output = Address.ip("0.0.0.1"), incompatible_add_pf_field = true, expect_add_pf_field_value = Address.ip("1.0.0.0"), incompatible_add_field = true, expect_add_field_value = Address.ip("1.0.0.0") }, {input = 
"00 02 00 00", encoding = ENC_LITTLE_ENDIAN, output = Address.ip("0.0.2.0"), incompatible_add_pf_field = true, expect_add_pf_field_value = Address.ip("0.2.0.0"), incompatible_add_field = true, expect_add_field_value = Address.ip("0.2.0.0") }, {input = "00 00 03 00", encoding = ENC_LITTLE_ENDIAN, output = Address.ip("0.3.0.0"), incompatible_add_pf_field = true, expect_add_pf_field_value = Address.ip("0.0.3.0"), incompatible_add_field = true, expect_add_field_value = Address.ip("0.0.3.0") }, {input = "00 00 00 04", encoding = ENC_LITTLE_ENDIAN, output = Address.ip("4.0.0.0"), incompatible_add_pf_field = true, expect_add_pf_field_value = Address.ip("0.0.0.4"), incompatible_add_field = true, expect_add_field_value = Address.ip("0.0.0.4") }, {input = "01 00 00 00", encoding = ENC_BIG_ENDIAN, output = Address.ip("1.0.0.0")}, {input = "00 02 00 00", encoding = ENC_BIG_ENDIAN, output = Address.ip("0.2.0.0")}, {input = "00 00 03 00", encoding = ENC_BIG_ENDIAN, output = Address.ip("0.0.3.0")}, {input = "00 00 00 04", encoding = ENC_BIG_ENDIAN, output = Address.ip("0.0.0.4")}, } function tvbr_ipv4 (tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_ipv4(), "le_ipv4()" else return tvbr:ipv4(), "ipv4()" end end local ipv6_test_cases = { {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 0000 0000 00ff", output = Address.ipv6("0000:0000:0000:0000:0000:0000:0000:00ff")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 0000 0000 ff00", output = Address.ipv6("0000:0000:0000:0000:0000:0000:0000:ff00")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 0000 00ff 0000", output = Address.ipv6("0000:0000:0000:0000:0000:0000:00ff:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 0000 ff00 0000", output = Address.ipv6("0000:0000:0000:0000:0000:0000:ff00:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 00ff 0000 0000", output = Address.ipv6("0000:0000:0000:0000:0000:00ff:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 0000 ff00 0000 0000", output = Address.ipv6("0000:0000:0000:0000:0000:ff00:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 00ff 0000 0000 0000", output = Address.ipv6("0000:0000:0000:0000:00ff:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 0000 ff00 0000 0000 0000", output = Address.ipv6("0000:0000:0000:0000:ff00:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 00ff 0000 0000 0000 0000", output = Address.ipv6("0000:0000:0000:00ff:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 0000 ff00 0000 0000 0000 0000", output = Address.ipv6("0000:0000:0000:ff00:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 00ff 0000 0000 0000 0000 0000", output = Address.ipv6("0000:0000:00ff:0000:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 0000 ff00 0000 0000 0000 0000 0000", output = Address.ipv6("0000:0000:ff00:0000:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 00ff 0000 0000 0000 0000 0000 0000", output = Address.ipv6("0000:00ff:0000:0000:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "0000 ff00 0000 0000 0000 0000 0000 0000", output = Address.ipv6("0000:ff00:0000:0000:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "00ff 0000 0000 0000 0000 0000 0000 0000", output = Address.ipv6("00ff:0000:0000:0000:0000:0000:0000:0000")}, {encoding = ENC_BIG_ENDIAN, input = "ff00 0000 0000 0000 0000 0000 0000 0000", output = 
Address.ipv6("ff00:0000:0000:0000:0000:0000:0000:0000")}, } function tvbr_ipv6 (tvbr, encoding) return tvbr:ipv6(), "ipv6()" end local ether_test_cases = { {input = "ff 00 00 00 00 00", encoding = 0, output = Address.ether("ff:00:00:00:00:00")}, {input = "00 ff 00 00 00 00", encoding = 0, output = Address.ether("00:ff:00:00:00:00")}, {input = "00 00 ff 00 00 00", encoding = 0, output = Address.ether("00:00:ff:00:00:00")}, {input = "00 00 00 ff 00 00", encoding = 0, output = Address.ether("00:00:00:ff:00:00")}, {input = "00 00 00 00 ff 00", encoding = 0, output = Address.ether("00:00:00:00:ff:00")}, {input = "00 00 00 00 00 ff", encoding = 0, output = Address.ether("00:00:00:00:00:ff")}, } function tvbr_ether (tvbr, encoding) return tvbr:ether(), "ether()" end return true and run_test_cases_all_tests(tree, "ipv4", ipv4_test_cases, tvbr_ipv4, general_equality_test) and run_test_cases_all_tests(tree, "ipv6", ipv6_test_cases, tvbr_ipv6, general_equality_test) and run_test_cases_all_tests(tree, "ether", ether_test_cases, tvbr_ether, general_equality_test) end function time_tests(tree) local time_cases = { {input ="00 01 02 03", encoding = ENC_BIG_ENDIAN, output = NSTime(0x00010203,0)}, {input ="03 02 01 00", encoding = ENC_LITTLE_ENDIAN, output = NSTime(0x00010203,0)}, {input ="00 01 02 03 04 05 06 07", encoding = ENC_BIG_ENDIAN, output = NSTime(0x00010203, 0x04050607)}, {input ="03 02 01 00 07 06 05 04", encoding = ENC_LITTLE_ENDIAN, output = NSTime(0x00010203, 0x04050607)}, } local string_cases = { {input = "1994-11-05T13:15:30Z", encoding = ENC_ISO_8601_DATE_TIME, output = NSTime(784041330, 0), hexlify=true}, {input = "1994-11-05T13:15:30Z12345", encoding = ENC_ISO_8601_DATE_TIME, output = NSTime(784041330, 0), hexlify=true, variable_input_length = true, input_length = 20}, } function tvbr_nstime(tvbr, encoding) if encoding == ENC_LITTLE_ENDIAN then return tvbr:le_nstime(), "le_nstime()" else return tvbr:nstime(encoding), string.format("nstime(%s)", format_encoding_for_print(encoding)) end end return true and run_test_cases_all_tests(tree, "relative_time", time_cases, tvbr_nstime, general_equality_test) and run_test_cases_all_tests(tree, "absolute_time", time_cases, tvbr_nstime, general_equality_test) and run_test_cases_all_tests(tree, "absolute_time", string_cases, tvbr_nstime, general_equality_test) end function bytearray_tests(tree) local bytes_tests = { {input = "00 01 02 03 ff", encoding = 0, output = ByteArray.new("00 01 02 03 ff")} } function tvbr_bytes(tvbr, encoding) return tvbr:bytes(), "bytes()" end local varbytes_tests = { {input = "04 00 01 02 ff", encoding = ENC_BIG_ENDIAN, output = ByteArray.new("00 01 02 ff"), fake_input_length = 1}, {input = "00 04 00 01 02 ff", encoding = ENC_BIG_ENDIAN, output = ByteArray.new("00 01 02 ff"), fake_input_length = 2}, {input = "00 00 00 04 00 01 02 ff", encoding = ENC_BIG_ENDIAN, output = ByteArray.new("00 01 02 ff"), fake_input_length = 4}, } return true and run_test_cases_all_tests(tree, "bytes", bytes_tests, tvbr_bytes, general_equality_test) and run_test_cases_all_tests(tree, "oid", bytes_tests, tvbr_bytes, general_equality_test) and run_test_cases_all_tests(tree, "rel_oid", bytes_tests, tvbr_bytes, general_equality_test) and run_test_cases_all_tests(tree, "system_id", bytes_tests, tvbr_bytes, general_equality_test) and run_test_cases_all_tests(tree, "uint_bytes", varbytes_tests, nil, general_equality_test) end function run_all_tests(tree) return true and simple_integer_tests(tree) and integer64_tests(tree) and string_tests(tree) and 
bool_char_tests(tree) and float_tests(tree) and address_tests(tree) and time_tests(tree) and bytearray_tests(tree) end local has_run = false function myproto.dissector(tvb, pkt, root) if has_run then return end has_run = true local tree = root:add(myproto, tvb(0)) if run_all_tests(tree) then print("All tests passed!") print() end end DissectorTable.get("udp.port"):add(65333, myproto)
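
----------------------------------------
-- Illustrative aside (not part of the test): the three return values exercised
-- above, in the shape a dissector would typically consume them. A sketch only;
-- demo_pf stands in for any ProtoField already registered on a Proto, and the
-- tvbrange/encoding are placeholders.
local function demo_add_packet_field(tree, tvbr, demo_pf)
    -- leaf is the new TreeItem, value is the decoded value, and offset is the
    -- index just past the field, which is handy for stringz-style fields whose
    -- length is only known after decoding.
    local leaf, value, offset = tree:add_packet_field(demo_pf, tvbr, ENC_UTF_16 + ENC_LITTLE_ENDIAN)
    return leaf, value, offset
end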
Lua
wireshark/test/lua/dir.lua
-- test script for wslua Dir functions

local testlib = require("testlib")

local OTHER = "other"
testlib.init( { [OTHER] = 0 } )

------------- helper funcs ------------

-- the following are so we can use pcall (which needs a function to call)
local function callDirFuncBase(name, t)
    t.result = Dir[name]()
    return true
end

local function callDirFunc(name, val, t)
    t.result = Dir[name](val)
    return true
end

local function makeFile(filename)
    local f = io.open(filename, "w")
    if not f then
        error("failed to make file '" .. filename .. "' in directory\n" ..
              "make sure to delete the 'temp' directory before running again")
    end
    f:write("fooobarrloo")
    f:close()
    return true
end

--------------------------

-- for our called function results
local t = {}

testlib.testing("Dir basics")

testlib.test(OTHER,"global", _G.Dir ~= nil)
testlib.test(OTHER,"global", type(Dir.make) == 'function')
testlib.test(OTHER,"global", type(Dir.remove) == 'function')
testlib.test(OTHER,"global", type(Dir.remove_all) == 'function')
testlib.test(OTHER,"global", type(Dir.open) == 'function')
testlib.test(OTHER,"global", type(Dir.close) == 'function')
testlib.test(OTHER,"global", type(Dir.exists) == 'function')
testlib.test(OTHER,"global", type(Dir.personal_config_path) == 'function')
testlib.test(OTHER,"global", type(Dir.global_config_path) == 'function')
testlib.test(OTHER,"global", type(Dir.personal_plugins_path) == 'function')
testlib.test(OTHER,"global", type(Dir.global_plugins_path) == 'function')

testlib.testing("Dir paths/filenames")

testlib.test(OTHER,"Dir.__FILE__", __FILE__ ~= nil)
testlib.test(OTHER,"Dir.__DIR__", __DIR__ ~= nil)
testlib.test(OTHER,"Dir.exists", pcall(callDirFunc, "exists", "temp", t))
testlib.test(OTHER,"Dir.personal_config_path", pcall(callDirFuncBase, "personal_config_path", t))
testlib.test(OTHER,"Dir.global_config_path", pcall(callDirFuncBase, "global_config_path", t))
testlib.test(OTHER,"Dir.personal_plugins_path", pcall(callDirFuncBase, "personal_plugins_path", t))
testlib.test(OTHER,"Dir.global_plugins_path", pcall(callDirFuncBase, "global_plugins_path", t))

-- Users expect trailing slashes for DATA_DIR and USER_DIR (bug 14619).
local dirsep = package.config:sub(1,1)
testlib.test(OTHER,"DATA_DIR", string.sub(DATA_DIR, -1) == dirsep)
testlib.test(OTHER,"USER_DIR", string.sub(USER_DIR, -1) == dirsep)

print("\nFor your information, I got the following info:\n")
print("__FILE__ = '" .. __FILE__ .. "'")
print("__DIR__ = '" .. __DIR__ .. "'")
print("personal_config_path = '" .. Dir.personal_config_path() .. "'")
print("global_config_path = '" .. Dir.global_config_path() .. "'")
print("personal_plugins_path = '" .. Dir.personal_plugins_path() .. "'")
print("global_plugins_path = '" .. Dir.global_plugins_path() ..
"'") print("\n") testlib.testing("Directory manipulation") testlib.test(OTHER,"Dir.exists", pcall(callDirFunc, "exists", "temp", t)) if t.result == true or t.result == false then error("this testsuite requires there be no 'temp' directory or file; please remove it") end testlib.testing("Dir.make") testlib.test(OTHER,"Dir.make", pcall(callDirFunc, "make", "temp", t) and t.result == true) testlib.test(OTHER,"Dir.exists", pcall(callDirFunc, "exists", "temp", t) and t.result == true) -- make the same dir, should give false testlib.test(OTHER,"Dir.make", pcall(callDirFunc, "make", "temp", t) and t.result == false) testlib.testing("Dir.remove") testlib.test(OTHER,"Dir.remove", pcall(callDirFunc, "remove", "temp", t) and t.result == true) testlib.test(OTHER,"Dir.exists", pcall(callDirFunc, "exists", "temp", t) and t.result == nil) testlib.test(OTHER,"Dir.remove", pcall(callDirFunc, "remove", "temp", t) and t.result == false) Dir.make("temp") makeFile("temp/file.txt") -- will return nil because temp has a file testlib.test(OTHER,"Dir.remove", pcall(callDirFunc, "remove", "temp", t) and t.result == nil) testlib.testing("Dir.remove_all") testlib.test(OTHER,"Dir.remove_all", pcall(callDirFunc, "remove_all", "temp", t) and t.result == true) testlib.test(OTHER,"Dir.remove_all", pcall(callDirFunc, "remove_all", "temp", t) and t.result == false) Dir.make("temp") makeFile("temp/file1.txt") makeFile("temp/file2.txt") makeFile("temp/file3.txt") testlib.test(OTHER,"Dir.remove_all", pcall(callDirFunc, "remove_all", "temp", t) and t.result == true) testlib.test(OTHER,"Dir.remove_all", pcall(callDirFunc, "remove_all", "temp", t) and t.result == false) testlib.testing("Dir.open") Dir.make("temp") makeFile("temp/file1.txt") makeFile("temp/file2.txt") makeFile("temp/file3.txt") testlib.test(OTHER,"Dir.open", pcall(callDirFunc, "open", "temp", t)) testlib.test(OTHER,"Dir.open", type(t.result) == 'userdata') testlib.test(OTHER,"Dir.open", typeof(t.result) == 'Dir') io.stdout:write("calling Dir object...") local dir = t.result local files = {} files[dir()] = true io.stdout:write("passed\n") files[dir()] = true files[dir()] = true testlib.test(OTHER,"Dir.call", files["file1.txt"]) testlib.test(OTHER,"Dir.call", files["file2.txt"]) testlib.test(OTHER,"Dir.call", files["file3.txt"]) testlib.test(OTHER,"Dir.call", dir() == nil) testlib.test(OTHER,"Dir.call", dir() == nil) testlib.testing("Dir.close") testlib.test(OTHER,"Dir.close", pcall(callDirFunc, "close", dir, t)) testlib.test(OTHER,"Dir.close", pcall(callDirFunc, "close", dir, t)) testlib.testing("Negative testing 1") -- now try breaking it testlib.test(OTHER,"Dir.open", pcall(callDirFunc, "open", "temp", t)) dir = t.result -- call dir() now files = {} files[dir()] = true Dir.remove_all("temp") -- call it again files[dir()] = true files[dir()] = true testlib.test(OTHER,"Dir.call", files["file1.txt"]) testlib.test(OTHER,"Dir.call", files["file2.txt"]) testlib.test(OTHER,"Dir.call", files["file3.txt"]) testlib.test(OTHER,"Dir.close", pcall(callDirFunc, "close", dir, t)) testlib.testing("Negative testing 2") -- do it again, but this time don't do dir() until after removing the files Dir.make("temp") makeFile("temp/file1.txt") makeFile("temp/file2.txt") makeFile("temp/file3.txt") testlib.test(OTHER,"Dir.open", pcall(callDirFunc, "open", "temp", t)) dir = t.result Dir.remove_all("temp") -- now do it file = dir() testlib.test(OTHER,"Dir.call", file == nil) testlib.test(OTHER,"Dir.close", pcall(callDirFunc, "close", dir, t)) -- negative tests testlib.testing("Negative 
testing 3") -- invalid args testlib.test(OTHER,"Dir.make", not pcall(callDirFunc, "make", {}, t)) testlib.test(OTHER,"Dir.make", not pcall(callDirFunc, "make", nil, t)) testlib.test(OTHER,"Dir.remove", not pcall(callDirFunc, "remove", {}, t)) testlib.test(OTHER,"Dir.remove", not pcall(callDirFunc, "remove", nil, t)) testlib.test(OTHER,"Dir.remove_all", not pcall(callDirFunc, "remove_all", {}, t)) testlib.test(OTHER,"Dir.remove_all", not pcall(callDirFunc, "remove_all", nil, t)) testlib.test(OTHER,"Dir.open", not pcall(callDirFunc, "open", {}, t)) testlib.test(OTHER,"Dir.open", not pcall(callDirFunc, "open", nil, t)) testlib.test(OTHER,"Dir.close", not pcall(callDirFunc, "close", "dir", t)) testlib.test(OTHER,"Dir.close", not pcall(callDirFunc, "close", nil, t)) print("\n-----------------------------\n") testlib.getResults()
Lua
wireshark/test/lua/dissectFPM.lua
----------------------------------------
--
-- author: Hadriel Kaplan <[email protected]>
-- Copyright (c) 2015, Hadriel Kaplan
-- This code is in the Public Domain, or the BSD (3 clause) license
-- if Public Domain does not apply in your country.
--
-- Version: 1.0
--
------------------------------------------
--[[
    This code is a plugin for Wireshark, to dissect Quagga FPM Netlink
    protocol messages over TCP.

    This script is used for testing, so it does some odd things:
    * it dissects the FPM in two ways, controlled by a pref setting:
        1) using the desegment_offset/desegment_len method
        2) using the dissect_tcp_pdus() method
    * it removes any existing FPM dissector; there isn't one right now
      but there likely will be in the future.

    Wireshark has a "Netlink" protocol dissector, but it currently expects
    to be running on a Linux cooked-mode SLL header and link type. That's
    because Netlink has traditionally been used between the Linux kernel
    and user-space apps. But the open-source Quagga, zebra, and the
    commercial ZebOS routing products also send Netlink messages over TCP
    to other processes or even outside the box, to a "Forwarding Plane
    Manager" (FPM) that controls forwarding-plane devices (typically
    hardware).

    The Netlink message is encapsulated within an FPM header, which
    identifies an FPM message version (currently 1), the type of message
    it contains (namely a Netlink message), and its length.

    So we have:

    struct fpm_msg_hdr_t
    {
        uint8_t  version;
        uint8_t  msg_type;
        uint16_t msg_len;
    }

    followed by a Netlink message.
]]----------------------------------------
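-- For illustration only (not part of the original script): a minimal sketch
-- of pulling the three fpm_msg_hdr_t fields above out of a Tvb by hand,
-- using the same big-endian layout the dissector functions below rely on.
-- Defined here but never called; "tvbuf" is whatever Tvb a dissector is handed.
local function sketch_parse_fpm_hdr(tvbuf)
    local version  = tvbuf:range(0, 1):uint()   -- uint8_t version
    local msg_type = tvbuf:range(1, 1):uint()   -- uint8_t msg_type
    local msg_len  = tvbuf:range(2, 2):uint()   -- uint16_t msg_len, big-endian
    return version, msg_type, msg_len
end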
----------------------------------------
-- do not modify this table
local debug_level = {
    DISABLED = 0,
    LEVEL_1  = 1,
    LEVEL_2  = 2
}

-- set this DEBUG to debug_level.LEVEL_1 to enable printing debug_level info
-- set it to debug_level.LEVEL_2 to enable really verbose printing
-- note: this will be overridden by user's preference settings
local DEBUG = debug_level.LEVEL_1

local default_settings = {
    debug_level  = DEBUG,
    enabled      = true,  -- whether this dissector is enabled or not
    port         = 2620,
    max_msg_len  = 4096,
    desegment    = true,  -- whether to TCP desegment or not
    dissect_tcp  = false, -- whether to use the dissect_tcp_pdus method or not
    subdissect   = true,  -- whether to call sub-dissector or not
    subdiss_type = wtap.NETLINK, -- the encap we get the subdissector for
}

local dprint = function() end
local dprint2 = function() end
local function reset_debug_level()
    if default_settings.debug_level > debug_level.DISABLED then
        dprint = function(...)
            print(table.concat({"Lua:", ...}," "))
        end

        if default_settings.debug_level > debug_level.LEVEL_1 then
            dprint2 = dprint
        end
    end
end
-- call it now
reset_debug_level()

----------------------------------------
-- creates a Proto object, but doesn't register it yet
local fpmProto = Proto("fpm", "FPM Header")

----------------------------------------
-- a function to convert tables of enumerated types to valstring tables
-- i.e., from { "name" = number } to { number = "name" }
local function makeValString(enumTable)
    local t = {}
    for name,num in pairs(enumTable) do
        t[num] = name
    end
    return t
end

local MsgType = {
    NONE    = 0,
    NETLINK = 1,
}
local msgtype_valstr = makeValString(MsgType)

----------------------------------------
-- a table of all of our Protocol's fields
local hdr_fields = {
    version  = ProtoField.uint8 ("fpm.version", "Version", base.DEC),
    msg_type = ProtoField.uint8 ("fpm.type", "Type", base.DEC, msgtype_valstr),
    msg_len  = ProtoField.uint16("fpm.length", "Length", base.DEC),
}

-- create a flat array table of the above that can be registered
local pfields = {}

-- recursive function to flatten the table into pfields
local function flattenTable(tbl)
    for k,v in pairs(tbl) do
        if type(v) == 'table' then
            flattenTable(v)
        else
            pfields[#pfields+1] = v
        end
    end
end
-- call it
flattenTable(hdr_fields)
-- register them
fpmProto.fields = pfields

dprint2("fpmProto ProtoFields registered")

----------------------------------------
-- some forward "declarations" of helper functions we use in the dissector
local createSLL

-- due to a bug in wireshark, we need to keep newly created tvb's for longer
-- than the duration of the dissect function
local tvbs = {}

function fpmProto.init()
    tvbs = {}
end

local FPM_MSG_HDR_LEN = 4

----------------------------------------
-- the following function is used for the new dissect_tcp_pdus method
-- this one returns the length of the full message
local function get_fpm_length(tvbuf, pktinfo, offset)
    dprint2("FPM get_fpm_length function called")
    local lengthVal = tvbuf:range(offset + 2, 2):uint()

    if lengthVal > default_settings.max_msg_len then
        -- too many bytes, invalid message
        dprint("FPM message length is too long: ", lengthVal)
        lengthVal = tvbuf:len()
    end

    return lengthVal
end

-- the following is the dissection function called for
-- the new dissect_tcp_pdus method
local function dissect_fpm_pdu(tvbuf, pktinfo, root)
    dprint2("FPM dissect_fpm_pdu function called")

    local lengthTvbr = tvbuf:range(2, 2)
    local lengthVal  = lengthTvbr:uint()

    -- set the protocol column to show our protocol name
    pktinfo.cols.protocol:set("FPM")

    -- We start by adding our protocol to the dissection display tree.
    -- dissect_tcp_pdus hands us a tvb that starts at the beginning of one PDU
    local tree = root:add(fpmProto, tvbuf:range(0, lengthVal))

    local versionTvbr = tvbuf:range(0, 1)
    local versionVal  = versionTvbr:uint()
    tree:add(hdr_fields.version, versionTvbr)

    local msgTypeTvbr = tvbuf:range(1, 1)
    local msgTypeVal  = msgTypeTvbr:uint()
    tree:add(hdr_fields.msg_type, msgTypeTvbr)

    tree:add(hdr_fields.msg_len, lengthTvbr)

    local result
    if (versionVal == 1) and (msgTypeVal == MsgType.NETLINK) then
        -- it carries a Netlink message, so we're going to create
        -- a fake Linux SLL header for the built-in Netlink dissector
        local payload = tvbuf:raw(FPM_MSG_HDR_LEN, lengthVal - FPM_MSG_HDR_LEN)
        result = createSLL(payload)
    end

    -- looks good, go dissect it
    if result then
        -- ok now the hard part - try calling a sub-dissector?
        -- only if settings/prefs told us to of course...
        if default_settings.subdissect then
            dprint2("FPM trying sub-dissector for wtap encap type:", default_settings.subdiss_type)

            -- due to a bug in wireshark, we need to keep newly created tvb's for longer
            -- than the duration of the dissect function
            tvbs[#tvbs+1] = ByteArray.new(result, true):tvb("Netlink Message")
            DissectorTable.get("wtap_encap"):try(default_settings.subdiss_type, tvbs[#tvbs], pktinfo, root)

            -- local tvb = ByteArray.new(result, true):tvb("Netlink Message")
            -- DissectorTable.get("wtap_encap"):try(default_settings.subdiss_type, tvb, pktinfo, root)
            dprint2("FPM returning from sub-dissector")
        end
    else
        dprint("FPM header not correctly dissected")
    end

    return lengthVal, 0
end

----------------------------------------
-- the following function is used for dissecting using the
-- old desegment_offset/desegment_len method
-- it's a separate function because we run over TCP and thus might
-- need to parse multiple messages in a single segment
local function dissect(tvbuf, pktinfo, root, offset, origlen)
    dprint2("FPM dissect function called")

    local pktlen = origlen - offset

    if pktlen < FPM_MSG_HDR_LEN then
        -- we need more bytes
        pktinfo.desegment_offset = offset
        pktinfo.desegment_len = DESEGMENT_ONE_MORE_SEGMENT
        return 0, DESEGMENT_ONE_MORE_SEGMENT
    end

    local lengthTvbr = tvbuf:range(offset + 2, 2)
    local lengthVal  = lengthTvbr:uint()

    if lengthVal > default_settings.max_msg_len then
        -- too many bytes, invalid message
        dprint("FPM message length is too long: ", lengthVal)
        return pktlen, 0
    end

    if pktlen < lengthVal then
        dprint2("Need more bytes to desegment FPM")
        pktinfo.desegment_offset = offset
        pktinfo.desegment_len = (lengthVal - pktlen)
        return 0, -(lengthVal - pktlen)
    end

    -- set the protocol column to show our protocol name
    pktinfo.cols.protocol:set("FPM")

    -- We start by adding our protocol to the dissection display tree.
    local tree = root:add(fpmProto, tvbuf:range(offset, lengthVal))

    local versionTvbr = tvbuf:range(offset, 1)
    local versionVal  = versionTvbr:uint()
    tree:add(hdr_fields.version, versionTvbr)

    local msgTypeTvbr = tvbuf:range(offset + 1, 1)
    local msgTypeVal  = msgTypeTvbr:uint()
    tree:add(hdr_fields.msg_type, msgTypeTvbr)

    tree:add(hdr_fields.msg_len, lengthTvbr)

    local result
    if (versionVal == 1) and (msgTypeVal == MsgType.NETLINK) then
        -- it carries a Netlink message, so we're going to create
        -- a fake Linux SLL header for the built-in Netlink dissector
        local payload = tvbuf:raw(offset + FPM_MSG_HDR_LEN, lengthVal - FPM_MSG_HDR_LEN)
        result = createSLL(payload)
    end

    -- looks good, go dissect it
    if result then
        -- ok now the hard part - try calling a sub-dissector?
        -- only if settings/prefs told us to of course...
        if default_settings.subdissect then
            dprint2("FPM trying sub-dissector for wtap encap type:", default_settings.subdiss_type)

            -- due to a bug in wireshark, we need to keep newly created tvb's for longer
            -- than the duration of the dissect function
            tvbs[#tvbs+1] = ByteArray.new(result, true):tvb("Netlink Message")
            DissectorTable.get("wtap_encap"):try(default_settings.subdiss_type, tvbs[#tvbs], pktinfo, root)

            -- local tvb = ByteArray.new(result, true):tvb("Netlink Message")
            -- DissectorTable.get("wtap_encap"):try(default_settings.subdiss_type, tvb, pktinfo, root)
            dprint2("FPM returning from sub-dissector")
        end
    else
        dprint("FPM header not correctly dissected")
    end

    return lengthVal, 0
end

----------------------------------------
-- The following creates the callback function for the dissector.
-- It's the same as doing "fpmProto.dissector = function (tvbuf,pktinfo,root)"
-- The 'tvbuf' is a Tvb object, 'pktinfo' is a Pinfo object, and 'root' is a TreeItem object.
-- Whenever Wireshark dissects a packet that our Proto is hooked into, it will call
-- this function and pass it these arguments for the packet it's dissecting.
function fpmProto.dissector(tvbuf, pktinfo, root)
    dprint2("fpmProto.dissector called")

    local bytes_consumed = 0

    if default_settings.dissect_tcp then
        dprint2("using new dissect_tcp_pdus method")
        dissect_tcp_pdus(tvbuf, root, FPM_MSG_HDR_LEN, get_fpm_length, dissect_fpm_pdu, default_settings.desegment)
        bytes_consumed = tvbuf:len()

    else
        dprint2("using old desegment_offset/desegment_len method")

        -- get the length of the packet buffer (Tvb).
        local pktlen = tvbuf:len()

        local offset, bytes_needed = 0, 0

        tvbs = {}

        while bytes_consumed < pktlen do
            offset, bytes_needed = dissect(tvbuf, pktinfo, root, bytes_consumed, pktlen)

            if offset == 0 then
                if bytes_consumed > 0 then
                    return bytes_consumed
                else
                    return bytes_needed
                end
            end

            bytes_consumed = bytes_consumed + offset
        end
    end

    return bytes_consumed
end

----------------------------------------
-- we want to have our protocol dissection invoked for a specific TCP port,
-- so get the TCP dissector table and add our protocol to it
-- first remove any existing dissector for that port, if there is one
local old_dissector = DissectorTable.get("tcp.port"):get_dissector(default_settings.port)
if old_dissector then
    dprint("Retrieved existing dissector")
end

local function enableDissector()
    DissectorTable.get("tcp.port"):set(default_settings.port, fpmProto)
end
-- call it now
enableDissector()

local function disableDissector()
    if old_dissector then
        DissectorTable.get("tcp.port"):set(default_settings.port, old_dissector)
    end
end

--------------------------------------------------------------------------------
-- preferences handling stuff
--------------------------------------------------------------------------------

local debug_pref_enum = {
    { 1, "Disabled", debug_level.DISABLED },
    { 2, "Level 1",  debug_level.LEVEL_1  },
    { 3, "Level 2",  debug_level.LEVEL_2  },
}

----------------------------------------
-- register our preferences
fpmProto.prefs.enabled = Pref.bool("Dissector enabled", default_settings.enabled,
                                   "Whether the FPM dissector is enabled or not")

fpmProto.prefs.desegment = Pref.bool("Reassemble FPM messages spanning multiple TCP segments",
                                     default_settings.desegment,
                                     "Whether the FPM dissector should reassemble"..
                                     " messages spanning multiple TCP segments."..
                                     " To use this option, you must also enable"..
                                     " \"Allow subdissectors to reassemble TCP"..
                                     " streams\" in the TCP protocol settings.")

fpmProto.prefs.dissect_tcp = Pref.bool("Use dissect_tcp_pdus", default_settings.dissect_tcp,
                                       "Whether the FPM dissector should use the new" ..
                                       " dissect_tcp_pdus model or not")

fpmProto.prefs.subdissect = Pref.bool("Enable sub-dissectors", default_settings.subdissect,
                                      "Whether the FPM packet's content" ..
                                      " should be dissected or not")
" should be dissected or not") fpmProto.prefs.debug = Pref.enum("Debug", default_settings.debug_level, "The debug printing level", debug_pref_enum) ---------------------------------------- -- a function for handling prefs being changed function fpmProto.prefs_changed() dprint2("prefs_changed called") default_settings.dissect_tcp = fpmProto.prefs.dissect_tcp default_settings.subdissect = fpmProto.prefs.subdissect default_settings.debug_level = fpmProto.prefs.debug reset_debug_level() if default_settings.enabled ~= fpmProto.prefs.enabled then default_settings.enabled = fpmProto.prefs.enabled if default_settings.enabled then enableDissector() else disableDissector() end -- have to reload the capture file for this type of change reload() end end dprint2("pcapfile Prefs registered") ---------------------------------------- -- the hatype field of the SLL must be 824 decimal, in big-endian encoding (0x0338) local ARPHRD_NETLINK = "\003\056" local WS_NETLINK_ROUTE = "\000\000" local function emptyBytes(num) return string.rep("\000", num) end createSLL = function (payload) dprint2("FPM createSLL function called") local sllmsg = { emptyBytes(2), -- Unused 2B ARPHRD_NETLINK, -- netlink type emptyBytes(10), -- Unused 10B WS_NETLINK_ROUTE, -- Route type payload -- the Netlink message } return table.concat(sllmsg) end
Lua
wireshark/test/lua/dissector.lua
----------------------------------------
-- script-name: dns_dissector.lua
--
-- author: Hadriel Kaplan <hadrielk at yahoo dot com>
-- Copyright (c) 2014, Hadriel Kaplan
-- This code is in the Public Domain, or the BSD (3 clause) license if Public Domain does not apply
-- in your country.
--
-- Version: 2.1
--
-- Changes since 2.0:
--   * fixed a bug with default settings
--   * added ability for command-line to override defaults
--
-- Changes since 1.0:
--   * made it use the new ProtoExpert class model for expert info
--   * added a protocol column with the proto name
--   * added heuristic dissector support
--   * added preferences settings
--   * removed byteArray2String(), and uses the new ByteArray:raw() method instead
--
-- BACKGROUND:
-- This is an example Lua script for a protocol dissector. The purpose of this script is two-fold:
--   * To provide a reference tutorial for others writing Wireshark dissectors in Lua
--   * To test various functions being called in various ways, so this script can be used in the test-suites
-- I've tried to meet both of those goals, but it wasn't easy. No doubt some folks will wonder why some
-- functions are called some way, or differently than previous invocations of the same function. I'm trying
-- both to show that it can be done numerous ways and to test those numerous ways, and my more
-- immediate need is for test coverage rather than a tutorial guide. (the Lua API is sorely lacking in test scripts)
--
-- OVERVIEW:
-- This script creates an elementary dissector for DNS. It's neither comprehensive nor error-free with regards
-- to the DNS protocol. That's OK. The goal isn't to fully dissect DNS properly - Wireshark already has a good
-- DNS dissector built-in. We don't need another one. We also have other example Lua scripts, but I don't think
-- they do a good job of explaining things, and the nice thing about this one is that getting capture files to
-- run it against is trivial. (plus I uploaded one)
--
-- HOW TO RUN THIS SCRIPT:
-- Wireshark and Tshark support multiple ways of loading Lua scripts: through a dofile() call in init.lua,
-- through the file being in either the global or personal plugins directories, or via the command line.
-- See the Wireshark User's Guide chapter on Lua (https://www.wireshark.org/docs/wsdg_html_chunked/wsluarm_modules.html).
-- Once the script is loaded, it creates a new protocol named "MyDNS" (or "MYDNS" in some places). If you have
-- a capture file with DNS packets in it, simply select one in the Packet List pane, right-click on it, and
-- select "Decode As ...", and then in the dialog box that shows up scroll down the list of protocols to one
-- called "MYDNS", select that and click the "ok" or "apply" button. Voilà, you're now decoding DNS packets
-- using the simplistic dissector in this script. Another way is to download the capture file made for
-- this script, and open that - since the DNS packets in it use UDP port 65333 (instead of the default 53),
-- and since the MyDNS protocol in this script has been set to automatically decode UDP port 65333, it will
-- automagically do it without doing "Decode As ...".
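--
-- For illustration only (not part of the original script): the init.lua
-- loading method mentioned above is a one-liner added to init.lua; the path
-- below is a placeholder:
--
--     dofile("/full/path/to/dissector.lua")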
--
----------------------------------------
-- do not modify this table
local debug_level = {
    DISABLED = 0,
    LEVEL_1  = 1,
    LEVEL_2  = 2
}

-- set this DEBUG to debug_level.LEVEL_1 to enable printing debug_level info
-- set it to debug_level.LEVEL_2 to enable really verbose printing
-- note: this will be overridden by user's preference settings
local DEBUG = debug_level.LEVEL_1

local default_settings = {
    debug_level  = DEBUG,
    port         = 65333,
    heur_enabled = true,
    heur_regmode = 1,
}

-- for testing purposes, we want to be able to pass in changes to the defaults
-- from the command line; because you can't set lua preferences from the command
-- line using the '-o' switch (the preferences don't exist until this script is
-- loaded, so the command line thinks they're invalid preferences being set),
-- we pass them in as command arguments instead, and handle them here:
local args = {...} -- get passed-in args
if args and #args > 0 then
    for _, arg in ipairs(args) do
        local name, value = arg:match("(.+)=(.+)")
        if name and value then
            if tonumber(value) then
                value = tonumber(value)
            elseif value == "true" or value == "TRUE" then
                value = true
            elseif value == "false" or value == "FALSE" then
                value = false
            elseif value == "DISABLED" then
                value = debug_level.DISABLED
            elseif value == "LEVEL_1" then
                value = debug_level.LEVEL_1
            elseif value == "LEVEL_2" then
                value = debug_level.LEVEL_2
            else
                error("invalid commandline argument value")
            end
        else
            error("invalid commandline argument syntax")
        end

        default_settings[name] = value
    end
end
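-- For illustration only (not part of the original script): tshark's -X option
-- is how such arguments get passed in; "lua_script1" addresses the first
-- -X lua_script loaded, and the argument values shown are placeholders:
--
--     tshark -X lua_script:dissector.lua -X lua_script1:port=65530 -X lua_script1:heur_enabled=false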
local dprint = function() end
local dprint2 = function() end
local function reset_debug_level()
    if default_settings.debug_level > debug_level.DISABLED then
        dprint = function(...)
            print(table.concat({"Lua:", ...}," "))
        end

        if default_settings.debug_level > debug_level.LEVEL_1 then
            dprint2 = dprint
        end
    end
end
-- call it now
reset_debug_level()

dprint2("Wireshark version = ", get_version())
dprint2("Lua version = ", _VERSION)

----------------------------------------
-- Unfortunately, the older Wireshark/Tshark versions have bugs, and part of the point
-- of this script is to test that those bugs are now fixed. So we need to check the version
-- and error out if it's too old.
local major, minor, micro = get_version():match("(%d+)%.(%d+)%.(%d+)")
if major and tonumber(major) <= 1 and ((tonumber(minor) <= 10) or (tonumber(minor) == 11 and tonumber(micro) < 3)) then
    error(  "Sorry, but your Wireshark/Tshark version ("..get_version()..") is too old for this script!\n"..
            "This script needs Wireshark/Tshark version 1.11.3 or higher.\n" )
end

-- more sanity checking
-- verify we have the ProtoExpert class in wireshark, as that's the newest thing this file uses
assert(ProtoExpert.new, "Wireshark does not have the ProtoExpert class, so it's too old - get the latest 1.11.3 or higher")

----------------------------------------
----------------------------------------
-- creates a Proto object, but doesn't register it yet
local dns = Proto("mydns","MyDNS Protocol")

----------------------------------------
-- multiple ways to do the same thing: create a protocol field (but not register it yet)
-- the abbreviation should always have "<myproto>." before the specific abbreviation, to avoid collisions
local pf_trasaction_id      = ProtoField.new("Transaction ID", "mydns.trans_id", ftypes.UINT16)
local pf_flags              = ProtoField.new("Flags", "mydns.flags", ftypes.UINT16, nil, base.HEX)
local pf_num_questions      = ProtoField.uint16("mydns.num_questions", "Number of Questions")
local pf_num_answers        = ProtoField.uint16("mydns.num_answers", "Number of Answer RRs")
local pf_num_authority_rr   = ProtoField.uint16("mydns.num_authority_rr", "Number of Authority RRs")
local pf_num_additional_rr  = ProtoField.uint16("mydns.num_additional_rr", "Number of Additional RRs")

-- within the flags field, we want to parse/show the bits separately
-- note the "base" argument becomes the size of the bitmask'ed field when ftypes.BOOLEAN is used
-- the "mask" argument is which bits we want to use for this field (e.g., base=16 and mask=0x8000
-- means we want the top bit of a 16-bit field)
-- again the following shows different ways of doing the same thing basically
local pf_flag_response            = ProtoField.new("Response", "mydns.flags.response", ftypes.BOOLEAN, {"this is a response","this is a query"}, 16, 0x8000, "is the message a response?")
local pf_flag_opcode              = ProtoField.new("Opcode", "mydns.flags.opcode", ftypes.UINT16, nil, base.DEC, 0x7800, "operation code")
local pf_flag_authoritative       = ProtoField.new("Authoritative", "mydns.flags.authoritative", ftypes.BOOLEAN, nil, 16, 0x0400, "is the response authoritative?")
local pf_flag_truncated           = ProtoField.bool("mydns.flags.truncated", "Truncated", 16, nil, 0x0200, "is the message truncated?")
local pf_flag_recursion_desired   = ProtoField.bool("mydns.flags.recursion_desired", "Recursion desired", 16, {"yes","no"}, 0x0100, "do the query recursively?")
local pf_flag_recursion_available = ProtoField.bool("mydns.flags.recursion_available", "Recursion available", 16, nil, 0x0080, "does the server support recursion?")
local pf_flag_z                   = ProtoField.uint16("mydns.flags.z", "World War Z - Reserved for future use", base.HEX, nil, 0x0040, "when is it the future?")
local pf_flag_authenticated       = ProtoField.bool("mydns.flags.authenticated", "Authenticated", 16, {"yes","no"}, 0x0020, "did the server DNSSEC authenticate?")
local pf_flag_checking_disabled   = ProtoField.bool("mydns.flags.checking_disabled", "Checking disabled", 16, nil, 0x0010)

-- no, these aren't all the DNS response codes - this is just an example
local rcodes = {
    [0] = "No Error",
    [1] = "Format Error",
    [2] = "Server Failure",
    [3] = "Non-Existent Domain",
    [9] = "Server Not Authoritative for zone"
}

-- the above rcodes table is used in this next ProtoField
local pf_flag_rcode        = ProtoField.uint16("mydns.flags.rcode", "Response code", base.DEC, rcodes, 0x000F)
local pf_query             = ProtoField.new("Query", "mydns.query", ftypes.BYTES)
local pf_query_name        = ProtoField.new("Name", "mydns.query.name", ftypes.STRING)
local pf_query_name_len    = ProtoField.new("Name Length", "mydns.query.name.len", ftypes.UINT8)
local pf_query_label_count = ProtoField.new("Label Count", "mydns.query.label.count", ftypes.UINT8)

local rrtypes = { [1] = "A (IPv4 host address)", [2] = "NS (authoritative name server)", [28] = "AAAA (for geeks only)" }
local pf_query_type        = ProtoField.uint16("mydns.query.type", "Type", base.DEC, rrtypes)

-- again, not all class types are listed here
local classes = {
    [0] = "Reserved",
    [1] = "IN (Internet)",
    [2] = "The 1%",
    [5] = "First class",
    [6] = "Business class",
    [65535] = "Cattle class"
}
local pf_query_class       = ProtoField.uint16("mydns.query.class", "Class", base.DEC, classes, nil, "keep it classy folks")
classes, nil, "keep it classy folks") ---------------------------------------- -- this actually registers the ProtoFields above, into our new Protocol -- in a real script I wouldn't do it this way; I'd build a table of fields programmatically -- and then set dns.fields to it, so as to avoid forgetting a field dns.fields = { pf_trasaction_id, pf_flags, pf_num_questions, pf_num_answers, pf_num_authority_rr, pf_num_additional_rr, pf_flag_response, pf_flag_opcode, pf_flag_authoritative, pf_flag_truncated, pf_flag_recursion_desired, pf_flag_recursion_available, pf_flag_z, pf_flag_authenticated, pf_flag_checking_disabled, pf_flag_rcode, pf_query, pf_query_name, pf_query_name_len, pf_query_label_count, pf_query_type, pf_query_class } ---------------------------------------- -- create some expert info fields (this is new functionality in 1.11.3) -- Expert info fields are very similar to proto fields: they're tied to our protocol, -- they're created in a similar way, and registered by setting a 'experts' field to -- a table of them just as proto fields were put into the 'dns.fields' above -- The old way of creating expert info was to just add it to the tree, but that -- didn't let the expert info be filterable in wireshark, whereas this way does local ef_query = ProtoExpert.new("mydns.query.expert", "DNS query message", expert.group.REQUEST_CODE, expert.severity.CHAT) local ef_response = ProtoExpert.new("mydns.response.expert", "DNS response message", expert.group.RESPONSE_CODE, expert.severity.CHAT) local ef_ultimate = ProtoExpert.new("mydns.response.ultimate.expert", "DNS answer to life, the universe, and everything", expert.group.COMMENTS_GROUP, expert.severity.NOTE) -- some error expert info's local ef_too_short = ProtoExpert.new("mydns.too_short.expert", "DNS message too short", expert.group.MALFORMED, expert.severity.ERROR) local ef_bad_query = ProtoExpert.new("mydns.query.missing.expert", "DNS query missing or malformed", expert.group.MALFORMED, expert.severity.WARN) -- register them dns.experts = { ef_query, ef_too_short, ef_bad_query, ef_response, ef_ultimate } ---------------------------------------- -- we don't just want to display our protocol's fields, we want to access the value of some of them too! -- There are several ways to do that. One is to just parse the buffer contents in Lua code to find -- the values. But since ProtoFields actually do the parsing for us, and can be retrieved using Field -- objects, it's kinda cool to do it that way. So let's create some Fields to extract the values. -- The following creates the Field objects, but they're not 'registered' until after this script is loaded. -- Also, these lines can't be before the 'dns.fields = ...' line above, because the Field.new() here is -- referencing fields we're creating, and they're not "created" until that line above. -- Furthermore, you cannot put these 'Field.new()' lines inside the dissector function. -- Before Wireshark version 1.11, you couldn't even do this concept (of using fields you just created). local questions_field = Field.new("mydns.num_questions") local query_type_field = Field.new("mydns.query.type") local query_class_field = Field.new("mydns.query.class") local response_field = Field.new("mydns.flags.response") -- here's a little helper function to access the response_field value later. -- Like any Field retrieval, you can't retrieve a field's value until its value has been -- set, which won't happen until we actually use our ProtoFields in TreeItem:add() calls. 
-- So this isResponse() function can't be used until after the pf_flag_response ProtoField
-- has been used inside the dissector.
-- Note that calling the Field object returns a FieldInfo object, and calling that
-- returns the value of the field - in this case a boolean true/false, since we set the
-- "mydns.flags.response" ProtoField to ftype.BOOLEAN way earlier when we created the
-- pf_flag_response ProtoField. Clear as mud?
--
-- A shorter version of this function would be:
--   local function isResponse() return response_field()() end
-- but I thought the below is easier to understand.
local function isResponse()
    local response_fieldinfo = response_field()
    return response_fieldinfo()
end

--------------------------------------------------------------------------------
-- preferences handling stuff
--------------------------------------------------------------------------------

-- a "enum" table for our enum pref, as required by Pref.enum()
-- having the "index" number makes ZERO sense, and is completely illogical
-- but it's what the code has expected it to be for a long time. Ugh.
local debug_pref_enum = {
    { 1, "Disabled", debug_level.DISABLED },
    { 2, "Level 1",  debug_level.LEVEL_1  },
    { 3, "Level 2",  debug_level.LEVEL_2  },
}

dns.prefs.debug = Pref.enum("Debug", default_settings.debug_level,
                            "The debug printing level", debug_pref_enum)

dns.prefs.port  = Pref.uint("Port number", default_settings.port,
                            "The UDP port number for MyDNS")

dns.prefs.heur  = Pref.bool("Heuristic enabled", default_settings.heur_enabled,
                            "Whether heuristic dissection is enabled or not")

----------------------------------------
-- a function for handling prefs being changed
function dns.prefs_changed()
    dprint2("prefs_changed called")

    default_settings.debug_level = dns.prefs.debug
    reset_debug_level()

    default_settings.heur_enabled = dns.prefs.heur

    if default_settings.port ~= dns.prefs.port then
        -- remove old one, if not 0
        if default_settings.port ~= 0 then
            dprint2("removing MyDNS from port",default_settings.port)
            DissectorTable.get("udp.port"):remove(default_settings.port, dns)
        end
        -- set our new default
        default_settings.port = dns.prefs.port
        -- add new one, if not 0
        if default_settings.port ~= 0 then
            dprint2("adding MyDNS to port",default_settings.port)
            DissectorTable.get("udp.port"):add(default_settings.port, dns)
        end
    end
end

dprint2("MyDNS Prefs registered")

----------------------------------------
---- some constants for later use ----
-- the DNS header size
local DNS_HDR_LEN = 12

-- the smallest possible DNS query field size
-- has to be at least a label null terminator, 2-bytes type and 2-bytes class
local MIN_QUERY_LEN = 5

----------------------------------------
-- some forward "declarations" of helper functions we use in the dissector
-- I don't usually use this trick, but it'll help in reading/grok'ing this script,
-- I think, if we don't focus on them.
local getQueryName

----------------------------------------
-- The following creates the callback function for the dissector.
-- It's the same as doing "dns.dissector = function (tvbuf,pkt,root)"
-- The 'tvbuf' is a Tvb object, 'pktinfo' is a Pinfo object, and 'root' is a TreeItem object.
-- Whenever Wireshark dissects a packet that our Proto is hooked into, it will call
-- this function and pass it these arguments for the packet it's dissecting.
function dns.dissector(tvbuf,pktinfo,root)
    dprint2("dns.dissector called")

    -- set the protocol column to show our protocol name
    pktinfo.cols.protocol:set("MYDNS")

    -- We want to check that the packet size is rational during dissection, so let's get the length of the
    -- packet buffer (Tvb).
    -- Because DNS has no additional payload data other than itself, and it rides on UDP without padding,
    -- we can use tvb:len() or tvb:reported_len() here; but I prefer tvb:reported_length_remaining() as it's safer.
    local pktlen = tvbuf:reported_length_remaining()

    -- We start by adding our protocol to the dissection display tree.
    -- A call to tree:add() returns the child created, so we can add more "under" it using that return value.
    -- The second argument is how much of the buffer/packet this added tree item covers/represents - in this
    -- case (DNS protocol) that's the remainder of the packet.
    local tree = root:add(dns, tvbuf:range(0,pktlen))

    -- now let's check it's not too short
    if pktlen < DNS_HDR_LEN then
        -- since we're going to add this protocol to a specific UDP port, we're going to
        -- assume packets in this port are our protocol, so the packet being too short is an error
        -- the old way: tree:add_expert_info(PI_MALFORMED, PI_ERROR, "packet too short")
        -- the correct way now:
        tree:add_proto_expert_info(ef_too_short)
        dprint("packet length",pktlen,"too short")
        return
    end

    -- Now let's add our transaction id under our dns protocol tree we just created.
    -- The transaction id starts at offset 0, for 2 bytes length.
    tree:add(pf_trasaction_id, tvbuf:range(0,2))

    -- We'd like to put the transaction id number in the GUI row for this packet, in its
    -- INFO column/cell. First we need the transaction id value, though. Since we just
    -- dissected it with the previous code line, we could now get it using a Field's
    -- FieldInfo extractor, but instead we'll get it directly from the TvbRange just
    -- to show how to do that. We'll use Field/FieldInfo extractors later on...
    local transid = tvbuf:range(0,2):uint()
    pktinfo.cols.info:set("(".. transid ..")")

    -- now let's add the flags, which are all in the packet bytes at offset 2 of length 2
    -- instead of calling this again and again, let's just use a variable
    local flagrange = tvbuf:range(2,2)

    -- for our flags field, we want a sub-tree
    local flag_tree = tree:add(pf_flags, flagrange)

        -- I'm indenting this for clarity, because it's adding to the flag's child-tree

        -- let's add the type of message (query vs. response)
        local query_flag_tree = flag_tree:add(pf_flag_response, flagrange)

        -- let's also add an expert info about it
        if isResponse() then
            query_flag_tree:add_proto_expert_info(ef_response, "It's a response!")
            if transid == 42 then
                tree:add_tvb_expert_info(ef_ultimate, tvbuf:range(0,2))
            end
        else
            query_flag_tree:add_proto_expert_info(ef_query)
        end

    -- we now know if it's a response or query, so let's put that in the
    -- GUI packet row, in the INFO column cell
    -- this line of code uses a Lua trick for doing something similar to
    -- the C/C++ 'test ? true : false' shorthand
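    -- For illustration only (not part of the original script): the and/or
    -- idiom below evaluates to its second operand when the condition holds
    -- and to its third otherwise, e.g.:
    --     (true  and "Response " or "Query ")  --> "Response "
    --     (false and "Response " or "Query ")  --> "Query "
    -- beware that it misbehaves if the middle operand can itself be false or
    -- nil; here it is always a non-empty string, so it is safe.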
    pktinfo.cols.info:prepend(isResponse() and "Response " or "Query ")

    flag_tree:add(pf_flag_opcode, flagrange)

    if isResponse() then
        flag_tree:add(pf_flag_authoritative, flagrange)
    end

    flag_tree:add(pf_flag_truncated, flagrange)

    if isResponse() then
        flag_tree:add(pf_flag_recursion_available, flagrange)
    else
        flag_tree:add(pf_flag_recursion_desired, flagrange)
    end

    flag_tree:add(pf_flag_z, flagrange)

    if isResponse() then
        flag_tree:add(pf_flag_authenticated, flagrange)
        flag_tree:add(pf_flag_rcode, flagrange)
    end

    flag_tree:add(pf_flag_checking_disabled, flagrange)

    -- now add more to the main mydns tree
    tree:add(pf_num_questions, tvbuf:range(4,2))
    tree:add(pf_num_answers, tvbuf:range(6,2))
    -- another way to get a TvbRange is just to call the Tvb like this
    tree:add(pf_num_authority_rr, tvbuf(8,2))
    -- or if we're crazy, we can create a sub-TvbRange, from a sub-TvbRange of the TvbRange
    tree:add(pf_num_additional_rr, tvbuf:range(10,2):range()())

    local num_queries = questions_field()()
    local pos = DNS_HDR_LEN

    if num_queries > 0 then
        -- let's create a sub-tree, using a plain text description (not a field from the packet)
        local queries_tree = tree:add("Queries")

        local pktlen_remaining = pktlen - pos

        -- multiple questions in one query hasn't been used for a long time, but just in case, let's loop
        while num_queries > 0 and pktlen_remaining > 0 do
            if pktlen_remaining < MIN_QUERY_LEN then
                -- old way: queries_tree:add_expert_info(PI_MALFORMED, PI_ERROR, "query field missing or too short")
                queries_tree:add_proto_expert_info(ef_bad_query)
                return
            end

            -- we don't know how long this query field in total is, so we have to parse it first before
            -- adding it to the tree, because we want to identify the correct bytes it covers
            local label_count, name, name_len = getQueryName(tvbuf:range(pos,pktlen_remaining))
            if not label_count then
                queries_tree:add_expert_info(PI_MALFORMED, PI_ERROR, name)
                return
            end

            -- now add the first query to the 'Queries' child tree we just created
            -- we're going to change the string generated by this later, after we figure out the subsequent fields.
            -- the whole query field is the query name field length we just got, plus 2-byte type and 2-byte class.
            local q_tree = queries_tree:add(pf_query, tvbuf:range(pos, name_len + 4))

            q_tree:add(pf_query_name, tvbuf:range(pos, name_len), name)
            pos = pos + name_len

            pktinfo.cols.info:append(" "..name)

            -- the following tree items are generated by us, not encoded in the packet per se, so mark them as such
            q_tree:add(pf_query_name_len, name_len):set_generated()
            q_tree:add(pf_query_label_count, label_count):set_generated()

            q_tree:add(pf_query_type, tvbuf:range(pos, 2))
            q_tree:add(pf_query_class, tvbuf:range(pos + 2, 2))
            pos = pos + 4

            -- now change the query text
            -- calling a Field returns a multival of one FieldInfo object for
            -- each value, so we select() only the most recent one
            q_tree:set_text(name..": type "..select(-1, query_type_field()).display ..", class "..select(-1, query_class_field()).display)

            pktlen_remaining = pktlen_remaining - (name_len + 4)
            num_queries = num_queries - 1
        end  -- end of while loop

        if num_queries > 0 then
            -- we didn't process them all
            queries_tree:add_expert_info(PI_MALFORMED, PI_ERROR, num_queries .. " query field(s) missing")
            return
        end
    end

    -- parsing answers, authority RRs, and additional RRs is up to you!
dprint2("dns.dissector returning",pos) -- tell wireshark how much of tvbuff we dissected return pos end ---------------------------------------- -- we want to have our protocol dissection invoked for a specific UDP port, -- so get the udp dissector table and add our protocol to it DissectorTable.get("udp.port"):add(default_settings.port, dns) ---------------------------------------- -- we also want to add the heuristic dissector, for any UDP protocol -- first we need a heuristic dissection function -- this is that function - when wireshark invokes this, it will pass in the same -- things it passes in to the "dissector" function, but we only want to actually -- dissect it if it's for us, and we need to return true if it's for us, or else false -- figuring out if it's for us or not is not easy -- we need to try as hard as possible, or else we'll think it's for us when it's -- not and block other heuristic dissectors from getting their chance -- -- in practice, you'd never set a dissector like this to be heuristic, because there -- just isn't enough information to safely detect if it's DNS or not -- but I'm doing it to show how it would be done -- -- Note: this heuristic stuff is new in 1.11.3 local function heur_dissect_dns(tvbuf,pktinfo,root) dprint2("heur_dissect_dns called") -- if our preferences tell us not to do this, return false if not default_settings.heur_enabled then return false end if tvbuf:len() < DNS_HDR_LEN then dprint("heur_dissect_dns: tvb shorter than DNS_HDR_LEN of:",DNS_HDR_LEN) return false end local tvbr = tvbuf:range(0,DNS_HDR_LEN) -- the first 2 bytes are transaction id, which can be anything so no point in checking those -- the next 2 bytes contain flags, a couple of which have some values we can check against -- the opcode has to be 0, 1, 2, 4 or 5 -- the opcode field starts at bit offset 17 (in C-indexing), for 4 bits in length local check = tvbr:bitfield(17,4) if check == 3 or check > 5 then dprint("heur_dissect_dns: invalid opcode:",check) return false end -- the rcode has to be 0-10, 16-22 (we're ignoring private use rcodes here) -- the rcode field starts at bit offset 28 (in C-indexing), for 4 bits in length check = tvbr:bitfield(28,4) if check > 22 or (check > 10 and check < 16) then dprint("heur_dissect_dns: invalid rcode:",check) return false end dprint2("heur_dissect_dns checking questions/answers") -- now let's verify the number of questions/answers are reasonable check = tvbr:range(4,2):uint() -- num questions if check > 100 then return false end check = tvbr:range(6,2):uint() -- num answers if check > 100 then return false end check = tvbr:range(8,2):uint() -- num authority if check > 100 then return false end check = tvbr:range(10,2):uint() -- num additional if check > 100 then return false end dprint2("heur_dissect_dns: everything looks good calling the real dissector") -- don't do this line in your script - I'm just doing it so our test-suite can -- verify this script root:add("Heuristic dissector used"):set_generated() -- ok, looks like it's ours, so go dissect it -- note: calling the dissector directly like this is new in 1.11.3 -- also note that calling a Dissector object, as this does, means we don't -- get back the return value of the dissector function we created previously -- so it might be better to just call the function directly instead of doing -- this, but this script is used for testing and this tests the call() function dns.dissector(tvbuf,pktinfo,root) -- since this is over a transport protocol, such as UDP, we can set the -- 
    -- conversation to make it sticky for our dissector, so that all future
    -- packets to/from the same address:port pair will just call our dissector
    -- function directly instead of this heuristic function
    -- this is a new attribute of pinfo in 1.11.3
    pktinfo.conversation = dns

    return true
end

-- now register that heuristic dissector into the udp heuristic list
if default_settings.heur_regmode == 1 then
    -- this is the "normal" way to register a heuristic: using a lua function
    dns:register_heuristic("udp",heur_dissect_dns)
elseif default_settings.heur_regmode == 2 then
    -- this is to test the fix for bug 10695:
    dns:register_heuristic("udp",dns.dissector)
elseif default_settings.heur_regmode == 3 then
    -- and this too is to test the fix for bug 10695:
    dns:register_heuristic("udp", function (...) return dns.dissector(...); end )
end

-- We're done!
-- our protocol (Proto) gets automatically registered after this script finishes loading
----------------------------------------

----------------------------------------
-- DNS query names are not just null-terminated strings; they're actually a sequence of
-- 'labels', with a length octet before each one. So "foobar.com" is actually the
-- string "\06foobar\03com\00". We could create a ProtoField for label_length and label_name
-- or whatever, but since this is an example script I'll show how to do it in raw code.
-- This function is given the TvbRange object from the dissector() function, and needs to
-- parse it.
-- On success, it returns three things: the number of labels, the name string, and how
-- many bytes it covered of the buffer.
-- On failure, it returns nil and the error message.
getQueryName = function (tvbr)
    local label_count = 0
    local name = ""
    local name_len = 0

    local len_remaining = tvbr:len()
    if len_remaining < 2 then
        -- it's too short
        return nil, "invalid name"
    end

    local barray = tvbr:bytes() -- gets a ByteArray of the TvbRange
    local pos = 0 -- unlike Lua, ByteArray uses 0-based indexing

    repeat
        local label_len = barray:get_index(pos)
        if label_len >= len_remaining then
            return nil, "invalid label length of "..label_len
        end
        pos = pos + 1 -- move past label length octet
        if label_len > 0 then
            -- append the label and a dot to name string
            -- note: this uses the new method of ByteArray:raw(), added in 1.11.3
            name = name .. barray:raw(pos, label_len) .. "."
            label_count = label_count + 1
            pos = pos + label_len -- move past label
        end
        name_len = name_len + label_len + 1
        len_remaining = len_remaining - (label_len + 1) -- subtract label and its length octet
    until label_len == 0

    -- we appended an extra dot, so get rid of it
    name = name:sub(1, -2)

    if name == "" then
        -- this is the root zone (.)
        name = "<Root>"
    end

    return label_count, name, name_len
end
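-- For illustration only (not part of the original script): the inverse of
-- getQueryName(), encoding a dotted name into the DNS label format described
-- above. Pure Lua, defined here but never called.
local function sketch_encodeQueryName(name)
    local out = {}
    for label in name:gmatch("[^.]+") do
        out[#out+1] = string.char(#label) .. label  -- length octet, then label
    end
    out[#out+1] = "\0"                              -- terminating null label
    return table.concat(out)
end
-- e.g. sketch_encodeQueryName("foobar.com") == "\6foobar\3com\0"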
Lua
wireshark/test/lua/field.lua
-- test script for wslua Field/FieldInfo functions
-- use with dhcp.pcap in test/captures directory

local testlib = require("testlib")

local FRAME = "frame"
local PER_FRAME = "per-frame"
local OTHER = "other"

local n_frames = 1
testlib.init({
    [FRAME] = n_frames,
    [PER_FRAME] = n_frames*43,
    [OTHER] = 16,
})

------------- helper funcs ------------

-- e.g. toMacAddr("004013abcdef") returns "00:40:13:ab:cd:ef"
local function toMacAddr(addrhex)
    return addrhex:gsub("..","%0:"):sub(1,-2)
end

-- the following are so we can use pcall (which needs a function to call)
local function makeField(name)
    local foo = Field.new(name)
    return true
end

local function makeFieldInfo(field)
    local foo = field()
    return true
end

local function setFieldInfo(finfo,name,value)
    finfo[name] = value
    return true
end

local function getFieldInfo(finfo,name)
    local foo = finfo[name]
    return true
end

--------------------------

testlib.testing(OTHER, "Field")

testlib.test(OTHER,"Field.new-0",pcall(makeField,"ip.src"))
testlib.test(OTHER,"Field.new-1",not pcall(makeField,"FooBARhowdy"))
testlib.test(OTHER,"Field.new-2",not pcall(makeField))
testlib.test(OTHER,"Field.new-3",not pcall(makeField,""))
testlib.test(OTHER,"Field.new-4",not pcall(makeField,"IP.SRC"))

-- declare some field extractors
local f_frame_encap_type = Field.new("frame.encap_type")
local f_frame_proto = Field.new("frame.protocols")
local f_eth_src     = Field.new("eth.src")
local f_eth_dst     = Field.new("eth.dst")
local f_eth_mac     = Field.new("eth.addr")
local f_ip_src      = Field.new("ip.src")
local f_ip_dst      = Field.new("ip.dst")
local f_udp_srcport = Field.new("udp.srcport")
local f_udp_dstport = Field.new("udp.dstport")
local f_dhcp_hw     = Field.new("dhcp.hw.mac_addr")
local f_dhcp_opt    = Field.new("dhcp.option.type")

testlib.test(OTHER,"Field__tostring-1", tostring(f_frame_proto) == "frame.protocols")

testlib.test(OTHER,"Field.name-1", f_frame_proto.name == "frame.protocols")
testlib.test(OTHER,"Field.name-2", f_eth_src.name == "eth.src")

testlib.test(OTHER,"Field.display-1", f_frame_proto.display == "Protocols in frame")
testlib.test(OTHER,"Field.display-2", f_eth_src.display == "Source")

testlib.test(OTHER,"Field.type-1", f_frame_proto.type == ftypes.STRING)
testlib.test(OTHER,"Field.type-2", f_eth_src.type == ftypes.ETHER)
testlib.test(OTHER,"Field.type-3", f_ip_src.type == ftypes.IPv4)
testlib.test(OTHER,"Field.type-4", f_udp_srcport.type == ftypes.UINT16)
testlib.test(OTHER,"Field.type-5", f_dhcp_opt.type == ftypes.UINT8)

-- make sure can't create a FieldInfo outside tap
testlib.test(OTHER,"Field__call-1",not pcall(makeFieldInfo,f_eth_src))

local tap = Listener.new()

--------------------------

function tap.packet(pinfo,tvb)
    testlib.countPacket(FRAME)
    testlib.testing(FRAME,"Field")

    testlib.test(PER_FRAME,"Field__tostring-2", tostring(f_frame_proto) == "frame.protocols")

    -- make sure can't create a Field inside tap
    testlib.test(PER_FRAME,"Field.new-5",not pcall(makeField,"ip.src"))

    testlib.test(PER_FRAME,"Field__call-2",pcall(makeFieldInfo,f_eth_src))

    testlib.test(PER_FRAME,"Field.name-3", f_frame_proto.name == "frame.protocols")
    testlib.test(PER_FRAME,"Field.name-4", f_eth_src.name == "eth.src")

    testlib.test(PER_FRAME,"Field.display-3", f_frame_proto.display == "Protocols in frame")
    testlib.test(PER_FRAME,"Field.display-4", f_eth_src.display == "Source")

    testlib.test(PER_FRAME,"Field.type-6", f_frame_proto.type == ftypes.STRING)
    testlib.test(PER_FRAME,"Field.type-7", f_eth_src.type == ftypes.ETHER)
    testlib.test(PER_FRAME,"Field.type-8", f_ip_src.type == ftypes.IPv4)
    testlib.test(PER_FRAME,"Field.type-9", f_udp_srcport.type == ftypes.UINT16)
    testlib.test(PER_FRAME,"Field.type-10", f_dhcp_opt.type == ftypes.UINT8)

    testlib.testing(FRAME,"FieldInfo")

    local finfo_udp_srcport = f_udp_srcport()
    testlib.test(PER_FRAME,"FieldInfo.name-1", finfo_udp_srcport.name == "udp.srcport")
    testlib.test(PER_FRAME,"FieldInfo.type-1", finfo_udp_srcport.type == ftypes.UINT16)
    testlib.test(PER_FRAME,"FieldInfo.little_endian-1", finfo_udp_srcport.little_endian == false)
    testlib.test(PER_FRAME,"FieldInfo.big_endian-1", finfo_udp_srcport.big_endian == true)
    testlib.test(PER_FRAME,"FieldInfo.is_url-1", finfo_udp_srcport.is_url == false)
    testlib.test(PER_FRAME,"FieldInfo.offset-1", finfo_udp_srcport.offset == 34)
    testlib.test(PER_FRAME,"FieldInfo.source-1", finfo_udp_srcport.source == tvb)

    -- check ether addr
    local fi_eth_src = f_eth_src()
    testlib.test(PER_FRAME,"FieldInfo.type-2", fi_eth_src.type == ftypes.ETHER)
    testlib.test(PER_FRAME,"FieldInfo.range-0",pcall(getFieldInfo,fi_eth_src,"range"))

    local eth_macs = { f_eth_mac() }
    local eth_src1 = tostring(f_eth_src().range)
    local eth_src2 = tostring(tvb:range(6,6))
    local eth_src3 = tostring(eth_macs[2].tvb)

    testlib.test(PER_FRAME,"FieldInfo.range-1", eth_src1 == eth_src2)
    testlib.test(PER_FRAME,"FieldInfo.range-2", eth_src1 == eth_src3)
    testlib.test(PER_FRAME,"FieldInfo.range-3",not pcall(setFieldInfo,fi_eth_src,"range",3))
    testlib.test(PER_FRAME,"FieldInfo.range-4", tostring(f_frame_encap_type().range) == "<EMPTY>")

    testlib.test(PER_FRAME,"FieldInfo.generated-1", f_frame_proto().generated == true)
    testlib.test(PER_FRAME,"FieldInfo.generated-2", eth_macs[2].generated == false)
    testlib.test(PER_FRAME,"FieldInfo.generated-3",not pcall(setFieldInfo,fi_eth_src,"generated",3))

    testlib.test(PER_FRAME,"FieldInfo.name-1", fi_eth_src.name == "eth.src")
    testlib.test(PER_FRAME,"FieldInfo.name-2",not pcall(setFieldInfo,fi_eth_src,"name","3"))

    testlib.test(PER_FRAME,"FieldInfo.label-1", fi_eth_src.label == tostring(fi_eth_src))
    testlib.test(PER_FRAME,"FieldInfo.label-2", fi_eth_src.label == toMacAddr(eth_src1))
    testlib.test(PER_FRAME,"FieldInfo.label-3",not pcall(setFieldInfo,fi_eth_src,"label","3"))

    testlib.test(PER_FRAME,"FieldInfo.display-1", select(1, string.find(fi_eth_src.display, toMacAddr(eth_src1))) ~= nil)
    testlib.test(PER_FRAME,"FieldInfo.display-2",not pcall(setFieldInfo,fi_eth_src,"display","3"))

    testlib.test(PER_FRAME,"FieldInfo.eq-1", eth_macs[2] == select(2, f_eth_mac()))
    testlib.test(PER_FRAME,"FieldInfo.eq-2", eth_macs[1] ~= fi_eth_src)
    testlib.test(PER_FRAME,"FieldInfo.eq-3", eth_macs[1] == f_eth_dst())

    testlib.test(PER_FRAME,"FieldInfo.offset-1", eth_macs[1].offset == 0)
    testlib.test(PER_FRAME,"FieldInfo.offset-2", -fi_eth_src == 6)
    testlib.test(PER_FRAME,"FieldInfo.offset-3",not pcall(setFieldInfo,fi_eth_src,"offset","3"))

    testlib.test(PER_FRAME,"FieldInfo.len-1", fi_eth_src.len == 6)
    testlib.test(PER_FRAME,"FieldInfo.len-2",not pcall(setFieldInfo,fi_eth_src,"len",6))

    testlib.pass(FRAME)
end

function tap.draw()
    testlib.getResults()
end
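-- For illustration only (not part of the original test): calling a Field
-- extractor inside a tap/dissector returns one FieldInfo per occurrence of
-- that field in the current packet, which is why the code above wraps
-- f_eth_mac() in a table constructor. Counting occurrences is just:
local function countOccurrences(field)
    return select('#', field())   -- number of FieldInfo multivals returned
end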
Lua
wireshark/test/lua/field_setup.lua
function field_setup(proto, prefix)

    local pf_boolean = ProtoField.new("Boolean", prefix..".boolean", ftypes.BOOLEAN)
    local pf_char = ProtoField.new("Char", prefix..".char", ftypes.CHAR)
    local pf_uint8 = ProtoField.new("Uint8", prefix..".uint8", ftypes.UINT8)
    local pf_uint16 = ProtoField.new("Uint16", prefix..".uint16", ftypes.UINT16)
    local pf_uint24 = ProtoField.new("Uint24", prefix..".uint24", ftypes.UINT24)
    local pf_uint32 = ProtoField.new("Uint32", prefix..".uint32", ftypes.UINT32)
    local pf_uint64 = ProtoField.new("Uint64", prefix..".uint64", ftypes.UINT64)
    local pf_int8 = ProtoField.new("Int8", prefix..".int8", ftypes.INT8)
    local pf_int16 = ProtoField.new("Int16", prefix..".int16", ftypes.INT16)
    local pf_int24 = ProtoField.new("Int24", prefix..".int24", ftypes.INT24)
    local pf_int32 = ProtoField.new("Int32", prefix..".int32", ftypes.INT32)
    local pf_int64 = ProtoField.new("Int64", prefix..".int64", ftypes.INT64)
    local pf_float = ProtoField.new("Float", prefix..".float", ftypes.FLOAT)
    local pf_double = ProtoField.new("Double", prefix..".double", ftypes.DOUBLE)
    local pf_absolute_time = ProtoField.new("Absolute_Time", prefix..".absolute_time", ftypes.ABSOLUTE_TIME)
    local pf_relative_time = ProtoField.new("Relative_Time", prefix..".relative_time", ftypes.RELATIVE_TIME)
    local pf_string = ProtoField.new("String", prefix..".string", ftypes.STRING)
    local pf_stringz = ProtoField.new("Stringz", prefix..".stringz", ftypes.STRINGZ)
    local pf_ether = ProtoField.new("Ether", prefix..".ether", ftypes.ETHER)
    local pf_bytes = ProtoField.new("Bytes", prefix..".bytes", ftypes.BYTES)
    local pf_uint_bytes = ProtoField.new("Uint_Bytes", prefix..".uint_bytes", ftypes.UINT_BYTES)
    local pf_ipv4 = ProtoField.new("Ipv4", prefix..".ipv4", ftypes.IPv4)
    local pf_ipv6 = ProtoField.new("Ipv6", prefix..".ipv6", ftypes.IPv6)
    local pf_ipxnet = ProtoField.new("Ipxnet", prefix..".ipxnet", ftypes.IPXNET)
    local pf_framenum = ProtoField.new("Framenum", prefix..".framenum", ftypes.FRAMENUM)
    local pf_guid = ProtoField.new("Guid", prefix..".guid", ftypes.GUID)
    local pf_oid = ProtoField.new("Oid", prefix..".oid", ftypes.OID)
    local pf_rel_oid = ProtoField.new("Rel_Oid", prefix..".rel_oid", ftypes.REL_OID)
    local pf_system_id = ProtoField.new("System_Id", prefix..".system_id", ftypes.SYSTEM_ID)
    local pf_eui64 = ProtoField.new("Eui64", prefix..".eui64", ftypes.EUI64)

    proto.fields = {
        pf_boolean, pf_char, pf_uint8, pf_uint16, pf_uint24, pf_uint32, pf_uint64,
        pf_int8, pf_int16, pf_int24, pf_int32, pf_int64, pf_float, pf_double,
        pf_absolute_time, pf_relative_time, pf_string, pf_stringz, pf_ether,
        pf_bytes, pf_uint_bytes, pf_ipv4, pf_ipv6, pf_ipxnet, pf_framenum,
        pf_guid, pf_oid, pf_rel_oid, pf_system_id, pf_eui64,
    }

    local vf_boolean = Field.new(prefix..".boolean")
    local vf_char = Field.new(prefix..".char")
    local vf_uint8 = Field.new(prefix..".uint8")
    local vf_uint16 = Field.new(prefix..".uint16")
    local vf_uint24 = Field.new(prefix..".uint24")
    local vf_uint32 = Field.new(prefix..".uint32")
    local vf_uint64 = Field.new(prefix..".uint64")
    local vf_int8 = Field.new(prefix..".int8")
    local vf_int16 = Field.new(prefix..".int16")
    local vf_int24 = Field.new(prefix..".int24")
    local vf_int32 = Field.new(prefix..".int32")
    local vf_int64 = Field.new(prefix..".int64")
    local vf_float = Field.new(prefix..".float")
    local vf_double = Field.new(prefix..".double")
    local vf_absolute_time = Field.new(prefix..".absolute_time")
    local vf_relative_time = Field.new(prefix..".relative_time")
    local vf_string = Field.new(prefix..".string")
    local vf_stringz = Field.new(prefix..".stringz")
    local vf_ether = Field.new(prefix..".ether")
    local vf_bytes = Field.new(prefix..".bytes")
    local vf_uint_bytes = Field.new(prefix..".uint_bytes")
    local vf_ipv4 = Field.new(prefix..".ipv4")
    local vf_ipv6 = Field.new(prefix..".ipv6")
    local vf_ipxnet = Field.new(prefix..".ipxnet")
    local vf_framenum = Field.new(prefix..".framenum")
    local vf_guid = Field.new(prefix..".guid")
    local vf_oid = Field.new(prefix..".oid")
    local vf_rel_oid = Field.new(prefix..".rel_oid")
    local vf_system_id = Field.new(prefix..".system_id")
    local vf_eui64 = Field.new(prefix..".eui64")

    local fieldmap = {
        ["boolean"] = {packet_field = pf_boolean, value_field = vf_boolean},
        ["char"] = {packet_field = pf_char, value_field = vf_char},
        ["uint8"] = {packet_field = pf_uint8, value_field = vf_uint8},
        ["uint16"] = {packet_field = pf_uint16, value_field = vf_uint16},
        ["uint24"] = {packet_field = pf_uint24, value_field = vf_uint24},
        ["uint32"] = {packet_field = pf_uint32, value_field = vf_uint32},
        ["uint64"] = {packet_field = pf_uint64, value_field = vf_uint64},
        ["int8"] = {packet_field = pf_int8, value_field = vf_int8},
        ["int16"] = {packet_field = pf_int16, value_field = vf_int16},
        ["int24"] = {packet_field = pf_int24, value_field = vf_int24},
        ["int32"] = {packet_field = pf_int32, value_field = vf_int32},
        ["int64"] = {packet_field = pf_int64, value_field = vf_int64},
        ["float"] = {packet_field = pf_float, value_field = vf_float},
        ["double"] = {packet_field = pf_double, value_field = vf_double},
        ["absolute_time"] = {packet_field = pf_absolute_time, value_field = vf_absolute_time},
        ["relative_time"] = {packet_field = pf_relative_time, value_field = vf_relative_time},
        ["string"] = {packet_field = pf_string, value_field = vf_string},
        ["stringz"] = {packet_field = pf_stringz, value_field = vf_stringz},
        ["ether"] = {packet_field = pf_ether, value_field = vf_ether},
        ["bytes"] = {packet_field = pf_bytes, value_field = vf_bytes},
        ["uint_bytes"] = {packet_field = pf_uint_bytes, value_field = vf_uint_bytes},
        ["ipv4"] = {packet_field = pf_ipv4, value_field = vf_ipv4},
        ["ipv6"] = {packet_field = pf_ipv6, value_field = vf_ipv6},
        ["ipxnet"] = {packet_field = pf_ipxnet, value_field = vf_ipxnet},
        ["framenum"] = {packet_field = pf_framenum, value_field = vf_framenum},
        ["guid"] = {packet_field = pf_guid, value_field = vf_guid},
        ["oid"] = {packet_field = pf_oid, value_field = vf_oid},
        ["rel_oid"] = {packet_field = pf_rel_oid, value_field = vf_rel_oid},
        ["system_id"] = {packet_field = pf_system_id, value_field = vf_system_id},
        ["eui64"] = {packet_field = pf_eui64, value_field = vf_eui64},
    }

    return fieldmap
end

return field_setup
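-- For illustration only (not part of the original file): a typical caller
-- would use the module along these lines (the "myfake" names are placeholders):
--
--     local field_setup = require("field_setup")
--     local myproto = Proto("myfake", "My Fake Protocol")
--     local fieldmap = field_setup(myproto, "myfake")
--     -- in a dissector: tree:add(fieldmap["uint32"].packet_field, tvbuf:range(0,4))
--     -- afterwards:     local finfo = fieldmap["uint32"].value_field()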
Text
wireshark/test/lua/globals_2.2.txt
-- Wireshark version: 1.12.6 { ["Address"] = { ["__typeof"] = "Address", ["ip"] = '<function 1>', ["ipv4"] = '<function 1>', ['<metatable>'] = { ["__eq"] = '<function 2>', ["__index"] = '<filtered>', ["__le"] = '<function 4>', ["__lt"] = '<function 5>', ["__methods"] = '<table 2>', ["__tostring"] = '<function 6>', ["__typeof"] = "Address" } }, ["ByteArray"] = { ["__typeof"] = "ByteArray", ["append"] = '<function 7>', ["base64_decode"] = '<function 8>', ["get_index"] = '<function 9>', ["len"] = '<function 10>', ["new"] = '<function 11>', ["prepend"] = '<function 12>', ["raw"] = '<function 13>', ["set_index"] = '<function 14>', ["set_size"] = '<function 15>', ["subset"] = '<function 16>', ["tohex"] = '<function 17>', ["tvb"] = '<function 18>', ['<metatable>'] = { ["__call"] = '<function 16>', ["__concat"] = '<function 19>', ["__eq"] = '<function 20>', ["__index"] = '<filtered>', ["__methods"] = '<table 3>', ["__tostring"] = '<function 22>', ["__typeof"] = "ByteArray" } }, ["Column"] = { ["__typeof"] = "Column", ["append"] = '<function 23>', ["clear"] = '<function 24>', ["clear_fence"] = '<function 25>', ["fence"] = '<function 26>', ["prepend"] = '<function 27>', ["preppend"] = '<function 27>', ["set"] = '<function 28>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 4>', ["__tostring"] = '<function 30>', ["__typeof"] = "Column" } }, ["DATA_DIR"] = '<filtered>', ["DESEGMENT_ONE_MORE_SEGMENT"] = 268435455, ["Dir"] = { ["__typeof"] = "Dir", ["close"] = '<function 31>', ["exists"] = '<function 32>', ["global_config_path"] = '<function 33>', ["global_plugins_path"] = '<function 34>', ["make"] = '<function 35>', ["open"] = '<function 36>', ["personal_config_path"] = '<function 37>', ["personal_plugins_path"] = '<function 38>', ["remove"] = '<function 39>', ["remove_all"] = '<function 40>', ['<metatable>'] = { ["__call"] = '<function 41>', ["__index"] = '<filtered>', ["__methods"] = '<table 5>', ["__typeof"] = "Dir" } }, ["Dissector"] = { ["__typeof"] = "Dissector", ["call"] = '<function 43>', ["get"] = '<function 44>', ["list"] = '<function 45>', ['<metatable>'] = { ["__call"] = '<function 46>', ["__index"] = '<filtered>', ["__methods"] = '<table 6>', ["__tostring"] = '<function 48>', ["__typeof"] = "Dissector" } }, ["DissectorTable"] = { ["__typeof"] = "DissectorTable", ["add"] = '<function 49>', ["get"] = '<function 50>', ["get_dissector"] = '<function 51>', ["heuristic_list"] = '<function 52>', ["list"] = '<function 53>', ["new"] = '<function 54>', ["remove"] = '<function 55>', ["remove_all"] = '<function 56>', ["set"] = '<function 57>', ["try"] = '<function 58>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 7>', ["__tostring"] = '<function 60>', ["__typeof"] = "DissectorTable" } }, ["Dumper"] = { ["__typeof"] = "Dumper", ["close"] = '<function 61>', ["dump"] = '<function 62>', ["dump_current"] = '<function 63>', ["flush"] = '<function 64>', ["new"] = '<function 65>', ["new_for_current"] = '<function 66>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 8>', ["__typeof"] = "Dumper" } }, ["ENC_3GPP_TS_23_038_7BITS"] = 44, ["ENC_ASCII"] = 0, ["ENC_ASCII_7BITS"] = 52, ["ENC_BIG_ENDIAN"] = 0, ["ENC_CHARENCODING_MASK"] = 2147483646, ["ENC_CP437"] = 50, ["ENC_EBCDIC"] = 46, ["ENC_ISO_8601_DATE"] = 65536, ["ENC_ISO_8601_DATE_TIME"] = 196608, ["ENC_ISO_8601_DATE_TIME_BASIC"] = 1048576, ["ENC_ISO_8601_TIME"] = 131072, ["ENC_ISO_8859_1"] = 10, ["ENC_ISO_8859_10"] = 28, ["ENC_ISO_8859_11"] = 30, ["ENC_ISO_8859_13"] = 34, 
["ENC_ISO_8859_14"] = 36, ["ENC_ISO_8859_15"] = 38, ["ENC_ISO_8859_16"] = 40, ["ENC_ISO_8859_2"] = 12, ["ENC_ISO_8859_3"] = 14, ["ENC_ISO_8859_4"] = 16, ["ENC_ISO_8859_5"] = 18, ["ENC_ISO_8859_6"] = 20, ["ENC_ISO_8859_7"] = 22, ["ENC_ISO_8859_8"] = 24, ["ENC_ISO_8859_9"] = 26, ["ENC_LITTLE_ENDIAN"] = 2147483648, ["ENC_MAC_ROMAN"] = 48, ["ENC_NA"] = 0, ["ENC_NUM_PREF"] = 2097152, ["ENC_RFC_1123"] = 524288, ["ENC_RFC_822"] = 262144, ["ENC_SEP_COLON"] = 131072, ["ENC_SEP_DASH"] = 262144, ["ENC_SEP_DOT"] = 524288, ["ENC_SEP_MASK"] = 2031616, ["ENC_SEP_NONE"] = 65536, ["ENC_SEP_SPACE"] = 1048576, ["ENC_STRING"] = 50331648, ["ENC_STR_HEX"] = 33554432, ["ENC_STR_MASK"] = 65534, ["ENC_STR_NUM"] = 16777216, ["ENC_STR_TIME_MASK"] = 983040, ["ENC_TIME_NTP"] = 2, ["ENC_TIME_TIMESPEC"] = 0, ["ENC_TIME_TOD"] = 4, ["ENC_UCS_2"] = 6, ["ENC_UCS_4"] = 8, ["ENC_UTF_16"] = 4, ["ENC_UTF_8"] = 2, ["ENC_WINDOWS_1250"] = 42, ["Field"] = { ["__typeof"] = "Field", ["list"] = '<function 68>', ["new"] = '<function 69>', ['<metatable>'] = { ["__call"] = '<function 70>', ["__index"] = '<filtered>', ["__methods"] = '<table 9>', ["__tostring"] = '<function 72>', ["__typeof"] = "Field" } }, ["File"] = { ["__typeof"] = "File", ["lines"] = '<function 73>', ["read"] = '<function 74>', ["seek"] = '<function 75>', ["write"] = '<function 76>', ['<metatable>'] = { ["__getters"] = { ["__typeof"] = "getter", ["compressed"] = '<function 78>' }, ["__index"] = '<filtered>', ["__methods"] = '<table 10>', ["__newindex"] = '<function 79>', ["__setters"] = { ["__typeof"] = "setter" }, ["__tostring"] = '<function 80>', ["__typeof"] = "File" } }, ["FileHandler"] = { ["__typeof"] = "FileHandler", ["new"] = '<function 81>', ['<metatable>'] = { ["__getters"] = { ["__typeof"] = "getter", ["extensions"] = '<function 83>', ["supported_comment_types"] = '<function 84>', ["type"] = '<function 85>', ["writes_name_resolution"] = '<function 86>', ["writing_must_seek"] = '<function 87>' }, ["__index"] = '<filtered>', ["__methods"] = '<table 11>', ["__newindex"] = '<function 88>', ["__setters"] = { ["__typeof"] = "setter", ["can_write_encap"] = '<function 89>', ["extensions"] = '<function 90>', ["read"] = '<function 91>', ["read_close"] = '<function 92>', ["read_open"] = '<function 93>', ["seek_read"] = '<function 94>', ["seq_read_close"] = '<function 95>', ["supported_comment_types"] = '<function 96>', ["write"] = '<function 97>', ["write_close"] = '<function 98>', ["write_open"] = '<function 99>', ["writes_name_resolution"] = '<function 100>', ["writing_must_seek"] = '<function 101>' }, ["__tostring"] = '<function 102>', ["__typeof"] = "FileHandler" } }, ["FrameInfo"] = { ["__typeof"] = "FrameInfo", ["read_data"] = '<function 103>', ['<metatable>'] = { ["__getters"] = { ["__typeof"] = "getter", ["captured_length"] = '<function 105>', ["comment"] = '<function 106>', ["data"] = '<function 107>', ["encap"] = '<function 108>', ["flags"] = '<function 109>', ["original_length"] = '<function 110>', ["rec_type"] = '<function 111>', ["time"] = '<function 112>' }, ["__index"] = '<filtered>', ["__methods"] = '<table 12>', ["__newindex"] = '<function 113>', ["__setters"] = { ["__typeof"] = "setter", ["captured_length"] = '<function 114>', ["comment"] = '<function 115>', ["data"] = '<function 116>', ["encap"] = '<function 117>', ["flags"] = '<function 118>', ["original_length"] = '<function 119>', ["rec_type"] = '<function 120>', ["time"] = '<function 121>' }, ["__tostring"] = '<function 122>', ["__typeof"] = "FrameInfo" } }, ["FrameInfoConst"] = { ["__typeof"] = 
"FrameInfoConst", ["write_data"] = '<function 123>', ['<metatable>'] = { ["__getters"] = { ["__typeof"] = "getter", ["captured_length"] = '<function 125>', ["comment"] = '<function 126>', ["data"] = '<function 127>', ["encap"] = '<function 128>', ["flags"] = '<function 129>', ["original_length"] = '<function 130>', ["rec_type"] = '<function 131>', ["time"] = '<function 132>' }, ["__index"] = '<filtered>', ["__methods"] = '<table 13>', ["__newindex"] = '<function 133>', ["__setters"] = { ["__typeof"] = "setter" }, ["__tostring"] = '<function 134>', ["__typeof"] = "FrameInfoConst" } }, ["GUI_ENABLED"] = false, ["H225_ALERTING"] = 3, ["H225_CALL_PROCEDING"] = 1, ["H225_CONNECT"] = 2, ["H225_CS"] = 1, ["H225_EMPTY"] = 8, ["H225_FACILITY"] = 6, ["H225_INFORMATION"] = 4, ["H225_NOTIFY"] = 12, ["H225_OTHER"] = 13, ["H225_OTHERS"] = 2, ["H225_PROGRESS"] = 7, ["H225_RAS"] = 0, ["H225_RELEASE_COMPLET"] = 5, ["H225_SETUP"] = 0, ["H225_SETUP_ACK"] = 11, ["H225_STATUS"] = 9, ["H225_STATUS_INQUIRY"] = 10, ["Int64"] = { ["__typeof"] = "Int64", ["arshift"] = '<function 145>', ["band"] = '<function 146>', ["bnot"] = '<function 147>', ["bor"] = '<function 148>', ["bswap"] = '<function 149>', ["bxor"] = '<function 150>', ["decode"] = '<function 151>', ["encode"] = '<function 152>', ["fromhex"] = '<function 153>', ["higher"] = '<function 154>', ["lower"] = '<function 155>', ["lshift"] = '<function 156>', ["max"] = '<function 157>', ["min"] = '<function 158>', ["new"] = '<function 159>', ["rol"] = '<function 160>', ["ror"] = '<function 161>', ["rshift"] = '<function 162>', ["tohex"] = '<function 163>', ["tonumber"] = '<function 164>', ['<metatable>'] = { ["__add"] = '<function 165>', ["__call"] = '<function 166>', ["__concat"] = '<function 167>', ["__div"] = '<function 168>', ["__eq"] = '<function 169>', ["__index"] = '<filtered>', ["__le"] = '<function 171>', ["__lt"] = '<function 172>', ["__methods"] = '<table 14>', ["__mod"] = '<function 173>', ["__mul"] = '<function 174>', ["__pow"] = '<function 175>', ["__sub"] = '<function 176>', ["__tostring"] = '<function 177>', ["__typeof"] = "Int64", ["__unm"] = '<function 178>' } }, ["Listener"] = { ["__typeof"] = "Listener", ["list"] = '<function 179>', ["new"] = '<function 180>', ["remove"] = '<function 181>', ['<metatable>'] = { ["__getters"] = { ["__typeof"] = "getter" }, ["__index"] = '<filtered>', ["__methods"] = '<table 15>', ["__newindex"] = '<function 183>', ["__setters"] = { ["__typeof"] = "setter", ["draw"] = '<function 184>', ["packet"] = '<function 185>', ["reset"] = '<function 186>' }, ["__tostring"] = '<function 187>', ["__typeof"] = "Listener" } }, ["MENU_PACKET_ANALYZE_UNSORTED"] = 0, ["MENU_ANALYZE_CONVERSATION_FILTER"] = 1, ["MENU_STAT_UNSORTED"] = 2, ["MENU_STAT_GENERIC"] = 3, ["MENU_STAT_CONVERSATION_LIST"] = 4, ["MENU_STAT_ENDPOINT_LIST"] = 5, ["MENU_STAT_RESPONSE_TIME"] = 6, ["MENU_STAT_RSERPOOL"] = 7, ["MENU_STAT_TELEPHONY"] = 8, ["MENU_STAT_TELEPHONY_ANSI"] = 9, ["MENU_STAT_TELEPHONY_GSM"] = 10, ["MENU_STAT_TELEPHONY_LTE"] = 11, ["MENU_STAT_TELEPHONY_MTP3"] = 12, ["MENU_STAT_TELEPHONY_SCTP"] = 13, ["MENU_TOOLS_UNSORTED"] = 14, ["MENU_LOG_ANALYZE_UNSORTED"] = 15, ["MENU_LOG_STAT_UNSORTED"] = 16, ["NSTime"] = '<filtered>', ["PI_ASSUMPTION"] = 218103808, ["PI_CHAT"] = 2097152, ["PI_CHECKSUM"] = 16777216, ["PI_COMMENT"] = 1048576, ["PI_COMMENTS_GROUP"] = 184549376, ["PI_DEBUG"] = 134217728, ["PI_DECRYPTION"] = 201326592, ["PI_DEPRECATED"] = 234881024, ["PI_ERROR"] = 8388608, ["PI_GROUP_MASK"] = 4278190080, ["PI_MALFORMED"] = 117440512, 
["PI_NOTE"] = 4194304, ["PI_PROTOCOL"] = 150994944, ["PI_REASSEMBLE"] = 100663296, ["PI_REQUEST_CODE"] = 67108864, ["PI_RESPONSE_CODE"] = 50331648, ["PI_SECURITY"] = 167772160, ["PI_SEQUENCE"] = 33554432, ["PI_SEVERITY_MASK"] = 15728640, ["PI_UNDECODED"] = 83886080, ["PI_WARN"] = 6291456, ["Pref"] = { ["__typeof"] = "Pref", ["bool"] = '<function 188>', ["enum"] = '<function 189>', ["range"] = '<function 190>', ["statictext"] = '<function 191>', ["string"] = '<function 192>', ["uint"] = '<function 193>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 16>', ["__typeof"] = "Pref" } }, ["ProgDlg"] = { ["__typeof"] = "ProgDlg", ["close"] = '<function 195>', ["new"] = '<function 196>', ["stopped"] = '<function 197>', ["update"] = '<function 198>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 17>', ["__tostring"] = '<function 200>', ["__typeof"] = "ProgDlg" } }, ["Proto"] = '<filtered>', ["ProtoExpert"] = { ["__typeof"] = "ProtoExpert", ["new"] = '<function 201>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 18>', ["__tostring"] = '<function 203>', ["__typeof"] = "ProtoExpert" } }, ["ProtoField"] = { ["__typeof"] = "ProtoField", ["absolute_time"] = '<function 204>', ["bool"] = '<function 205>', ["bytes"] = '<function 206>', ["double"] = '<function 207>', ["ether"] = '<function 208>', ["float"] = '<function 209>', ["framenum"] = '<function 210>', ["guid"] = '<function 211>', ["int16"] = '<function 212>', ["int24"] = '<function 213>', ["int32"] = '<function 214>', ["int64"] = '<function 215>', ["int8"] = '<function 216>', ["ipv4"] = '<function 217>', ["ipv6"] = '<function 218>', ["ipx"] = '<function 219>', ["new"] = '<function 220>', ["oid"] = '<function 221>', ["rel_oid"] = '<function 222>', ["relative_time"] = '<function 223>', ["string"] = '<function 224>', ["stringz"] = '<function 225>', ["systemid"] = '<function 226>', ["ubytes"] = '<function 227>', ["uint16"] = '<function 228>', ["uint24"] = '<function 229>', ["uint32"] = '<function 230>', ["uint64"] = '<function 231>', ["uint8"] = '<function 232>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 19>', ["__tostring"] = '<function 234>', ["__typeof"] = "ProtoField" } }, ["PseudoHeader"] = { ["__typeof"] = "PseudoHeader", ["atm"] = '<function 235>', ["eth"] = '<function 236>', ["mtp2"] = '<function 237>', ["none"] = '<function 238>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 20>', ["__typeof"] = "PseudoHeader" } }, ["Struct"] = { ["__typeof"] = "Struct", ["fromhex"] = '<function 240>', ["pack"] = '<function 241>', ["size"] = '<function 242>', ["tohex"] = '<function 243>', ["unpack"] = '<function 244>', ["values"] = '<function 245>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 21>', ["__typeof"] = "Struct" } }, ["TextWindow"] = { ["__typeof"] = "TextWindow", ["add_button"] = '<function 247>', ["append"] = '<function 248>', ["clear"] = '<function 249>', ["get_text"] = '<function 250>', ["new"] = '<function 251>', ["prepend"] = '<function 252>', ["set"] = '<function 253>', ["set_atclose"] = '<function 254>', ["set_editable"] = '<function 255>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 22>', ["__tostring"] = '<function 250>', ["__typeof"] = "TextWindow" } }, ["TreeItem"] = { ["__typeof"] = "TreeItem", ["add"] = '<function 257>', ["add_expert_info"] = '<function 258>', ["add_le"] = '<function 259>', ["add_packet_field"] = '<function 260>', 
["add_proto_expert_info"] = '<function 261>', ["add_tvb_expert_info"] = '<function 262>', ["append_text"] = '<function 263>', ["prepend_text"] = '<function 264>', ["set_generated"] = '<function 265>', ["set_hidden"] = '<function 266>', ["set_len"] = '<function 267>', ["set_text"] = '<function 268>', ['<metatable>'] = { ["__index"] = '<filtered>', ["__methods"] = '<table 23>', ["__typeof"] = "TreeItem" } }, ["Tvb"] = { ["__typeof"] = "Tvb", ["len"] = '<function 270>', ["offset"] = '<function 271>', ["range"] = '<function 272>', ["raw"] = '<function 273>', ["reported_len"] = '<function 274>', ["reported_length_remaining"] = '<function 275>', ['<metatable>'] = { ["__call"] = '<function 272>', ["__index"] = '<filtered>', ["__methods"] = '<table 24>', ["__tostring"] = '<function 277>', ["__typeof"] = "Tvb" } }, ["TvbRange"] = { ["__typeof"] = "TvbRange", ["bitfield"] = '<function 278>', ["bytes"] = '<function 279>', ["ether"] = '<function 280>', ["float"] = '<function 281>', ["int"] = '<function 282>', ["int64"] = '<function 283>', ["ipv4"] = '<function 284>', ["le_float"] = '<function 285>', ["le_int"] = '<function 286>', ["le_int64"] = '<function 287>', ["le_ipv4"] = '<function 288>', ["le_nstime"] = '<function 289>', ["le_uint"] = '<function 290>', ["le_uint64"] = '<function 291>', ["le_ustring"] = '<function 292>', ["le_ustringz"] = '<function 293>', ["len"] = '<function 294>', ["nstime"] = '<function 295>', ["offset"] = '<function 296>', ["range"] = '<function 297>', ["raw"] = '<function 298>', ["string"] = '<function 299>', ["stringz"] = '<function 300>', ["strsize"] = '<function 301>', ["tvb"] = '<function 302>', ["uint"] = '<function 303>', ["uint64"] = '<function 304>', ["uncompress"] = '<function 305>', ["ustring"] = '<function 306>', ["ustringz"] = '<function 307>', ['<metatable>'] = { ["__call"] = '<function 297>', ["__concat"] = '<function 167>', ["__index"] = '<filtered>', ["__methods"] = '<table 25>', ["__tostring"] = '<function 309>', ["__typeof"] = "TvbRange" } }, ["UInt64"] = { ["__typeof"] = "UInt64", ["arshift"] = '<function 310>', ["band"] = '<function 311>', ["bnot"] = '<function 312>', ["bor"] = '<function 313>', ["bswap"] = '<function 314>', ["bxor"] = '<function 315>', ["decode"] = '<function 316>', ["encode"] = '<function 317>', ["fromhex"] = '<function 318>', ["higher"] = '<function 319>', ["lower"] = '<function 320>', ["lshift"] = '<function 321>', ["max"] = '<function 322>', ["min"] = '<function 323>', ["new"] = '<function 324>', ["rol"] = '<function 325>', ["ror"] = '<function 326>', ["rshift"] = '<function 327>', ["tohex"] = '<function 328>', ["tonumber"] = '<function 329>', ['<metatable>'] = { ["__add"] = '<function 330>', ["__call"] = '<function 331>', ["__concat"] = '<function 167>', ["__div"] = '<function 332>', ["__eq"] = '<function 333>', ["__index"] = '<filtered>', ["__le"] = '<function 335>', ["__lt"] = '<function 336>', ["__methods"] = '<table 26>', ["__mod"] = '<function 337>', ["__mul"] = '<function 338>', ["__pow"] = '<function 339>', ["__sub"] = '<function 340>', ["__tostring"] = '<function 341>', ["__typeof"] = "UInt64", ["__unm"] = '<function 342>' } }, ["USER_DIR"] = '<filtered>', ["_G"] = '<table 1>', ["_VERSION"] = '<filtered>', ["all_field_infos"] = '<function 343>', ["apply_filter"] = '<function 344>', ["assert"] = '<function 345>', ["base"] = { ["CUSTOM"] = 6, ["DEC"] = 1, ["DEC_HEX"] = 4, ["HEX"] = 2, ["HEX_DEC"] = 5, ["NONE"] = 0, ["OCT"] = 3 }, ["bit"] = { ["arshift"] = '<function 346>', ["band"] = '<function 347>', ["bnot"] = '<function 
348>', ["bor"] = '<function 349>', ["bswap"] = '<function 350>', ["bxor"] = '<function 351>', ["lshift"] = '<function 352>', ["rol"] = '<function 353>', ["ror"] = '<function 354>', ["rshift"] = '<function 355>', ["tobit"] = '<function 356>', ["tohex"] = '<function 357>' }, ["bit32"] = { ["arshift"] = '<function 358>', ["band"] = '<function 359>', ["bnot"] = '<function 360>', ["bor"] = '<function 361>', ["btest"] = '<function 362>', ["bxor"] = '<function 363>', ["extract"] = '<function 364>', ["lrotate"] = '<function 365>', ["lshift"] = '<function 366>', ["replace"] = '<function 367>', ["rrotate"] = '<function 368>', ["rshift"] = '<function 369>' }, ["browser_open_data_file"] = '<function 370>', ["browser_open_url"] = '<function 371>', ["collectgarbage"] = '<function 372>', ["copy_to_clipboard"] = '<function 373>', ["coroutine"] = { ["create"] = '<function 374>', ["resume"] = '<function 375>', ["running"] = '<function 376>', ["status"] = '<function 377>', ["wrap"] = '<function 378>', ["yield"] = '<function 379>' }, ["datafile_path"] = '<function 33>', ["deregister_filehandler"] = '<function 382>', ["dofile"] = '<function 383>', ["enable_lua"] = true, ["error"] = '<function 384>', ["expert"] = { ["group"] = { ["CHECKSUM"] = 16777216, ["COMMENTS_GROUP"] = 184549376, ["DEBUG"] = 134217728, ["MALFORMED"] = 117440512, ["PROTOCOL"] = 150994944, ["REASSEMBLE"] = 100663296, ["REQUEST_CODE"] = 67108864, ["RESPONSE_CODE"] = 50331648, ["SECURITY"] = 167772160, ["SEQUENCE"] = 33554432, ["UNDECODED"] = 83886080 }, ["severity"] = { ["CHAT"] = 2097152, ["COMMENT"] = 1048576, ["ERROR"] = 8388608, ["NOTE"] = 4194304, ["WARN"] = 6291456 } }, ["file_exists"] = '<function 385>', ["format_date"] = '<function 386>', ["format_time"] = '<function 387>', ["ftypes"] = { ["ABSOLUTE_TIME"] = 15, ["AX25"] = 31, ["BOOLEAN"] = 2, ["BYTES"] = 21, ["DOUBLE"] = 14, ["ETHER"] = 20, ["EUI64"] = 30, ["FLOAT"] = 13, ["FRAMENUM"] = 26, ["GUID"] = 28, ["INT16"] = 9, ["INT24"] = 10, ["INT32"] = 11, ["INT64"] = 12, ["INT8"] = 8, ["IPXNET"] = 25, ["IPv4"] = 23, ["IPv6"] = 24, ["NONE"] = 0, ["OID"] = 29, ["PROTOCOL"] = 1, ["RELATIVE_TIME"] = 16, ["REL_OID"] = 33, ["STRING"] = 17, ["STRINGZ"] = 18, ["STRINGZPAD"] = 35, ["SYSTEM_ID"] = 34, ["UINT16"] = 4, ["UINT24"] = 5, ["UINT32"] = 6, ["UINT64"] = 7, ["UINT8"] = 3, ["UINT_BYTES"] = 22, ["UINT_STRING"] = 19, ["VINES"] = 32 }, ["get_filter"] = '<function 388>', ["get_version"] = '<function 389>', ["getmetatable"] = '<function 390>', ["gui_enabled"] = '<function 391>', ["h225_cs_type"] = { [1] = "H225_CALL_PROCEDING", [2] = "H225_CONNECT", [3] = "H225_ALERTING", [4] = "H225_INFORMATION", [5] = "H225_RELEASE_COMPLET", [6] = "H225_FACILITY", [7] = "H225_PROGRESS", [8] = "H225_EMPTY", [9] = "H225_STATUS", [10] = "H225_STATUS_INQUIRY", [11] = "H225_SETUP_ACK", [12] = "H225_NOTIFY", [13] = "H225_OTHER", [0] = "H225_SETUP" }, ["h225_msg_type"] = { [1] = "H225_CS", [2] = "H225_OTHERS", [0] = "H225_RAS" }, ["init_routines"] = {}, ["io"] = { ["close"] = '<function 393>', ["flush"] = '<function 394>', ["input"] = '<function 395>', ["lines"] = '<function 396>', ["open"] = '<function 397>', ["output"] = '<function 398>', ["popen"] = '<function 399>', ["read"] = '<function 400>', ["stderr"] = '<userdata 1>', ["stdin"] = '<userdata 2>', ["stdout"] = '<userdata 3>', ["tmpfile"] = '<function 401>', ["type"] = '<function 402>', ["write"] = '<function 403>' }, ["ipairs"] = '<function 404>', ["load"] = '<function 405>', ["loadfile"] = '<function 406>', ["loadstring"] = '<function 405>', ["math"] = { 
["abs"] = '<function 407>', ["acos"] = '<function 408>', ["asin"] = '<function 409>', ["atan"] = '<function 410>', ["atan2"] = '<function 411>', ["ceil"] = '<function 412>', ["cos"] = '<function 413>', ["cosh"] = '<function 414>', ["deg"] = '<function 415>', ["exp"] = '<function 416>', ["floor"] = '<function 417>', ["fmod"] = '<function 418>', ["frexp"] = '<function 419>', ["huge"] = '<number inf>', ["ldexp"] = '<function 420>', ["log"] = '<function 421>', ["log10"] = '<function 422>', ["max"] = '<function 423>', ["min"] = '<function 424>', ["modf"] = '<function 425>', ["pi"] = 3.1415926535898, ["pow"] = '<function 426>', ["rad"] = '<function 427>', ["random"] = '<function 428>', ["randomseed"] = '<function 429>', ["sin"] = '<function 430>', ["sinh"] = '<function 431>', ["sqrt"] = '<function 432>', ["tan"] = '<function 433>', ["tanh"] = '<function 434>' }, ["module"] = '<function 436>', ["new_dialog"] = '<function 437>', ["next"] = '<function 438>', ["open_capture_file"] = '<function 439>', ["os"] = { ["clock"] = '<function 440>', ["date"] = '<function 441>', ["difftime"] = '<function 442>', ["execute"] = '<function 443>', ["exit"] = '<function 444>', ["getenv"] = '<function 445>', ["remove"] = '<function 446>', ["rename"] = '<function 447>', ["setlocale"] = '<function 448>', ["time"] = '<function 449>', ["tmpname"] = '<function 450>' }, ["package"] = { ["config"] = '<filtered>', ["cpath"] = '<filtered>', ["loaded"] = '<filtered>', ["loaders"] = { [1] = '<function 451>', [2] = '<function 452>', [3] = '<function 453>', [4] = '<function 454>' }, ["loadlib"] = '<function 455>', ["path"] = '<filtered>', ["preload"] = {}, ["prepend_path"] = '<function 456>', ["searchers"] = '<table 33>', ["searchpath"] = '<function 457>', ["seeall"] = '<function 458>' }, ["pairs"] = '<function 459>', ["pcall"] = '<function 460>', ["persconffile_path"] = '<function 37>', ["prefs_changed"] = {}, ["print"] = '<function 461>', ["rawequal"] = '<function 462>', ["rawget"] = '<function 463>', ["rawlen"] = '<function 464>', ["rawset"] = '<function 465>', ["register_filehandler"] = '<function 466>', ["register_menu"] = '<function 467>', ["register_postdissector"] = '<function 468>', ["register_stat_cmd_arg"] = '<function 469>', ["reload"] = '<function 470>', ["report_failure"] = '<function 471>', ["require"] = '<function 472>', ["retap_packets"] = '<function 473>', ["rex_pcre2"] = { ["_VERSION"] = "Lrexlib 2.9.1 (for PCRE2)" }, ["run_user_scripts_when_superuser"] = '<filtered>', ["running_superuser"] = '<filtered>', ["select"] = '<function 474>', ["set_color_filter_slot"] = '<function 475>', ["set_filter"] = '<function 476>', ["setmetatable"] = '<function 477>', ["string"] = { ["byte"] = '<function 478>', ["char"] = '<function 479>', ["dump"] = '<function 480>', ["find"] = '<function 481>', ["format"] = '<function 482>', ["gmatch"] = '<function 483>', ["gsub"] = '<function 484>', ["len"] = '<function 485>', ["lower"] = '<function 486>', ["match"] = '<function 487>', ["rep"] = '<function 488>', ["reverse"] = '<function 489>', ["sub"] = '<function 490>', ["upper"] = '<function 491>' }, ["table"] = { ["concat"] = '<function 492>', ["insert"] = '<function 493>', ["maxn"] = '<function 494>', ["pack"] = '<function 495>', ["remove"] = '<function 496>', ["sort"] = '<function 497>', ["unpack"] = '<function 498>' }, ["tonumber"] = '<function 499>', ["tostring"] = '<function 500>', ["type"] = '<function 501>', ["typeof"] = '<function 502>', ["unpack"] = '<function 498>', ["wtap"] = { ["APPLE_IP_OVER_IEEE1394"] = 62, ["ARCNET"] = 
8, ["ARCNET_LINUX"] = 9, ["ASCEND"] = 16, ["ATM_PDUS"] = 13, ["ATM_PDUS_UNTRUNCATED"] = 14, ["ATM_RFC1483"] = 10, ["AX25"] = 148, ["AX25_KISS"] = 147, ["BACNET_MS_TP"] = 63, ["BACNET_MS_TP_WITH_PHDR"] = 143, ["BER"] = 90, ["BLUETOOTH_BREDR_BB"] = 160, ["BLUETOOTH_H4"] = 41, ["BLUETOOTH_H4_WITH_PHDR"] = 99, ["BLUETOOTH_HCI"] = 102, ["BLUETOOTH_LE_LL"] = 154, ["BLUETOOTH_LE_LL_WITH_PHDR"] = 161, ["BLUETOOTH_LINUX_MONITOR"] = 159, ["CAN20B"] = 109, ["CATAPULT_DCT2000"] = 89, ["CHDLC"] = 28, ["CHDLC_WITH_PHDR"] = 40, ["CISCO_IOS"] = 29, ["COSINE"] = 34, ["DBUS"] = 146, ["DOCSIS"] = 33, ["DPNSS"] = 117, ["DVBCI"] = 132, ["ENC"] = 38, ["EPON"] = 172, ["ERF"] = 98, ["ETHERNET"] = 1, ["FDDI"] = 5, ["FDDI_BITSWAPPED"] = 6, ["FIBRE_CHANNEL_FC2"] = 121, ["FIBRE_CHANNEL_FC2_WITH_FRAME_DELIMS"] = 122, ["FLEXRAY"] = 106, ["FRELAY"] = 26, ["FRELAY_WITH_PHDR"] = 27, ["GCOM_SERIAL"] = 78, ["GCOM_TIE1"] = 77, ["GPRS_LLC"] = 66, ["GSM_UM"] = 116, ["HHDLC"] = 32, ["I2C_LINUX"] = 112, ["IEEE802_15_4"] = 104, ["IEEE802_15_4_NOFCS"] = 127, ["IEEE802_15_4_NONASK_PHY"] = 113, ["IEEE802_16_MAC_CPS"] = 93, ["IEEE_802_11"] = 20, ["IEEE_802_11_AVS"] = 24, ["IEEE_802_11_NETMON"] = 126, ["IEEE_802_11_PRISM"] = 21, ["IEEE_802_11_RADIOTAP"] = 23, ["IEEE_802_11_WITH_RADIO"] = 22, ["INFINIBAND"] = 150, ["IPMB_KONTRON"] = 103, ["IPMI_TRACE"] = 173, ["IPNET"] = 124, ["IP_OVER_FC"] = 18, ["IP_OVER_IB_PCAP"] = 180, ["IP_OVER_IB_SNOOP"] = 137, ["IRDA"] = 44, ["ISDN"] = 17, ["IXVERIWAVE"] = 144, ["JPEG_JFIF"] = 123, ["JUNIPER_ATM1"] = 67, ["JUNIPER_ATM2"] = 68, ["JUNIPER_CHDLC"] = 86, ["JUNIPER_ETHER"] = 83, ["JUNIPER_FRELAY"] = 85, ["JUNIPER_GGSN"] = 87, ["JUNIPER_MLFR"] = 82, ["JUNIPER_MLPPP"] = 81, ["JUNIPER_PPP"] = 84, ["JUNIPER_PPPOE"] = 76, ["JUNIPER_SVCS"] = 151, ["JUNIPER_VP"] = 91, ["K12"] = 80, ["LAPB"] = 12, ["LAPD"] = 131, ["LAYER1_EVENT"] = 110, ["LIN"] = 107, ["LINUX_ATM_CLIP"] = 11, ["LINUX_LAPD"] = 88, ["LOCALTALK"] = 30, ["LOGCAT"] = 163, ["LOGCAT_BRIEF"] = 164, ["LOGCAT_LONG"] = 170, ["LOGCAT_PROCESS"] = 165, ["LOGCAT_TAG"] = 166, ["LOGCAT_THREAD"] = 167, ["LOGCAT_THREADTIME"] = 169, ["LOGCAT_TIME"] = 168, ["MIME"] = 134, ["MOST"] = 108, ["MPEG"] = 96, ["MPEG_2_TS"] = 138, ["MTP2"] = 42, ["MTP2_WITH_PHDR"] = 75, ["MTP3"] = 43, ["MUX27010"] = 133, ["NETANALYZER"] = 135, ["NETANALYZER_TRANSPARENT"] = 136, ["NETLINK"] = 158, ["NETTL_ETHERNET"] = 71, ["NETTL_FDDI"] = 73, ["NETTL_RAW_ICMP"] = 64, ["NETTL_RAW_ICMPV6"] = 65, ["NETTL_RAW_IP"] = 70, ["NETTL_RAW_TELNET"] = 94, ["NETTL_TOKEN_RING"] = 72, ["NETTL_UNKNOWN"] = 74, ["NETTL_X25"] = 79, ["NFC_LLCP"] = 140, ["NFLOG"] = 141, ["NSTRACE_1_0"] = 119, ["NSTRACE_2_0"] = 120, ["NSTRACE_3_0"] = 162, ["NULL"] = 15, ["OLD_PFLOG"] = 31, ["PACKETLOGGER"] = 118, ["PER_PACKET"] = -1, ["PFLOG"] = 39, ["PKTAP"] = 171, ["PPI"] = 97, ["PPP"] = 4, ["PPP_ETHER"] = 139, ["PPP_WITH_PHDR"] = 19, ["RAW_IP"] = 7, ["RAW_IP4"] = 129, ["RAW_IP6"] = 130, ["RAW_IPFIX"] = 128, ["REDBACK"] = 69, ["RTAC_SERIAL"] = 153, ["SCCP"] = 101, ["SCTP"] = 149, ["SDH"] = 145, ["SDLC"] = 36, ["SITA"] = 100, ["SLIP"] = 3, ["SLL"] = 25, ["SOCKETCAN"] = 125, ["STANAG_4607"] = 156, ["STANAG_5066_D_PDU"] = 157, ["SYMANTEC"] = 61, ["TNEF"] = 114, ["TOKEN_RING"] = 2, ["TZSP"] = 37, ["UNKNOWN"] = 0, ["USB_FREEBSD"] = 92, ["USBPCAP"] = 152, ["USB_LINUX"] = 95, ["USB_LINUX_MMAPPED"] = 115, ["USER0"] = 45, ["USER1"] = 46, ["USER10"] = 55, ["USER11"] = 56, ["USER12"] = 57, ["USER13"] = 58, ["USER14"] = 59, ["USER15"] = 60, ["USER2"] = 47, ["USER3"] = 48, ["USER4"] = 49, ["USER5"] = 50, ["USER6"] = 51, ["USER7"] = 52, 
["USER8"] = 53, ["USER9"] = 54, ["V5_EF"] = 142, ["WFLEET_HDLC"] = 35, ["WIRESHARK_UPPER_PDU"] = 156, ["X2E_SERIAL"] = 111, ["X2E_XORAYA"] = 105 }, ["wtap_comments"] = { ["PER_INTERFACE"] = 2, ["PER_PACKET"] = 4, ["PER_SECTION"] = 1 }, ["wtap_encaps"] = '<table 36>', ["wtap_filetypes"] = { ["5VIEWS"] = 9, ["AETHRA"] = 60, ["ASCEND"] = 26, ["BER"] = 12, ["BTSNOOP"] = 50, ["CAMINS"] = 64, ["CATAPULT_DCT2000"] = 14, ["COMMVIEW"] = 49, ["COSINE"] = 17, ["CSIDS"] = 18, ["DAINTREE_SNA"] = 54, ["DBS_ETHERWATCH"] = 19, ["DCT3TRACE"] = 52, ["ERF"] = 20, ["EYESDN"] = 21, ["HCIDUMP"] = 13, ["I4BTRACE"] = 25, ["IPFIX"] = 58, ["IPTRACE_1_0"] = 10, ["IPTRACE_2_0"] = 11, ["ISERIES"] = 23, ["ISERIES_UNICODE"] = 24, ["JPEG_JFIF"] = 57, ["K12"] = 40, ["K12TEXT"] = 47, ["LANALYZER"] = 34, ["LOGCAT"] = 67, ["LOGCAT_BRIEF"] = 68, ["LOGCAT_LONG"] = 74, ["LOGCAT_PROCESS"] = 69, ["LOGCAT_TAG"] = 70, ["LOGCAT_THREAD"] = 71, ["LOGCAT_THREADTIME"] = 73, ["LOGCAT_TIME"] = 72, ["MIME"] = 59, ["MPEG"] = 46, ["MPEG_2_TS"] = 61, ["NETSCALER_1_0"] = 55, ["NETSCALER_2_0"] = 56, ["NETSCALER_3_0"] = 66, ["NETSCREEN"] = 48, ["NETTL"] = 22, ["NETWORK_INSTRUMENTS"] = 33, ["NETXRAY_1_0"] = 16, ["NETXRAY_1_1"] = 31, ["NETXRAY_OLD"] = 15, ["NGSNIFFER_COMPRESSED"] = 30, ["NGSNIFFER_UNCOMPRESSED"] = 29, ["PACKETLOGGER"] = 53, ["PCAP"] = 1, ["PCAPNG"] = 2, ["PCAP_AIX"] = 4, ["PCAP_NOKIA"] = 6, ["PCAP_NSEC"] = 3, ["PCAP_SS990417"] = 7, ["PCAP_SS990915"] = 8, ["PCAP_SS991029"] = 5, ["PEEKCLASSIC_V56"] = 43, ["PEEKCLASSIC_V7"] = 44, ["PEEKTAGGED"] = 45, ["PPPDUMP"] = 35, ["RADCOM"] = 36, ["SHOMITI"] = 38, ["SNOOP"] = 37, ["STANAG_4607"] = 65, ["TNEF"] = 51, ["TOSHIBA"] = 41, ["TSPREC_CSEC"] = 2, ["TSPREC_DSEC"] = 1, ["TSPREC_MSEC"] = 3, ["TSPREC_NSEC"] = 9, ["TSPREC_SEC"] = 0, ["TSPREC_USEC"] = 6, ["UNKNOWN"] = 0, ["VISUAL_NETWORKS"] = 42, ["VMS"] = 39, ["VWR_80211"] = 62, ["VWR_ETH"] = 63 }, ["wtap_presence_flags"] = { ["CAP_LEN"] = 2, ["INTERFACE_ID"] = 4, ["TS"] = 1 }, ["wtap_rec_types"] = { ["FT_SPECIFIC_EVENT"] = 1, ["FT_SPECIFIC_REPORT"] = 2, ["PACKET"] = 0 }, ["xpcall"] = '<function 504>' }
Lua
wireshark/test/lua/inspect.lua
------------------------------------------------------------------- -- This was changed for Wireshark's use by Hadriel Kaplan. -- -- Changes made: -- * provided 'serialize' option to output serialized info (i.e., can be marshaled), -- though note that serializing functions/metatables/userdata/threads will not -- magically make them be their original type when marshaled. -- * provided 'notostring' option, which if true will disable calling the __tostring -- metamethod of tables. -- * made it always print the index number of numbered-array entries, and on separate -- lines like the normal key'd entries (much easier to read this way I think) -- New public functions: -- inspect.compare(first,second[,options]) -- inspect.marshal(inString[,options]) -- inspect.makeFilter(arrayTable) -- -- For the *changes*: -- Copyright (c) 2014, Hadriel Kaplan -- My change to the code is in the Public Domain, or the BSD (3 clause) license if -- Public Domain does not apply in your country, or you would prefer a BSD license. -- But the original code is still under Enrique García Cota's MIT license (below). ------------------------------------------------------------------- local inspect = { _VERSION = 'inspect.lua 2.0.0 - with changes', _URL = 'http://github.com/kikito/inspect.lua', _DESCRIPTION = 'human-readable representations of tables', _LICENSE = [[ MIT LICENSE Copyright (c) 2013 Enrique García Cota Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ]], _TINDEX_KEY = '<index>', -- the key name to use for index number entries for tables _DEPTH_MARKER = " ['<depth>'] = true " -- instead of printing '...' we print this } -- Apostrophizes the string if it has quotes, but not apostrophes -- Otherwise, it returns a regular quoted string local function smartQuote(str) if str:match('"') and not str:match("'") then return "'" .. str .. "'" end return '"' .. str:gsub('"', '\\"') .. 
'"' end local controlCharsTranslation = { ["\a"] = "\\a", ["\b"] = "\\b", ["\f"] = "\\f", ["\n"] = "\\n", ["\r"] = "\\r", ["\t"] = "\\t", ["\v"] = "\\v" } local function escapeChar(c) return controlCharsTranslation[c] end local function escape(str) local result = str:gsub("\\", "\\\\"):gsub("(%c)", escapeChar) return result end local function isIdentifier(str) return type(str) == 'string' and str:match( "^[_%a][_%a%d]*$" ) end local function isArrayKey(k, length) return type(k) == 'number' and 1 <= k and k <= length end local function isDictionaryKey(k, length) return not isArrayKey(k, length) end local defaultTypeOrders = { ['number'] = 1, ['boolean'] = 2, ['string'] = 3, ['table'] = 4, ['function'] = 5, ['userdata'] = 6, ['thread'] = 7 } local function sortKeys(a, b) local ta, tb = type(a), type(b) -- strings and numbers are sorted numerically/alphabetically if ta == tb and (ta == 'string' or ta == 'number') then return a < b end local dta, dtb = defaultTypeOrders[ta], defaultTypeOrders[tb] -- Two default types are compared according to the defaultTypeOrders table if dta and dtb then return defaultTypeOrders[ta] < defaultTypeOrders[tb] elseif dta then return true -- default types before custom ones elseif dtb then return false -- custom types after default ones end -- custom types are sorted out alphabetically return ta < tb end local function getDictionaryKeys(t) local keys, length = {}, #t for k,_ in pairs(t) do if isDictionaryKey(k, length) then table.insert(keys, k) end end table.sort(keys, sortKeys) return keys end local function getToStringResultSafely(t, mt) local __tostring = type(mt) == 'table' and rawget(mt, '__tostring') local str, ok if type(__tostring) == 'function' then ok, str = pcall(__tostring, t) str = ok and str or 'error: ' .. 
tostring(str) end if type(str) == 'string' and #str > 0 then return str end end local maxIdsMetaTable = { __index = function(self, typeName) rawset(self, typeName, 0) return 0 end } local idsMetaTable = { __index = function (self, typeName) local col = setmetatable({}, {__mode = "kv"}) rawset(self, typeName, col) return col end } local function countTableAppearances(t, tableAppearances) tableAppearances = tableAppearances or setmetatable({}, {__mode = "k"}) if type(t) == 'table' then if not tableAppearances[t] then tableAppearances[t] = 1 for k,v in pairs(t) do countTableAppearances(k, tableAppearances) countTableAppearances(v, tableAppearances) end countTableAppearances(getmetatable(t), tableAppearances) else tableAppearances[t] = tableAppearances[t] + 1 end end return tableAppearances end local function parse_filter(filter) if type(filter) == 'function' then return filter end -- not a function, so it must be a table or table-like filter = type(filter) == 'table' and filter or {filter} local dictionary = {} for _,v in pairs(filter) do dictionary[v] = true end return function(x) return dictionary[x] end end local function makePath(path, key) local newPath, len = {}, #path for i=1, len do newPath[i] = path[i] end newPath[len+1] = key return newPath end ------------------------------------------------------------------- function inspect.inspect(rootObject, options) options = options or {} local depth = options.depth or math.huge local filter = parse_filter(options.filter or {}) local serialize = options.serialize local depth_marker = inspect._DEPTH_MARKER local tableAppearances = countTableAppearances(rootObject) local buffer = {} local maxIds = setmetatable({}, maxIdsMetaTable) local ids = setmetatable({}, idsMetaTable) local level = 0 local blen = 0 -- buffer length local function puts(...) local args = {...} for i=1, #args do blen = blen + 1 buffer[blen] = tostring(args[i]) end end -- like puts above, but for things we want as quoted strings -- so they become values, as we do if serializing local function putv(...) blen = blen + 1 buffer[blen] = "'" puts(...) blen = blen + 1 buffer[blen] = "'" end -- if serializing, using raw strings is unsafe, so we use the full "['key']" style local function putk(...) blen = blen + 1 buffer[blen] = "['" puts(...) blen = blen + 1 buffer[blen] = "']" end -- if not serializing, it's all puts if not serialize then putv = puts putk = puts depth_marker = '...' 
end -- disable using __tostring metamethod local getToStringResultSafely = getToStringResultSafely if options.notostring or serialize then getToStringResultSafely = function() return end end local function down(f) level = level + 1 f() level = level - 1 end local function tabify() puts("\n", string.rep(" ", level)) end local function commaControl(needsComma) if needsComma then puts(',') end return true end local function alreadyVisited(v) return ids[type(v)][v] ~= nil end local function getId(v) local tv = type(v) local id = ids[tv][v] if not id then id = maxIds[tv] + 1 maxIds[tv] = id ids[tv][v] = id end return id end local putValue -- forward declaration that needs to go before putTable & putKey local function putKey(k) if not serialize and isIdentifier(k) then return puts(k) end puts("[") putValue(k, {}) puts("]") end local function putTable(t, path) if alreadyVisited(t) then putv('<table ', getId(t), '>') elseif level >= depth then puts('{', depth_marker, '}') else if not serialize and tableAppearances[t] > 1 then puts('<', getId(t), '>') end local dictKeys = getDictionaryKeys(t) local length = #t local mt = getmetatable(t) local to_string_result = getToStringResultSafely(t, mt) puts('{') down(function() if to_string_result then puts(' -- ', escape(to_string_result)) if length >= 1 then tabify() end -- tabify the array values end local needsComma = false if serialize and tableAppearances[t] > 1 then getId(t) end for i=1, length do needsComma = commaControl(needsComma) -- just doing puts(' ') made for ugly arrays tabify() putKey(i) puts(' = ') putValue(t[i], makePath(path, i)) end for _,k in ipairs(dictKeys) do needsComma = commaControl(needsComma) tabify() putKey(k) puts(' = ') putValue(t[k], makePath(path, k)) end if mt then needsComma = commaControl(needsComma) tabify() putk('<metatable>') puts(' = ') putValue(mt, makePath(path, '<metatable>')) end end) if #dictKeys > 0 or mt then -- dictionary table. Justify closing } tabify() elseif length > 0 then -- array tables have one extra space before closing } puts(' ') end puts('}') end end -- putvalue is forward-declared before putTable & putKey putValue = function(v, path) if filter(v, path) then putv('<filtered>') else local tv = type(v) if tv == 'string' then puts(smartQuote(escape(v))) elseif tv == 'number' and v == math.huge then putv('<number inf>') elseif tv == 'number' or tv == 'boolean' or tv == 'nil' then puts(tostring(v)) elseif tv == 'table' then putTable(v, path) else putv('<',tv,' ',getId(v),'>') end end end putValue(rootObject, {}) return table.concat(buffer) end setmetatable(inspect, { __call = function(_, ...) return inspect.inspect(...) end }) ------------------------------------------------------------------- -- The above is very close to Enrique's original inspect library. -- Below are my main changes. ------------------------------------------------------------------- -- Given a string generated by inspect() with the serialize option, -- this function marshals it back into a Lua table/whatever. -- If the string's table(s) had metatable(s), i.e. "<metatable>" tables, -- then this keeps them as "<metatable>" subtables unless the option -- 'nometa' is set to true. -- -- This function also removes all "<index>" entries. 
-- function inspect.marshal(inString, options) options = options or {} local index = inspect._TINDEX_KEY local function removeIndex(t) if type(t) == 'table' then t[index] = nil for _, v in pairs(t) do removeIndex(v) end end end local function removeMeta(t) if type(t) == 'table' then t['<metatable>'] = nil for _, v in pairs(t) do removeMeta(v) end end end -- first skip past comments/empty-lines -- warning: super-hack-ish weak local pos, ok, dk = 1, true, true local fin local stop = string.len(inString) while ok or dk do ok, fin = inString:find("^[%s\r\n]+",pos) if ok then pos = fin + 1 end dk, fin = inString:find("^%-%-.-\n",pos) if dk then pos = fin + 1 end end if not inString:find("^%s*return[%s%{]",pos) then inString = "return " .. inString end local t = assert(loadstring(inString))() removeIndex(t) if options.nometa then removeMeta(t) end return t end ------------------------------------------------------------------- ------------------------------------------------------------------- -- more private functions -- things like '<function>' are equal to '<function 32>' local mungetypes = { {"^<function ?%d*>", '<function>'}, {"^<table ?%d*>", '<table>'}, {"^<userdata ?%d*>", '<userdata>'}, {"^<thread ?%d*>", '<thread>'} } local function normalizeString(s) for _,t in ipairs(mungetypes) do if s:find(t[1]) then return t[2] end end return s end local typetable = { ['<function>'] = 'function', ['<table>'] = 'table', ['<userdata>'] = 'userdata', ['<thread>'] = 'thread' } local function getType(v) local tv = type(v) if tv == 'string' then tv = typetable[normalizeString(v)] or 'string' end return tv end local function tablelength(t) local count = 0 for _ in pairs(t) do count = count + 1 end return count end -- for pretty-printing paths, for debug output -- this is non-optimal, but only gets used in verbose mode anyway local function serializePath(path) local t = {} for i,k in ipairs(path) do local tk = type(k) if isIdentifier(k) then t[i] = ((i == 1) and k) or ('.'..k) elseif tk == 'string' then t[i] = '[' .. smartQuote(escape(k)) .. ']' elseif tk == 'number' or tk == 'boolean' then t[i] = '[' .. tostring(k) .. ']' else t[i] = "['<" .. tk .. ">']" end end if #t == 0 then t[1] = '{}' end return table.concat(t) end ------------------------------------------------------------------- ------------------------------------------------------------------- -- Given one table and another, this function detects if the first is -- completely contained in the second object. The second can have more -- entries, but cannot be missing an entry in the first one. Entry values -- must match as well - i.e., string values are the same, numbers the -- same, booleans the same. -- -- The function returns true if the first is in the second, false otherwise. -- It also returns a table of the diff, which will be empty if they matched. -- This returned table is structured like the first one passed in, -- so calling print(inspect(returnedTabled)) will make it pretty print. -- -- The returned table's members have their values replaced with mismatch -- information, explaining what the mismatch was. Setting the option "keep" -- makes it not replace the values, but keep them as they were in the first -- table. -- -- By default, the key's values must match in both tables. If the option -- 'nonumber' is set, then number values are not compared. This is useful -- if they're things that can change (like exported C-code numbers). -- -- By default, the metatables/"<metatables>" are also compared. 
If the option -- 'nometa' is set, then metatables are not compared, nor does it matter if -- they exist in either table. -- -- Like inspect(), there's a 'filter' option, which works the same way: -- it ignores its value completely in terms of matching, so their string values -- can be different, but the keys still have to exist. Sub-tables of -- such keys (i.e., if the key's value is a table) are not checked/compared. -- In other words, it's identical to the filter option for inspect(). -- -- The option 'ignore' is similar to 'filter', except matching ones -- are not checked for existence in the tables at all. -- -- Setting the 'depth' option applies as in inspect(), to both tables. -- -- Setting the option 'verbose' makes it print out as it compares, for -- debugging or test purposes. -- function inspect.compare(firstTable, secondTable, options) options = options or {} local depth = options.depth or math.huge local filter = parse_filter(options.filter or {}) local ignore = parse_filter(options.ignore or {}) local function puts(...) local args = {...} for i=1, #args do blen = blen + 1 buffer[blen] = tostring(args[i]) end end -- for debug printing local function dprint(...) local args = {...} print(table.concat(args)) end local serializePath = serializePath if not options.verbose then dprint = function() return end serializePath = function() return end end -- for error message replacing key value local function emsg(...) local args = {...} return(table.concat(args)) end if options.keep then emsg = function() return end end -- declare checkValue here local checkValue local function checkTable(f, s, path) dprint("checking ",serializePath(path)," table contents") for k, v in pairs(f) do local child = makePath(path, k) if not ignore(v,child) then local ret, msg = checkValue(v, s[k], child) if ret then f[k] = nil elseif msg then f[k] = msg dprint(serializePath(child)," ",msg) end else dprint("ignoring ",serializePath(child)) f[k] = nil end end return tablelength(f) == 0 end -- a wrapper for failure cases in checkValue() that can be handled the same way local function compCheck(f,s,func) if not func() then return false, emsg("mismatched ",getType(f)," values: ",tostring(f)," --> ",tostring(s)) end return true end -- kinda ugly, but I wanted pretty information output checkValue = function(f, s, path) local tf = getType(f) dprint("checking ",serializePath(path)," (",tf,")") if s == nil then return false, emsg("missing ",tf,"!") elseif tf ~= getType(s) then return false, emsg("type mismatch (",tf,") --> (",getType(s),")") elseif type(f) == 'table' then return checkTable(f, s, path) end return compCheck(f,s,function() if tf == 'string' or tf == 'boolean' then return f == s elseif tf == 'number' then return f == s or options.nonumber else -- assume they're the same functions/userdata/looped-table -- type matching before would already cover it otherwise return true end end) end -- inspect+serialize both tables, to normalize them, separate their -- metatables, limit depth, etc. 
Also, since we pass the filter option on, -- the filtered items become "<filtered>" and will by definition match local function normalizeTable(t) return assert( inspect.marshal( inspect.inspect(t,{serialize=true,depth=depth,filter=filter}), {nometa=options.nometa} )) end local first = normalizeTable(firstTable) local second = normalizeTable(secondTable) return checkTable(first, second, {}), first end ------------------------------------------------------------------- ------------------------------------------------------------------- -- Given a table of key strings, return a function that can be used for -- the 'filter' option of inspect() and inspect.compare() functions. function inspect.makeFilter(arrayTable) local filter = {} -- our filter lookup tree (tables of tables) local matchNode = {} -- a table instance we use as a key for nodes which match local wildcard = {} -- a key table of wildcard match names local function buildFilter(pathname) local t = filter local key -- if the filtered name starts with a '.', it's a wildcard if pathname:find("^%.") then wildcard[pathname:sub(2)] = true return end for sep, name in pathname:gmatch("([%.%[\"\']*)([^%.%[\"\'%]]+)[\"\'%]]?") do if sep == '[' then if name == 'true' then key = true elseif name == 'false' then key = false else key = tonumber(name) end else -- to be safe, we'll check the key name doesn't mean a table/function/userdata local tn = getType(name) if tn == 'string' then key = name else error("filter key '"..pathname.."' has key '"..name.."' which is an unsupported type ("..tn..")") end end if not t[key] then t[key] = {} end t = t[key] end t[matchNode] = true end -- we could call serializePath() and do a simple lookup, but it's expensive and -- we'd be calling it a LOT. So instead we break up the filter -- table into true "path" elements, into a filter tree, and compare -- against it... thereby avoiding string concat/manip during compare. for _, pathname in ipairs(arrayTable) do buildFilter(pathname) end return function(value,path) local t = filter if wildcard[ path[#path] ] then return true end for _,v in ipairs(path) do if not t[v] then return false end t = t[v] end return t[matchNode] == true end end return inspect
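A short usage sketch for the two main additions, compare() and makeFilter(); the tables here are made-up illustrations, and the require() path is an assumption:

-- illustrative only (assumes inspect.lua is on package.path)
local inspect = require("inspect")

local expected = { name = "eth", fields = { "eth.src", "eth.dst" } }
local actual   = { name = "eth", fields = { "eth.src", "eth.dst" }, extra = true }

-- 'actual' may carry extra entries; compare() only requires that everything
-- in 'expected' exists in 'actual' with equal values
local ok, diff = inspect.compare(expected, actual)
print(ok)            --> true
print(inspect(diff)) --> {}  (the diff table is empty on a match)

-- filter 'name' wherever it appears: it prints as '<filtered>' and its value
-- is ignored by compare(), though the key itself must still exist
local filter = inspect.makeFilter({ ".name" }) -- leading '.' = wildcard key
print(inspect(expected, { filter = filter }))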
Lua
wireshark/test/lua/int64.lua
-- This is a test script for tshark/wireshark. -- This script runs inside tshark/wireshark, so to run it do: -- wireshark -X lua_script:<path_to_testdir>/lua/int64.lua -- tshark -r bogus.cap -X lua_script:<path_to_testdir>/lua/int64.lua -- Tests Int64/UInt64 functions local testlib = require("testlib") local OTHER = "other" testlib.init( { [OTHER] = 23 } ) -- you can't use '==' to compare userdata objects with numbers, so this function does it instead. function checkeq(arg1,arg2) if arg1 == arg2 then return true elseif type(arg1) == 'userdata' and arg1.tonumber then if type(arg2) == 'userdata' and arg2.tonumber then return arg1:tonumber() == arg2:tonumber() else return arg1:tonumber() == arg2 end elseif type(arg2) == 'userdata' and arg2.tonumber then return arg1 == arg2:tonumber() else return false end end ----------------------------- testlib.testing("Int64/UInt64 library") local testtbl = { { ["type"]=Int64, ["name"]="Int64" } , { ["type"]=UInt64, ["name"]="UInt64" }, } for i,t in ipairs(testtbl) do testlib.init( { [t.name] = 125+(t.name == "Int64" and 3 or 0) } ) testlib.testing(t.name, "class") local obj = t.type for name, val in pairs(obj) do print("\t"..name.." = "..type(val)) end testlib.test(t.name,"class1",type(obj) == 'table') testlib.test(t.name,"class2",type(obj.new) == 'function') testlib.test(t.name,"class3",type(obj.max) == 'function') testlib.test(t.name,"class4",type(obj.min) == 'function') testlib.test(t.name,"class5",type(obj.tonumber) == 'function') testlib.test(t.name,"class6",type(obj.fromhex) == 'function') testlib.test(t.name,"class7",type(obj.tohex) == 'function') testlib.test(t.name,"class8",type(obj.higher) == 'function') testlib.test(t.name,"class9",type(obj.lower) == 'function') testlib.testing(t.name, "new, tonumber, tostring") local val = 12345 local my64a = obj.new(val) local my64b = obj.new(tostring(val)) local zero = obj.new(0) -- remember in Lua it's a double, so only precise up to 9,007,199,254,740,992 local my64c = obj.new(val,100) local valc = (100 * 4294967296) + val print(tostring(my64c)) local my64z = obj.new(0,0) local my64d = obj.new(0,100) local vald = (100 * 4294967296) testlib.test(t.name,"new1",checkeq(my64a,val)) testlib.test(t.name,"new2",checkeq(my64b,val)) testlib.test(t.name,"new3",checkeq(my64a,obj.new(my64b))) testlib.test(t.name,"new3b",checkeq(my64a,obj(my64b))) testlib.test(t.name,"new4",checkeq(valc,my64c)) testlib.test(t.name,"new5",checkeq(0,my64z)) testlib.test(t.name,"new6",obj.new(0,1):tonumber() == (2^32)) if t.name == "Int64" then testlib.test(t.name,"new7",obj(-1):tonumber() == -1) testlib.test(t.name,"new8",obj.new(0,-1):tonumber() == -4294967296) testlib.test(t.name,"new9",obj(obj.new(-1)):tonumber() == -1) end testlib.test(t.name,"tonumber1",val == my64a:tonumber()) testlib.test(t.name,"tonumber2",valc == my64c:tonumber()) testlib.test(t.name,"tonumber3",vald == my64d:tonumber()) testlib.test(t.name,"tonumber4",0 == my64z:tonumber()) testlib.test(t.name,"tostring1", tostring(my64a)==tostring(val)) testlib.test(t.name,"tostring2",tostring(my64b)==tostring(val)) testlib.test(t.name,"tostring3",tostring(my64c)==tostring(valc)) testlib.test(t.name,"tostring4",tostring(my64d)==tostring(vald)) testlib.testing(t.name, "compare ops") testlib.test(t.name,"eq", my64a == my64b) testlib.test(t.name,"le1", my64a <= my64b) testlib.test(t.name,"le2", my64a <= my64c) testlib.test(t.name,"le3", my64z <= my64c) testlib.test(t.name,"ge1", my64a >= my64b) testlib.test(t.name,"ge2", my64c >= my64b) testlib.test(t.name,"ge3", my64c >= 
my64z) testlib.test(t.name,"neq1",not(my64a ~= my64b)) testlib.test(t.name,"neq2",my64a ~= obj(0)) testlib.test(t.name,"neq3",my64a ~= my64c) testlib.test(t.name,"gt1",my64a > my64z) testlib.test(t.name,"gt2",my64c > my64a) testlib.test(t.name,"lt1",not(my64a < my64b)) testlib.test(t.name,"lt2",my64a < my64c) testlib.testing(t.name, "math ops") testlib.test(t.name,"add1",checkeq(my64a + my64b, val + val)) testlib.test(t.name,"add2",my64a + my64z == my64b) testlib.test(t.name,"add3",my64a + my64b == my64b + my64a) testlib.test(t.name,"add4",my64d + my64a == my64c) testlib.test(t.name,"add5",checkeq(my64a + vald, valc)) testlib.test(t.name,"add6",checkeq(vald + my64a, valc)) testlib.test(t.name,"sub1",checkeq(my64a - my64b, 0)) testlib.test(t.name,"sub2",my64a - my64b == my64z) testlib.test(t.name,"sub3",my64a - my64b == my64b - my64a) testlib.test(t.name,"sub4",my64c - my64a == my64d) testlib.test(t.name,"sub5",checkeq(my64a - val, 0)) testlib.test(t.name,"mod1",checkeq(my64a % my64b, 0)) testlib.test(t.name,"mod2",checkeq(my64c % my64b, valc % val)) testlib.test(t.name,"mod3",checkeq(my64c % val, valc % val)) testlib.test(t.name,"mod4",checkeq(val % my64c, val % valc)) testlib.test(t.name,"div1",checkeq(my64a / my64b, 1)) testlib.test(t.name,"div2",checkeq(my64a / val, 1)) testlib.test(t.name,"div3",checkeq(val / my64a, 1)) testlib.test(t.name,"div4",my64c / my64d == obj.new(1)) testlib.test(t.name,"pow1",checkeq(my64a ^ 1, val)) testlib.test(t.name,"pow2",checkeq(my64a ^ obj.new(2), val ^ 2)) testlib.test(t.name,"pow3",checkeq(my64a ^ obj.new(3), val ^ 3)) testlib.test(t.name,"pow4",checkeq(my64c ^ 1, valc ^ 1)) testlib.test(t.name,"mul1",checkeq(my64a * obj(1), my64b)) testlib.test(t.name,"mul2",checkeq(my64a * my64b, my64b * my64a)) testlib.test(t.name,"mul3",checkeq(my64a * 1, my64b)) testlib.test(t.name,"mul4",checkeq(2 * my64c, 2 * valc)) if t.name == "Int64" then -- unary minus on UInt64 is illogical, but oh well testlib.test(t.name,"unm1",checkeq(-my64a,-val)) testlib.test(t.name,"unm2",checkeq(string.sub(tostring(-my64a),1,1), "-")) testlib.test(t.name,"unm3",checkeq(-my64c,-valc)) else testlib.test(t.name,"unm1",checkeq(-my64a,val)) testlib.test(t.name,"unm2",checkeq(string.sub(tostring(-my64a),1,1), "1")) testlib.test(t.name,"unm3",checkeq(-my64c,valc)) end testlib.test(t.name,"unm4",checkeq(-my64z,0)) testlib.testing(t.name, "methods") testlib.test(t.name,"higher1",my64a:higher() == 0) testlib.test(t.name,"higher2",my64c:higher() == 100) testlib.test(t.name,"lower1",my64a:lower() == val) testlib.test(t.name,"lower2",my64c:lower() == val) testlib.test(t.name,"lower3",my64d:lower() == 0) local vale1 = 3735928559 -- yields hex of deadbeef local vale2 = 5045997 -- yields 4cfeed local my64e = obj.new(vale1, vale2) testlib.test(t.name,"fromhex1",obj.fromhex("0000000000003039") == my64a); testlib.test(t.name,"fromhex2",obj.fromhex("3039") == my64a); testlib.test(t.name,"fromhex3",obj.fromhex("0000006400003039") == my64c); testlib.test(t.name,"fromhex4",obj.fromhex("0000000000000000") == my64z); testlib.test(t.name,"fromhex5",obj.fromhex("004cfeeddeadbeef") == my64e); testlib.test(t.name,"fromhex6",obj.fromhex("4cFEEDDEADBEEF") == my64e); testlib.test(t.name,"tohex1",my64a:tohex() == "0000000000003039") testlib.test(t.name,"tohex2",my64c:tohex(16) == "0000006400003039") testlib.test(t.name,"tohex3",my64z:tohex() == "0000000000000000") testlib.test(t.name,"tohex4",my64e:tohex() == "004cfeeddeadbeef") testlib.test(t.name,"tohex5",my64e:tohex(8) == "deadbeef") 
testlib.test(t.name,"tohex6",my64e:tohex(-8) == "DEADBEEF") testlib.test(t.name,"encode1",my64a:encode(true) == "\57\48\00\00\00\00\00\00") testlib.test(t.name,"encode2",my64a:encode(false) == "\00\00\00\00\00\00\48\57") testlib.test(t.name,"encode3",my64c:encode(false) == "\00\00\00\100\00\00\48\57") testlib.test(t.name,"decode1",obj.decode("\57\48\00\00\00\00\00\00", true) == my64a) testlib.test(t.name,"decode2",obj.decode("\00\00\00\00\00\00\48\57", false) == my64a) testlib.test(t.name,"decode3",obj.decode("\00\00\00\100\00\00\48\57", false) == my64c) local function testpower(b) testlib.testing(t.name, "powers of "..b) b=obj.new(b) local z=obj.new(1) for i=0,100 do print(i,z,b^i) assert(z==b^i) z=b*z end end testpower(2) testpower(3) testlib.testing(t.name, "factorials") F={ [1]="1", [2]="2", [3]="6", [4]="24", [5]="120", [6]="720", [7]="5040", [8]="40320", [9]="362880", [10]="3628800", [11]="39916800", [12]="479001600", [13]="6227020800", [14]="87178291200", [15]="1307674368000", [16]="20922789888000", [17]="355687428096000", [18]="6402373705728000", [19]="121645100408832000", [20]="2432902008176640000", } z=obj.new(1) f=1 for i=1,20 do z=z*i f=f*i s=obj.tonumber(z) print(i,z,f,f==obj.tonumber(z),tostring(z)==F[i]) --print(i,int64.new(F[i])) end testlib.testing(t.name, "bit operations") testlib.test(t.name,"band1",checkeq(obj(1):band(1), 1)) testlib.test(t.name,"band2",checkeq(obj(1):band(0), 0)) testlib.test(t.name,"band3",checkeq(obj(4294967295,100):band(4294967295), 4294967295)) testlib.test(t.name,"band4",obj.new(4294967295,100):band(obj(0,100),obj(0,100),obj(0,100)) == obj(0,100)) testlib.test(t.name,"band5",checkeq(obj.new(4294967295,100):band(obj.new(0,100),obj(0)), 0)) testlib.test(t.name,"bor1",checkeq(obj(1):bor(1), 1)) testlib.test(t.name,"bor2",checkeq(obj(1):bor(0), 1)) testlib.test(t.name,"bor3",checkeq(obj(0):bor(0), 0)) testlib.test(t.name,"bor4",obj.new(0,100):bor(4294967295) == obj.new(4294967295,100)) testlib.test(t.name,"bor5",obj.new(1):bor(obj(2),obj.new(4),obj(8),16,32,64,128) == obj(255)) testlib.test(t.name,"bxor1",checkeq(obj.new(1):bxor(1), 0)) testlib.test(t.name,"bxor2",checkeq(obj.new(1):bxor(0), 1)) testlib.test(t.name,"bxor3",checkeq(obj.new(0):bxor(0), 0)) testlib.test(t.name,"bxor4",obj.new(4294967295,100):bxor(obj(0,100)) == obj.new(4294967295)) testlib.test(t.name,"bxor5",obj.new(1):bxor(obj(2),obj(4),obj(8),16,32,64,128) == obj(255)) testlib.test(t.name,"bnot1",checkeq(obj.new(4294967295,4294967295):bnot(), 0)) testlib.test(t.name,"bnot2",obj.new(0):bnot() == obj.new(4294967295,4294967295)) testlib.test(t.name,"bnot3",obj.new(0xaaaaaaaa,0xaaaaaaaa):bnot() == obj.new( 0x55555555, 0x55555555)) testlib.test(t.name,"bsawp1",obj.new( 0x01020304, 0x05060708 ):bswap() == obj.new( 0x08070605, 0x04030201 )) testlib.test(t.name,"bsawp2",obj.new( 0xFF020304, 0xFF060708 ):bswap() == obj.new( 0x080706FF, 0x040302FF )) testlib.test(t.name,"lshift1",obj.new( 0x01020304, 0x0506070F ):lshift(4) == obj.new( 0x10203040, 0x506070f0 )) testlib.test(t.name,"lshift2",obj.new( 0x0102030F, 0x05060708 ):lshift(63) == obj.new( 0, 0x80000000 )) if t.name == "Int64" then testlib.test(t.name,"lshift3",checkeq(obj.new( 0x0102030F, 0x05060708 ):lshift(63), -9223372036854775808)) else testlib.test(t.name,"lshift3",obj.new( 0x0102030F, 0x05060708 ):lshift(63) == obj.new( 0, 0x80000000 )) end testlib.test(t.name,"rshift1",obj.new( 0x01020304, 0xF5060708 ):rshift(4) == obj.new( 0x80102030, 0x0F506070 )) testlib.test(t.name,"rshift2",checkeq(obj.new( 0x01020304, 0xF5060708 
):rshift(63), 1)) if t.name == "Int64" then testlib.test(t.name,"arshift1",obj.new( 0x01020304, 0xF5060708 ):arshift(4) == obj.new( 0x80102030, 0xFF506070 )) testlib.test(t.name,"arshift2",obj.new( 0x01020304, 0xF5060708 ):arshift(63) == obj.new( 0xFFFFFFFF, 0xFFFFFFFF )) else testlib.test(t.name,"arshift1",obj.new( 0x01020304, 0xF5060708 ):arshift(4) == obj.new( 0x80102030, 0x0F506070 )) testlib.test(t.name,"arshift2",checkeq(obj.new( 0x01020304, 0xF5060708 ):arshift(63),1)) end testlib.test(t.name,"arshift3",obj.new( 0x01020304, 0x05060708 ):arshift(4) == obj.new( 0x80102030, 0x00506070 )) testlib.test(t.name,"arshift4",checkeq(obj.new( 0x01020304, 0x05060708 ):arshift(63), 0)) testlib.test(t.name,"rol1",obj.new( 0x01020304, 0xF5060708 ):rol(4) == obj.new( 0x1020304F, 0x50607080 )) testlib.test(t.name,"rol2",obj.new( 0x01020304, 0xF5060708 ):rol(32):rol(32) == obj.new( 0x01020304, 0xF5060708 )) testlib.test(t.name,"ror1",obj.new( 0x01020304, 0xF5060708 ):ror(4) == obj.new( 0x80102030, 0x4F506070 )) testlib.test(t.name,"ror2",obj.new( 0x01020304, 0xF5060708 ):ror(32):ror(32) == obj.new( 0x01020304, 0xF5060708 )) end testlib.testing("min and max values") z=Int64.new(2) z=z^63-1 testlib.test(OTHER,"max1",tostring(Int64.max()) == "9223372036854775807") testlib.test(OTHER,"max2",Int64.max() == Int64.new(4294967295, 2147483647)) testlib.test(OTHER,"max3",z==Int64.max()) testlib.test(OTHER,"min1",tostring(Int64.min()) == "-9223372036854775808") testlib.test(OTHER,"min2",Int64.min() == Int64.new(0,2147483648)) z=-z z=z-1 testlib.test(OTHER,"min3",z==Int64.min()) testlib.test(OTHER,"minmax",Int64.min()== - Int64.max() - 1) --Because of g_ascii_strtoll() usage without errno check, "invalid" strings are converted to 0 testlib.testing("invalid string values") testlib.test(OTHER,"invalid",Int64.new("invalid")== Int64.new(0,0)) testlib.test(OTHER,"invalid2",UInt64.new("invalid")== UInt64.new(0,0)) testlib.testing("error conditions") local function divtest(f,s) local r = (f / s) if r == 5 then io.stdout:write("ok...") else error("test failed!") end end local function modtest(f,s) local r = (f % s) if r == 5 then io.stdout:write("ok...") else error("test failed!") end end testlib.test(OTHER,"error1", pcall(divtest, 10, 2)) -- not an error, but checking the div function works above testlib.test(OTHER,"error2", not pcall(divtest, Int64(10), 0)) testlib.test(OTHER,"error3", not pcall(divtest, Int64(10), Int64(0))) testlib.test(OTHER,"error4", not pcall(divtest, Int64(10), UInt64(0))) testlib.test(OTHER,"error5", not pcall(divtest, UInt64(10), 0)) testlib.test(OTHER,"error6", not pcall(divtest, UInt64(10), Int64(0))) testlib.test(OTHER,"error7", not pcall(divtest, UInt64(10), UInt64(0))) testlib.test(OTHER,"error8", pcall(modtest, 17, 6)) -- not an error, but checking the mod function works above testlib.test(OTHER,"error9", not pcall(modtest, Int64(10), 0)) testlib.test(OTHER,"error10", not pcall(modtest, Int64(10), Int64(0))) testlib.test(OTHER,"error11", not pcall(modtest, Int64(10), UInt64(0))) testlib.test(OTHER,"error12", not pcall(modtest, UInt64(10), 0)) testlib.test(OTHER,"error13", not pcall(modtest, UInt64(10), Int64(0))) testlib.test(OTHER,"error14", not pcall(modtest, UInt64(10), UInt64(0))) testlib.getResults()
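-- A standalone recap sketch (not wired into testlib, so it does not affect the
-- test counts): the Int64/UInt64 calls exercised above, in the shape they'd be
-- used in a real script. The values here are arbitrary examples.
local function int64_demo()
    local flags = UInt64.new(0x000000ff, 0x00000001)  -- low word, high word
    assert(flags:tohex() == "00000001000000ff")       -- high word prints first
    assert(UInt64.fromhex(flags:tohex()) == flags)    -- hex round-trips
    assert(flags:lshift(8):rshift(8) == flags)        -- no set bits fall off here
    assert(Int64.max() == -(Int64.min() + 1))         -- signed range bounds
end
int64_demo()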
Lua
wireshark/test/lua/listener.lua
-- test script for various Lua functions -- use with dhcp.pcap in test/captures directory local testlib = require("testlib") ------------- general test helper funcs ------------ local FRAME = "frame" local ETH = "eth" local IP = "ip" local DHCP = "dhcp" local OTHER = "other" local PDISS = "postdissector" -- expected number of runs per type -- note ip (5 tests) only runs 3 times because it gets removed -- and dhcp (5 tests) only runs twice because the filter makes it run -- once and then it gets replaced with a different one for the second time local n_frames = 4 local taptests = { [FRAME]=5*n_frames, [ETH]=5*n_frames, [IP]=5*3, [DHCP]=5*2, [OTHER]=16, [PDISS]=n_frames, } testlib.init(taptests) local pkt_fields = { [FRAME] = {}, [PDISS] = {} } local function getAllFieldInfos(group) local fields = { all_field_infos() } local fieldnames = {} for i,v in ipairs(fields) do fieldnames[i] = v.name end local pktnum = testlib.getPktCount(group) pkt_fields[group][pktnum] = { ["num"] = #fields, ["fields"] = fieldnames } end local function dumpAllFieldInfos() for i,v in ipairs(pkt_fields[FRAME]) do print("In frame tap for packet ".. i ..":") print(" number of fields = ".. v.num) for _,name in ipairs(v.fields) do print(" field = ".. name) end local w = pkt_fields[PDISS][i] print("In postdissector for packet ".. i ..":") print(" number of fields = ".. w.num) for _,name in ipairs(w.fields) do print(" field = ".. name) end end end local function checkAllFieldInfos() for i,v in ipairs(pkt_fields[FRAME]) do local numfields = v.num if numfields ~= pkt_fields[PDISS][i].num then print("Tap and postdissector do not have same number of fields!") return false end if numfields < 100 then print("Too few fields!") return false end end return true end --------- -- the following are so we can use pcall (which needs a function to call) local function makeListener(...) local foo = Listener.new(...) end local function setListener(tap,name,value) tap[name] = value end local function getListener(tap,name) local foo = tap[name] end ------------- test script ------------ testlib.testing(OTHER,"negative tests") testlib.test(OTHER,"Listener.new-1",not pcall(makeListener,"FooBARhowdy")) testlib.test(OTHER,"Listener.new-2",not pcall(makeListener,"ip","FooBARhowdy")) local tmptap = Listener.new() local func = function(...) 
passed[OTHER] = 0 error("This shouldn't be called!") end testlib.test(OTHER,"Listener.set-3",pcall(setListener,tmptap,"packet",func)) testlib.test(OTHER,"Listener.set-4",pcall(setListener,tmptap,"reset",func)) testlib.test(OTHER,"Listener.set-5",pcall(setListener,tmptap,"draw",func)) testlib.test(OTHER,"Listener.set-6",not pcall(setListener,Listener,"packet",func)) testlib.test(OTHER,"Listener.set-7",not pcall(setListener,Listener,"reset",func)) testlib.test(OTHER,"Listener.set-8",not pcall(setListener,Listener,"draw",func)) testlib.test(OTHER,"Listener.set-9",not pcall(setListener,Listener,"foobar",func)) testlib.test(OTHER,"Listener.get-10",not pcall(getListener,tmptap,"packet",func)) testlib.test(OTHER,"Listener.get-11",not pcall(getListener,tmptap,"reset",func)) testlib.test(OTHER,"Listener.get-12",not pcall(getListener,tmptap,"draw",func)) print("removing tmptap twice") testlib.test(OTHER,"Listener.remove-13",pcall(tmptap.remove,tmptap)) testlib.test(OTHER,"Listener.remove-14",pcall(tmptap.remove,tmptap)) testlib.test(OTHER,"typeof-15", typeof(tmptap) == "Listener") -- declare some field extractors local f_eth_src = Field.new("eth.src") local f_eth_dst = Field.new("eth.dst") local f_eth_mac = Field.new("eth.addr") local f_ip_src = Field.new("ip.src") local f_ip_dst = Field.new("ip.dst") local f_dhcp_hw = Field.new("dhcp.hw.mac_addr") local f_dhcp_opt = Field.new("dhcp.option.type") local tap_frame = Listener.new(nil,nil,true) local tap_eth = Listener.new("eth") local tap_ip = Listener.new("ip","dhcp") local tap_dhcp = Listener.new("dhcp","dhcp.option.dhcp == 1") local second_time = false function tap_frame.packet(pinfo,tvb,frame) testlib.countPacket(FRAME) testlib.testing(FRAME,"Frame") testlib.test(FRAME,"arg-1", typeof(pinfo) == "Pinfo") testlib.test(FRAME,"arg-2", typeof(tvb) == "Tvb") testlib.test(FRAME,"arg-3", frame == nil) testlib.test(FRAME,"pinfo.number-1",pinfo.number == testlib.getPktCount(FRAME)) -- check ether addr local eth_src1 = tostring(f_eth_src().range) local eth_src2 = tostring(tvb:range(6,6)) testlib.test(FRAME,"FieldInfo.range-1", eth_src1 == eth_src2) getAllFieldInfos(FRAME) end function tap_eth.packet(pinfo,tvb,eth) testlib.countPacket(ETH) -- on the 4th run of eth, remove the ip one and add a new dhcp one if testlib.getPktCount(ETH) == 4 then testlib.testing(ETH,"removing ip tap, replacing dhcp tap") tap_ip:remove() tap_dhcp:remove() tap_dhcp = Listener.new("dhcp") tap_dhcp.packet = dhcp_packet second_time = true end testlib.testing(ETH,"Eth") testlib.test(ETH,"arg-1", typeof(pinfo) == "Pinfo") testlib.test(ETH,"arg-2", typeof(tvb) == "Tvb") testlib.test(ETH,"arg-3", type(eth) == "table") testlib.test(ETH,"pinfo.number-1",pinfo.number == testlib.getPktCount(ETH)) -- check ether addr local eth_src1 = tostring(f_eth_src().range) local eth_src2 = tostring(tvb:range(6,6)) testlib.test(ETH,"FieldInfo.range-1", eth_src1 == eth_src2) end function tap_ip.packet(pinfo,tvb,ip) testlib.countPacket(IP) testlib.testing(IP,"IP") testlib.test(IP,"arg-1", typeof(pinfo) == "Pinfo") testlib.test(IP,"arg-2", typeof(tvb) == "Tvb") testlib.test(IP,"arg-3", type(ip) == "table") testlib.test(IP,"pinfo.number-1",pinfo.number == testlib.getPktCount(IP)) -- check ether addr local eth_src1 = tostring(f_eth_src().range) local eth_src2 = tostring(tvb:range(6,6)) testlib.test(IP,"FieldInfo.range-1", eth_src1 == eth_src2) end dhcp_packet = function (pinfo,tvb,dhcp) testlib.countPacket(DHCP) testlib.testing(DHCP,"DHCP") testlib.test(DHCP,"arg-1", typeof(pinfo) == "Pinfo") 
testlib.test(DHCP,"arg-2", typeof(tvb) == "Tvb") testlib.test(DHCP,"arg-3", dhcp == nil) if not second_time then testlib.test(DHCP,"pinfo.number-1",pinfo.number == testlib.getPktCount(DHCP)) else testlib.test(DHCP,"pinfo.number-1",pinfo.number == 4) end -- check ether addr local eth_src1 = tostring(f_eth_src().range) local eth_src2 = tostring(tvb:range(6,6)) testlib.test(DHCP,"FieldInfo.range-1", eth_src1 == eth_src2) end tap_dhcp.packet = dhcp_packet function tap_frame.reset() -- reset never gets called in tshark (sadly) --[[ XXX: this is no longer the case?! if not GUI_ENABLED then error("reset called!!") end --]] end function tap_frame.draw() testlib.test(OTHER,"all_field_infos", checkAllFieldInfos()) testlib.getResults() end -- max_gap.lua -- create a gap.max field containing the maximum gap between two packets between two ip nodes -- we create a "protocol" for our tree local max_gap_p = Proto("gap","Gap in IP conversations") -- we create our fields local max_gap_field = ProtoField.float("gap.max") -- we add our fields to the protocol max_gap_p.fields = { max_gap_field } -- then we register max_gap_p as a postdissector register_postdissector(max_gap_p,true) function max_gap_p.dissector(tvb,pinfo,tree) testlib.countPacket(PDISS) getAllFieldInfos(PDISS) testlib.pass(PDISS) end
Lua
wireshark/test/lua/nstime.lua
-- test script for various Lua functions -- use with dhcp.pcap in test/captures directory local testlib = require("testlib") local FRAME = "frame" local PER_FRAME = "per-frame" local OTHER = "other" -- expected number of runs per type local n_frames = 4 local taptests = { [FRAME]=n_frames, [PER_FRAME]=n_frames*5, [OTHER]=44 } testlib.init(taptests) --------- -- the following are so we can use pcall (which needs a function to call) local function setNSTime(nst,name,value) nst[name] = value end local function getNSTime(nst,name) local foo = nst[name] end ------------- test script ------------ testlib.testing(OTHER,"negative tests") testlib.test(OTHER,"NSTime.new-1",not pcall(NSTime,"FooBARhowdy")) testlib.test(OTHER,"NSTime.new-2",not pcall(NSTime,"ip","FooBARhowdy")) local tmptime = NSTime() testlib.test(OTHER,"NSTime.set-3",pcall(setNSTime,tmptime,"secs",10)) testlib.test(OTHER,"NSTime.set-4",not pcall(setNSTime,tmptime,"foobar",1000)) testlib.test(OTHER,"NSTime.set-5",pcall(setNSTime,tmptime,"nsecs",123)) testlib.test(OTHER,"NSTime.set-6",not pcall(setNSTime,NSTime,"secs",0)) testlib.test(OTHER,"NSTime.set-7",not pcall(setNSTime,tmptime,"secs","foobar")) testlib.test(OTHER,"NSTime.set-8",not pcall(setNSTime,NSTime,"nsecs",0)) testlib.test(OTHER,"NSTime.set-9",not pcall(setNSTime,tmptime,"nsecs","foobar")) testlib.test(OTHER,"NSTime.get-10",pcall(getNSTime,tmptime,"secs")) testlib.test(OTHER,"NSTime.get-11",pcall(getNSTime,tmptime,"nsecs")) testlib.test(OTHER,"NSTime.get-12",not pcall(getNSTime,NSTime,"secs")) testlib.test(OTHER,"NSTime.get-13",not pcall(getNSTime,NSTime,"nsecs")) testlib.testing(OTHER,"basic tests") local first = NSTime() local second = NSTime(100,100) local third = NSTime(0,100) testlib.test(OTHER,"NSTime.secs-14", first.secs == 0) testlib.test(OTHER,"NSTime.secs-15", second.secs == 100) testlib.test(OTHER,"NSTime.secs-16", third.secs == 0) testlib.test(OTHER,"NSTime.nsecs-17", first.nsecs == 0) testlib.test(OTHER,"NSTime.nsecs-18", second.nsecs == 100) testlib.test(OTHER,"NSTime.nsecs-19", third.nsecs == 100) testlib.test(OTHER,"NSTime.eq-20", first == NSTime()) testlib.test(OTHER,"NSTime.neq-21", second ~= third) testlib.test(OTHER,"NSTime.add-22", first + second == second) testlib.test(OTHER,"NSTime.add-23", third + NSTime(100,0) == second) testlib.test(OTHER,"NSTime.add-24", NSTime(100) + NSTime(nil,100) == second) testlib.test(OTHER,"NSTime.lt-25", third < second) testlib.test(OTHER,"NSTime.gt-26", third > first) testlib.test(OTHER,"NSTime.le-27", second <= NSTime(100,100)) testlib.test(OTHER,"NSTime.unm-28", -first == first) testlib.test(OTHER,"NSTime.unm-29", -(-second) == second) testlib.test(OTHER,"NSTime.unm-30", -second == NSTime(-100,-100)) testlib.test(OTHER,"NSTime.unm-31", -third == NSTime(0,-100)) testlib.test(OTHER,"NSTime.tostring-32", tostring(first) == "0.000000000") testlib.test(OTHER,"NSTime.tostring-33", tostring(second) == "100.000000100") testlib.test(OTHER,"NSTime.tostring-34", tostring(third) == "0.000000100") testlib.test(OTHER,"NSTime.tonumber-35", first:tonumber() == 0.0) testlib.test(OTHER,"NSTime.tonumber-36", second:tonumber() == 100.0000001) testlib.test(OTHER,"NSTime.tonumber-37", third:tonumber() == 0.0000001) testlib.testing(OTHER,"setters/getters") first.secs = 123 first.nsecs = 100 testlib.test(OTHER,"NSTime.set-38", first == NSTime(123,100)) testlib.test(OTHER,"NSTime.get-39", first.secs == 123) testlib.test(OTHER,"NSTime.get-40", first.nsecs == 100) local minus0_4 = NSTime() - NSTime(0,400000000) 
testlib.test(OTHER,"NSTime.negative_tonumber-41", minus0_4:tonumber() == -0.4) testlib.test(OTHER,"NSTime.negative_tostring-42", tostring(minus0_4) == "-0.400000000") local minus0_4 = NSTime() - NSTime(1,400000000) testlib.test(OTHER,"NSTime.negative_tonumber-43", minus0_4:tonumber() == -1.4) testlib.test(OTHER,"NSTime.negative_tostring-44", tostring(minus0_4) == "-1.400000000") ---------------------------------- -- declare some field extractors local f_frame_time = Field.new("frame.time") local f_frame_time_rel = Field.new("frame.time_relative") local f_frame_time_delta = Field.new("frame.time_delta") local tap = Listener.new() local begin = NSTime() local now, previous function tap.packet(pinfo,tvb,frame) testlib.countPacket(FRAME) testlib.testing(FRAME,"NSTime in Frame") local fi_now = f_frame_time() local fi_rel = f_frame_time_rel() local fi_delta = f_frame_time_delta() testlib.test(PER_FRAME,"typeof-1", typeof(begin) == "NSTime") testlib.test(PER_FRAME,"typeof-2", typeof(fi_now()) == "NSTime") now = fi_now() if testlib.getPktCount(FRAME) == 1 then testlib.test(PER_FRAME,"__eq-1", begin == fi_delta()) testlib.test(PER_FRAME,"NSTime.secs-1", fi_delta().secs == 0) testlib.test(PER_FRAME,"NSTime.nsecs-1", fi_delta().nsecs == 0) begin = fi_now() else testlib.test(PER_FRAME,"__sub__eq-1", now - previous == fi_delta()) testlib.test(PER_FRAME,"__sub__eq-2", now - begin == fi_rel()) testlib.test(PER_FRAME,"__add-1", (previous - begin) + (now - previous) == fi_rel()) end previous = now testlib.pass(FRAME) end function tap.draw() testlib.getResults() end
Lua
wireshark/test/lua/pcap_file.lua
-- pcap_file_reader.lua -------------------------------------------------------------------------------- --[[ This is a Wireshark Lua-based pcap capture file reader. Author: Hadriel Kaplan This "capture file" reader reads pcap files - the old style ones. Don't expect this to be as good as the real thing; this is a simplistic implementation to show how to create such file readers, and for testing purposes. This script requires Wireshark v1.12 or newer. --]] -------------------------------------------------------------------------------- -- do not modify this table local debug = { DISABLED = 0, LEVEL_1 = 1, LEVEL_2 = 2 } -- set this DEBUG to debug.LEVEL_1 to enable printing debug info -- set it to debug.LEVEL_2 to enable really verbose printing local DEBUG = debug.LEVEL_1 local wireshark_name = "Wireshark" if not GUI_ENABLED then wireshark_name = "Tshark" end -- verify Wireshark is new enough local major, minor, micro = get_version():match("(%d+)%.(%d+)%.(%d+)") if major and tonumber(major) <= 1 and ((tonumber(minor) <= 10) or (tonumber(minor) == 11 and tonumber(micro) < 3)) then error( "Sorry, but your " .. wireshark_name .. " version (" .. get_version() .. ") is too old for this script!\n" .. "This script needs " .. wireshark_name .. " version 1.12 or higher.\n" ) end -- verify we have the Struct library in wireshark -- technically we should be able to do this with 'require', but Struct is a built-in assert(Struct.unpack, wireshark_name .. " does not have the Struct library!") -------------------------------------------------------------------------------- -- early definitions -- throughout most of this file I try to pre-declare things to help ease -- reading it and following the logic flow, but some things just have to be done -- before others, so this section has such things that cannot be avoided -------------------------------------------------------------------------------- -- first some variable declarations for functions we'll define later local parse_file_header, parse_rec_header, read_common -- these will be set inside of parse_file_header(), but we're declaring them up here local default_settings = { debug = DEBUG, corrected_magic = 0xa1b2c3d4, version_major = 2, version_minor = 4, timezone = 0, sigfigs = 0, read_snaplen = 0, -- the snaplen we read from file snaplen = 0, -- the snaplen we use (limited by WTAP_MAX_PACKET_SIZE) linktype = -1, -- the raw linktype number in the file header wtap_type = wtap_encaps.UNKNOWN, -- the mapped internal wtap number based on linktype endianess = ENC_BIG_ENDIAN, time_precision = wtap_tsprecs.USEC, rec_hdr_len = 16, -- default size of record header rec_hdr_patt = "I4 I4 I4 I4", -- pattern for Struct to use num_rec_fields = 4, -- number of vars in pattern } local dprint = function() end local dprint2 = function() end local function reset_debug() if default_settings.debug > debug.DISABLED then dprint = function(...) print(table.concat({"Lua:", ...}," ")) end if default_settings.debug > debug.LEVEL_1 then dprint2 = dprint end end end -- call it now reset_debug() -------------------------------------------------------------------------------- -- file reader handling functions for Wireshark to use -------------------------------------------------------------------------------- ---------------------------------------- -- read_open() is called by Wireshark once per file, to see if the file is this reader's type. 
-- Wireshark passes in (1) a File object and (2) CaptureInfo object to this function -- It expects in return either nil or false to mean it's not our file type, or true if it is -- In our case what this means is we figure out if the file has the magic header, and get the -- endianess of the file, and the encapsulation type of its frames/records local function read_open(file, capture) dprint2("read_open() called") local file_settings = parse_file_header(file) if file_settings then dprint2("read_open: success, file is for us") -- save our state capture.private_table = file_settings -- if the file is for us, we MUST set the file position cursor to -- where we want the first call to the read() function to get it the next time -- for example if we checked a few records to be sure it's our type -- but in this simple example we only verify the file header (24 bytes) -- and we want the file position to remain after that header for our read() -- call, so we don't change it back --file:seek("set",position) -- these we can also set per record later during read operations capture.time_precision = file_settings.time_precision capture.encap = file_settings.wtap_type capture.snapshot_length = file_settings.snaplen return true end dprint2("read_open: file not for us") -- if it's not for us, wireshark will reset the file position itself return false end ---------------------------------------- -- Wireshark/tshark calls read() for each frame/record in the file -- It passes in (1) a File, (2) CaptureInfo, and (3) FrameInfo object to this function -- It expects in return the file offset position the record starts at, -- or nil/false if there's an error or end-of-file is reached. -- The offset position is used later: wireshark remembers it and gives -- it to seek_read() at various random times local function read(file, capture, frame) dprint2("read() called") -- call our common reader function local position = file:seek() if not read_common("read", file, capture, frame) then -- this isn't actually an error, because it might just mean we reached end-of-file -- so let's test for that (read(0) is a special case in Lua, see Lua docs) if file:read(0) ~= nil then dprint("read: failed to call read_common") else dprint2("read: reached end of file") end return false end dprint2("read: success") -- return the position we got to (or nil if we hit EOF/error) return position end ---------------------------------------- -- Wireshark/tshark calls seek_read() for each frame/record in the file, at random times -- It passes in (1) a File, (2) CaptureInfo, (3) FrameInfo object, and the offset position number -- It expects in return true for successful parsing, or nil/false if there's an error. local function seek_read(file, capture, frame, offset) dprint2("seek_read() called") -- first move to the right position in the file file:seek("set",offset) if not read_common("seek_read", file, capture, frame) then dprint("seek_read: failed to call read_common") return false end return true end ---------------------------------------- -- Wireshark/tshark calls read_close() when it's closing the file completely -- It passes in (1) a File and (2) CaptureInfo object to this function -- this is a good opportunity to clean up any state you may have created during -- file reading. 
(in our case there's no real state) local function read_close(file, capture) dprint2("read_close() called") -- we don't really have to reset anything, because we used the -- capture.private_table and wireshark clears it for us after this function return true end ---------------------------------------- -- An often unused function, Wireshark calls this when the sequential walk-through is over -- (i.e., no more calls to read(), only to seek_read()). -- It passes in (1) a File and (2) CaptureInfo object to this function -- This gives you a chance to clean up any state you used during read() calls, but remember -- that there will be calls to seek_read() after this (in Wireshark, though not Tshark) local function seq_read_close(file, capture) dprint2("First pass of read() calls are over, but there may be seek_read() calls after this") return true end ---------------------------------------- -- ok, so let's create a FileHandler object local fh = FileHandler.new("Lua-based PCAP reader", "lua_pcap", "A Lua-based file reader for PCAP-type files","rms") -- set above functions to the FileHandler fh.read_open = read_open fh.read = read fh.seek_read = seek_read fh.read_close = read_close fh.seq_read_close = seq_read_close fh.extensions = "pcap;cap" -- this is just a hint -- and finally, register the FileHandler! register_filehandler(fh) dprint2("FileHandler registered") -------------------------------------------------------------------------------- -- ok now for the boring stuff that actually does the work -------------------------------------------------------------------------------- ---------------------------------------- -- in Lua, we have access to encapsulation types in the 'wtap_encaps' table, but -- those numbers don't actually necessarily match the numbers in pcap files -- for the encapsulation type, because the namespace got screwed up at some -- point in the past (blame LBL NRG, not wireshark for that) -- but I'm not going to create the full mapping of these two namespaces -- instead we'll just use this smaller table to map them -- these are taken from wiretap/pcap-common.c local pcap2wtap = { [0] = wtap_encaps.NULL, [1] = wtap_encaps.ETHERNET, [6] = wtap_encaps.TOKEN_RING, [8] = wtap_encaps.SLIP, [9] = wtap_encaps.PPP, [101] = wtap_encaps.RAW_IP, [105] = wtap_encaps.IEEE_802_11, [127] = wtap_encaps.IEEE_802_11_RADIOTAP, [140] = wtap_encaps.MTP2, [141] = wtap_encaps.MTP3, [143] = wtap_encaps.DOCSIS, [147] = wtap_encaps.USER0, [148] = wtap_encaps.USER1, [149] = wtap_encaps.USER2, [150] = wtap_encaps.USER3, [151] = wtap_encaps.USER4, [152] = wtap_encaps.USER5, [153] = wtap_encaps.USER6, [154] = wtap_encaps.USER7, [155] = wtap_encaps.USER8, [156] = wtap_encaps.USER9, [157] = wtap_encaps.USER10, [158] = wtap_encaps.USER11, [159] = wtap_encaps.USER12, [160] = wtap_encaps.USER13, [161] = wtap_encaps.USER14, [162] = wtap_encaps.USER15, [186] = wtap_encaps.USB, [187] = wtap_encaps.BLUETOOTH_H4, [189] = wtap_encaps.USB_LINUX, [195] = wtap_encaps.IEEE802_15_4, } -- we can use the above to directly map very quickly -- but to map it backwards we'll use this, because I'm lazy: local function wtap2pcap(encap) for k,v in pairs(pcap2wtap) do if v == encap then return k end end return 0 end ---------------------------------------- -- here are the "structs" we're going to parse, of the various records in a pcap file -- these pattern strings get used in calls to Struct.unpack() -- -- we will prepend a '<' or '>' later, once we figure out what endianess the files are in -- -- this is a constant for the minimum 
we need to read before we figure out the filetype local FILE_HDR_LEN = 24 -- a pcap file header struct -- this is: magic, version_major, version_minor, timezone, sigfigs, snaplen, encap type local FILE_HEADER_PATT = "I4 I2 I2 i4 I4 I4 I4" -- it's too bad Struct doesn't have a way to get the number of vars the pattern holds -- another thing to add to my to-do list? local NUM_HDR_FIELDS = 7 -- these will hold the '<'/'>' prepended version of above --local file_header, rec_header -- snaplen/caplen can't be bigger than this local WTAP_MAX_PACKET_SIZE = 65535 ---------------------------------------- -- different pcap file types have different magic values -- we need to know various things about them for various functions -- in this script, so this table holds all the info -- -- See default_settings table above for the defaults used if this table -- doesn't override them. -- -- Arguably, these magic types represent different "Protocols" to dissect later, -- but this script treats them all as "pcapfile" protocol. -- -- From this table, we'll auto-create a value-string table for file header magic field local magic_spells = { normal = { magic = 0xa1b2c3d4, name = "Normal (Big-endian)", }, swapped = { magic = 0xd4c3b2a1, name = "Swapped Normal (Little-endian)", endianess = ENC_LITTLE_ENDIAN, }, modified = { -- this is for a ss991029 patched format only magic = 0xa1b2cd34, name = "Modified", rec_hdr_len = 24, rec_hdr_patt = "I4I4I4I4 I4 I2 I1 I1", num_rec_fields = 8, }, swapped_modified = { -- this is for a ss991029 patched format only magic = 0x34cdb2a1, name = "Swapped Modified", rec_hdr_len = 24, rec_hdr_patt = "I4I4I4I4 I4 I2 I1 I1", num_rec_fields = 8, endianess = ENC_LITTLE_ENDIAN, }, nsecs = { magic = 0xa1b23c4d, name = "Nanosecond", time_precision = wtap_filetypes.TSPREC_NSEC, }, swapped_nsecs = { magic = 0x4d3cb2a1, name = "Swapped Nanosecond", endianess = ENC_LITTLE_ENDIAN, time_precision = wtap_filetypes.TSPREC_NSEC, }, } -- create a magic-to-spell entry table from above magic_spells table -- so we can find them faster during file read operations -- we could just add them right back into spells table, but this is cleaner local magic_values = {} for k,t in pairs(magic_spells) do magic_values[t.magic] = t end -- the function which makes a copy of the default settings per file local function new_settings() dprint2("creating new file_settings") local file_settings = {} for k,v in pairs(default_settings) do file_settings[k] = v end return file_settings end -- set the file_settings that the magic value defines in magic_values local function set_magic_file_settings(magic) local t = magic_values[magic] if not t then dprint("set_magic_file_settings: did not find magic settings for:",magic) return false end local file_settings = new_settings() -- the magic_values/spells table uses the same key names, so this is easy for k,v in pairs(t) do file_settings[k] = v end -- based on endianess, set the file_header and rec_header -- and determine corrected_magic if file_settings.endianess == ENC_BIG_ENDIAN then file_settings.file_hdr_patt = '>' .. FILE_HEADER_PATT file_settings.rec_hdr_patt = '>' .. file_settings.rec_hdr_patt file_settings.corrected_magic = magic else file_settings.file_hdr_patt = '<' .. FILE_HEADER_PATT file_settings.rec_hdr_patt = '<' .. 
file_settings.rec_hdr_patt local m = Struct.pack(">I4", magic) file_settings.corrected_magic = Struct.unpack("<I4", m) end file_settings.rec_hdr_len = Struct.size(file_settings.rec_hdr_patt) return file_settings end ---------------------------------------- -- internal functions declared previously ---------------------------------------- ---------------------------------------- -- used by read_open(), this parses the file header parse_file_header = function(file) dprint2("parse_file_header() called") -- by default, file:read() gets the next "string", meaning ending with a newline \n -- but we want raw byte reads, so tell it how many bytes to read local line = file:read(FILE_HDR_LEN) -- it's ok for us to not be able to read it, but we need to tell wireshark the -- file's not for us, so return false if not line then return false end dprint2("parse_file_header: got this line:\n'", Struct.tohex(line,false,":"), "'") -- let's peek at the magic int32, assuming it's big-endian local magic = Struct.unpack(">I4", line) local file_settings = set_magic_file_settings(magic) if not file_settings then dprint("magic was: '", magic, "', so not a known pcap file?") return false end -- this is: magic, version_major, version_minor, timezone, sigfigs, snaplen, encap type local fields = { Struct.unpack(file_settings.file_hdr_patt, line) } -- sanity check; also note that Struct.unpack() returns the fields plus -- a number of where in the line it stopped reading (i.e., the end in this case) -- so we got back number of fields + 1 if #fields ~= NUM_HDR_FIELDS + 1 then -- this should never happen, since we already told file:read() to grab enough bytes dprint("parse_file_header: failed to read the file header") return nil end -- fields[1] is the magic, which we already parsed and saved before, but just to be sure -- our endianess is set right, we validate what we got is what we expect now that -- endianess has been corrected if fields[1] ~= file_settings.corrected_magic then dprint ("parse_file_header: endianess screwed up? 
Got:'", fields[1], "', but wanted:", file_settings.corrected_magic) return nil end file_settings.version_major = fields[2] file_settings.version_minor = fields[3] file_settings.timezone = fields[4] file_settings.sigfigs = fields[5] file_settings.read_snaplen = fields[6] file_settings.linktype = fields[7] -- wireshark only supports version 2.0 and later if fields[2] < 2 then dprint("got version =",VERSION_MAJOR,"but only version 2 or greater supported") return false end -- convert pcap file interface type to wtap number type file_settings.wtap_type = pcap2wtap[file_settings.linktype] if not file_settings.wtap_type then dprint("file nettype", file_settings.linktype, "couldn't be mapped to wireshark wtap type") return false end file_settings.snaplen = file_settings.read_snaplen if file_settings.snaplen > WTAP_MAX_PACKET_SIZE then file_settings.snaplen = WTAP_MAX_PACKET_SIZE end dprint2("read_file_header: got magic='", magic, "', major version='", file_settings.version_major, "', minor='", file_settings.version_minor, "', timezone='", file_settings.timezone, "', sigfigs='", file_settings.sigfigs, "', read_snaplen='", file_settings.read_snaplen, "', snaplen='", file_settings.snaplen, "', nettype ='", file_settings.linktype, "', wtap ='", file_settings.wtap_type) --ok, it's a pcap file dprint2("parse_file_header: success") return file_settings end ---------------------------------------- -- this is used by both read() and seek_read() -- the calling function to this should have already set the file position correctly read_common = function(funcname, file, capture, frame) dprint2(funcname,": read_common() called") -- get the state info local file_settings = capture.private_table -- first parse the record header, which will set the FrameInfo fields if not parse_rec_header(funcname, file, file_settings, frame) then dprint2(funcname, ": read_common: hit end of file or error") return false end frame.encap = file_settings.wtap_type -- now we need to get the packet bytes from the file record into the frame... -- we *could* read them into a string using file:read(numbytes), and then -- set them to frame.data so that wireshark gets it... 
-- but that would mean the packet's string would be copied into Lua -- and then sent right back into wireshark, which is gonna slow things -- down; instead FrameInfo has a read_data() method, which makes -- wireshark read directly from the file into the frame buffer, so we use that if not frame:read_data(file, frame.captured_length) then dprint(funcname, ": read_common: failed to read data from file into buffer") return false end return true end ---------------------------------------- -- the function to parse individual records parse_rec_header = function(funcname, file, file_settings, frame) dprint2(funcname,": parse_rec_header() called") local line = file:read(file_settings.rec_hdr_len) -- it's ok for us to not be able to read it, if it's end of file if not line then return false end -- this is: time_sec, time_usec, capture_len, original_len local fields = { Struct.unpack(file_settings.rec_hdr_patt, line) } -- sanity check; also note that Struct.unpack() returns the fields plus -- a number of where in the line it stopped reading (i.e., the end in this case) -- so we got back number of fields + 1 if #fields ~= file_settings.num_rec_fields + 1 then dprint(funcname, ": parse_rec_header: failed to read the record header, got:", #fields, ", expected:", file_settings.num_rec_fields) return nil end local nsecs = fields[2] if file_settings.time_precision == wtap_filetypes.TSPREC_USEC then nsecs = nsecs * 1000 elseif file_settings.time_precision == wtap_filetypes.TSPREC_MSEC then nsecs = nsecs * 1000000 end frame.time = NSTime(fields[1], nsecs) local caplen, origlen = fields[3], fields[4] -- sanity check, verify captured length isn't more than original length if caplen > origlen then dprint("captured length of", caplen, "is bigger than original length of", origlen) -- swap them, a cool Lua ability caplen, origlen = origlen, caplen end if caplen > WTAP_MAX_PACKET_SIZE then dprint("Got a captured_length of", caplen, "which is too big") caplen = WTAP_MAX_PACKET_SIZE end frame.rec_type = wtap_rec_types.PACKET frame.captured_length = caplen frame.original_length = origlen frame.flags = wtap_presence_flags.TS + wtap_presence_flags.CAP_LEN -- for timestamp|cap_len dprint2(funcname,": parse_rec_header() returning") return true end -------------------------------------------------------------------------------- -- file writer handling functions for Wireshark to use -------------------------------------------------------------------------------- -- file encaps we can handle writing local canwrite = { [ wtap_encaps.NULL ] = true, [ wtap_encaps.ETHERNET ] = true, [ wtap_encaps.PPP ] = true, [ wtap_encaps.RAW_IP ] = true, [ wtap_encaps.IEEE_802_11 ] = true, [ wtap_encaps.MTP2 ] = true, [ wtap_encaps.MTP3 ] = true, -- etc., etc. } -- we can't reuse the variables we used in the reader, because this script might be used to both -- open a file for reading and write it out, at the same time, so we create another file_settings -- instance. -- set the file_settings for the little-endian version in magic_spells local function create_writer_file_settings() dprint2("create_writer_file_settings called") local t = magic_spells.swapped local file_settings = new_settings() -- the magic_values/spells table uses the same key names, so this is easy for k,v in pairs(t) do file_settings[k] = v end -- based on endianess, set the file_header and rec_header -- and determine corrected_magic if file_settings.endianess == ENC_BIG_ENDIAN then file_settings.file_hdr_patt = '>' .. FILE_HEADER_PATT file_settings.rec_hdr_patt = '>' .. 
file_settings.rec_hdr_patt file_settings.corrected_magic = file_settings.magic else file_settings.file_hdr_patt = '<' .. FILE_HEADER_PATT file_settings.rec_hdr_patt = '<' .. file_settings.rec_hdr_patt local m = Struct.pack(">I4", file_settings.magic) file_settings.corrected_magic = Struct.unpack("<I4", m) end file_settings.rec_hdr_len = Struct.size(file_settings.rec_hdr_patt) return file_settings end ---------------------------------------- -- The can_write_encap() function is called by Wireshark when it wants to write out a file, -- and needs to see if this file writer can handle the packet types in the window. -- We need to return true if we can handle it, else false local function can_write_encap(encap) dprint2("can_write_encap() called with encap=",encap) return canwrite[encap] or false end local function write_open(file, capture) dprint2("write_open() called") local file_settings = create_writer_file_settings() -- write out file header local hdr = Struct.pack(file_settings.file_hdr_patt, file_settings.corrected_magic, file_settings.version_major, file_settings.version_minor, file_settings.timezone, file_settings.sigfigs, capture.snapshot_length, wtap2pcap(capture.encap)) if not hdr then dprint("write_open: error generating file header") return false end dprint2("write_open generating:", Struct.tohex(hdr)) if not file:write(hdr) then dprint("write_open: error writing file header to file") return false end -- save settings capture.private_table = file_settings return true end local function write(file, capture, frame) dprint2("write() called") -- get file settings local file_settings = capture.private_table if not file_settings then dprint("write() failed to get private table file settings") return false end -- write out record header: time_sec, time_usec, capture_len, original_len -- first get times local nstime = frame.time -- pcap format is in usecs, but wireshark's internal is nsecs local nsecs = nstime.nsecs if file_settings.time_precision == wtap_filetypes.TSPREC_USEC then nsecs = nsecs / 1000 elseif file_settings.time_precision == wtap_filetypes.TSPREC_MSEC then nsecs = nsecs / 1000000 end local hdr = Struct.pack(file_settings.rec_hdr_patt, nstime.secs, nsecs, frame.captured_length, frame.original_length) if not hdr then dprint("write: error generating record header") return false end if not file:write(hdr) then dprint("write: error writing record header to file") return false end -- we could write the packet data the same way, by getting frame.data and writing it out -- but we can avoid copying those bytes into Lua by using the write_data() function if not frame:write_data(file) then dprint("write: error writing record data to file") return false end return true end local function write_close(file, capture) dprint2("write_close() called") dprint2("Good night, and good luck") return true end -- ok, so let's create another FileHandler object local fh2 = FileHandler.new("Lua-based PCAP writer", "lua_pcap2", "A Lua-based file writer for PCAP-type files","wms") -- set above functions to the FileHandler fh2.can_write_encap = can_write_encap fh2.write_open = write_open fh2.write = write fh2.write_close = write_close fh2.extensions = "pcap;cap" -- this is just a hint -- and finally, register the FileHandler! register_filehandler(fh2) dprint2("Second FileHandler registered")
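-- A tiny self-check sketch of the Struct round-trip both the reader and writer
-- above depend on: pack a little-endian record header, then unpack it and
-- confirm the fields survive. The field values are arbitrary examples.
local demo_patt = "<I4 I4 I4 I4"  -- time_sec, time_usec, capture_len, original_len
local packed = Struct.pack(demo_patt, 1234567890, 250000, 60, 60)
assert(Struct.size(demo_patt) == 16 and #packed == 16)
local sec, usec, caplen, origlen = Struct.unpack(demo_patt, packed)
assert(sec == 1234567890 and usec == 250000 and caplen == 60 and origlen == 60)
dprint2("Struct round-trip sketch ok:", Struct.tohex(packed))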
Lua
wireshark/test/lua/pinfo.lua
-- test script for Pinfo and Address functions -- use with dhcp.pcap in test/captures directory ------------- general test helper funcs ------------ local testlib = require("testlib") local FRAME = "frame" local DENIED = "denied" local GETTER = "getter" local SETTER = "setter" local ADDR = "address" local OTHER = "other" -- expected number of runs per type local n_frames = 4 local taptests = { [FRAME]=n_frames, [DENIED]=n_frames*32, [GETTER]=n_frames*39, [SETTER]=n_frames*15, [ADDR]=n_frames*6, [OTHER]=n_frames*2, } testlib.init(taptests) --------- -- the following are so we can use pcall (which needs a function to call) local function setPinfo(pinfo,name,value) pinfo[name] = value end local function getPinfo(pinfo,name) local foo = pinfo[name] end ------------- test script ------------ local tap = Listener.new() function tap.packet(pinfo,tvb) testlib.countPacket(FRAME) testlib.testing(FRAME,"Pinfo in Frame") testlib.test(OTHER,"typeof-1", typeof(pinfo) == "Pinfo") testlib.test(OTHER,"tostring-1", tostring(pinfo) == "a Pinfo") testlib.testing(FRAME,"negative tests") -- try to set read-only attributes testlib.test(DENIED,"Pinfo.number-set-1",not pcall(setPinfo,pinfo,"number",0)) testlib.test(DENIED,"Pinfo.len-set-1",not pcall(setPinfo,pinfo,"len",0)) testlib.test(DENIED,"Pinfo.caplen-set-1",not pcall(setPinfo,pinfo,"caplen",0)) testlib.test(DENIED,"Pinfo.rel_ts-set-1",not pcall(setPinfo,pinfo,"rel_ts",0)) testlib.test(DENIED,"Pinfo.delta_ts-set-1",not pcall(setPinfo,pinfo,"delta_ts",0)) testlib.test(DENIED,"Pinfo.delta_dis_ts-set-1",not pcall(setPinfo,pinfo,"delta_dis_ts",0)) testlib.test(DENIED,"Pinfo.visited-set-1",not pcall(setPinfo,pinfo,"visited",0)) testlib.test(DENIED,"Pinfo.lo-set-1",not pcall(setPinfo,pinfo,"lo",0)) testlib.test(DENIED,"Pinfo.hi-set-1",not pcall(setPinfo,pinfo,"hi",0)) testlib.test(DENIED,"Pinfo.port_type-set-1",not pcall(setPinfo,pinfo,"port_type",0)) testlib.test(DENIED,"Pinfo.match-set-1",not pcall(setPinfo,pinfo,"match",0)) testlib.test(DENIED,"Pinfo.curr_proto-set-1",not pcall(setPinfo,pinfo,"curr_proto",0)) testlib.test(DENIED,"Pinfo.columns-set-1",not pcall(setPinfo,pinfo,"columns",0)) testlib.test(DENIED,"Pinfo.cols-set-1",not pcall(setPinfo,pinfo,"cols",0)) testlib.test(DENIED,"Pinfo.private-set-1",not pcall(setPinfo,pinfo,"private",0)) testlib.test(DENIED,"Pinfo.fragmented-set-1",not pcall(setPinfo,pinfo,"fragmented",0)) testlib.test(DENIED,"Pinfo.in_error_pkt-set-1",not pcall(setPinfo,pinfo,"in_error_pkt",0)) testlib.test(DENIED,"Pinfo.match_uint-set-1",not pcall(setPinfo,pinfo,"match_uint",0)) testlib.test(DENIED,"Pinfo.match_string-set-1",not pcall(setPinfo,pinfo,"match_string",0)) -- wrong type being set testlib.test(DENIED,"Pinfo.src-set-1",not pcall(setPinfo,pinfo,"src","foobar")) testlib.test(DENIED,"Pinfo.dst-set-1",not pcall(setPinfo,pinfo,"dst","foobar")) testlib.test(DENIED,"Pinfo.dl_src-set-1",not pcall(setPinfo,pinfo,"dl_src","foobar")) testlib.test(DENIED,"Pinfo.dl_dst-set-1",not pcall(setPinfo,pinfo,"dl_dst","foobar")) testlib.test(DENIED,"Pinfo.net_src-set-1",not pcall(setPinfo,pinfo,"net_src","foobar")) testlib.test(DENIED,"Pinfo.net_dst-set-1",not pcall(setPinfo,pinfo,"net_dst","foobar")) testlib.test(DENIED,"Pinfo.src_port-set-1",not pcall(setPinfo,pinfo,"src_port","foobar")) testlib.test(DENIED,"Pinfo.dst_port-set-1",not pcall(setPinfo,pinfo,"dst_port","foobar")) testlib.test(DENIED,"Pinfo.can_desegment-set-1",not pcall(setPinfo,pinfo,"can_desegment","foobar")) testlib.test(DENIED,"Pinfo.desegment_len-set-1",not 
pcall(setPinfo,pinfo,"desegment_len","foobar")) testlib.test(DENIED,"Pinfo.desegment_offset-set-1",not pcall(setPinfo,pinfo,"desegment_offset","foobar")) -- invalid attribute names testlib.test(DENIED,"Pinfo.set-1",not pcall(setPinfo,pinfo,"foobar","foobar")) testlib.test(DENIED,"Pinfo.get-12",not pcall(getPinfo,pinfo,"foobar")) testlib.testing(FRAME,"basic getter tests") local pktlen, srcip, dstip, srcport, dstport if pinfo.number == 1 or pinfo.number == 3 then pktlen = 314 srcip = "0.0.0.0" dstip = "255.255.255.255" srcport = 68 dstport = 67 else pktlen = 342 srcip = "192.168.0.1" dstip = "192.168.0.10" srcport = 67 dstport = 68 end testlib.test(GETTER,"Pinfo.number-get-1",pinfo.number == testlib.getPktCount(FRAME)) testlib.test(GETTER,"Pinfo.len-get-1",pinfo.len == pktlen) testlib.test(GETTER,"Pinfo.caplen-get-1",pinfo.caplen == pktlen) testlib.test(GETTER,"Pinfo.visited-get-1",pinfo.visited == true) testlib.test(GETTER,"Pinfo.lo-get-1",tostring(pinfo.lo) == srcip) testlib.test(GETTER,"Pinfo.lo-get-2",typeof(pinfo.lo) == "Address") testlib.test(GETTER,"Pinfo.hi-get-1",tostring(pinfo.hi) == dstip) testlib.test(GETTER,"Pinfo.hi-get-2",typeof(pinfo.hi) == "Address") testlib.test(GETTER,"Pinfo.port_type-get-1",pinfo.port_type == 3) testlib.test(GETTER,"Pinfo.match-get-1",pinfo.match == 0) testlib.test(GETTER,"Pinfo.curr_proto-get-1",tostring(pinfo.curr_proto) == "<Missing Protocol Name>") testlib.test(GETTER,"Pinfo.columns-get-1",tostring(pinfo.columns) == "Columns") testlib.test(GETTER,"Pinfo.columns-get-2",typeof(pinfo.columns) == "Columns") testlib.test(GETTER,"Pinfo.cols-get-1",tostring(pinfo.cols) == "Columns") testlib.test(GETTER,"Pinfo.cols-get-2",typeof(pinfo.cols) == "Columns") testlib.test(GETTER,"Pinfo.private-get-1",type(pinfo.private) == "userdata") testlib.test(GETTER,"Pinfo.fragmented-get-1",pinfo.fragmented == false) testlib.test(GETTER,"Pinfo.in_error_pkt-get-1",pinfo.in_error_pkt == false) testlib.test(GETTER,"Pinfo.match_uint-get-1",pinfo.match_uint == 0) testlib.test(GETTER,"Pinfo.match_string-get-1",pinfo.match_string == nil) testlib.test(GETTER,"Pinfo.src-get-1",tostring(pinfo.src) == srcip) testlib.test(GETTER,"Pinfo.src-get-2",typeof(pinfo.src) == "Address") testlib.test(GETTER,"Pinfo.dst-get-1",tostring(pinfo.dst) == dstip) testlib.test(GETTER,"Pinfo.dst-get-2",typeof(pinfo.dst) == "Address") testlib.test(GETTER,"Pinfo.dl_src-get-1",typeof(pinfo.dl_src) == "Address") testlib.test(GETTER,"Pinfo.dl_dst-get-1",typeof(pinfo.dl_dst) == "Address") testlib.test(GETTER,"Pinfo.net_src-get-1",tostring(pinfo.net_src) == srcip) testlib.test(GETTER,"Pinfo.net_src-get-2",typeof(pinfo.net_src) == "Address") testlib.test(GETTER,"Pinfo.net_dst-get-1",tostring(pinfo.net_dst) == dstip) testlib.test(GETTER,"Pinfo.net_dst-get-2",typeof(pinfo.net_dst) == "Address") testlib.test(GETTER,"Pinfo.src_port-get-1",pinfo.src_port == srcport) testlib.test(GETTER,"Pinfo.dst_port-get-1",pinfo.dst_port == dstport) testlib.test(GETTER,"Pinfo.can_desegment-get-1",pinfo.can_desegment == 0) testlib.test(GETTER,"Pinfo.desegment_len-get-1",pinfo.desegment_len == 0) testlib.test(GETTER,"Pinfo.desegment_offset-get-1",pinfo.desegment_offset == 0) testlib.test(GETTER,"pinfo.p2p_dir", pinfo.p2p_dir == P2P_DIR_UNKNOWN) if pinfo.number == 1 then testlib.test(GETTER,"Pinfo.rel_ts-get-1",pinfo.rel_ts == 0) testlib.test(GETTER,"Pinfo.delta_ts-get-1",pinfo.delta_ts == 0) testlib.test(GETTER,"Pinfo.delta_dis_ts-get-1",pinfo.delta_dis_ts == 0) elseif pinfo.number == 2 then 
testlib.test(GETTER,"Pinfo.rel_ts-get-1",pinfo.rel_ts == 0.000295) testlib.test(GETTER,"Pinfo.delta_ts-get-1",pinfo.delta_ts == 0.000295) testlib.test(GETTER,"Pinfo.delta_dis_ts-get-1",pinfo.delta_dis_ts == 0.000295) elseif pinfo.number == 3 then testlib.test(GETTER,"Pinfo.rel_ts-get-1",pinfo.rel_ts == 0.070031) testlib.test(GETTER,"Pinfo.delta_ts-get-1",pinfo.delta_ts == 0.069736) testlib.test(GETTER,"Pinfo.delta_dis_ts-get-1",pinfo.delta_dis_ts == 0.069736) elseif pinfo.number == 4 then testlib.test(GETTER,"Pinfo.rel_ts-get-1",pinfo.rel_ts == 0.070345) testlib.test(GETTER,"Pinfo.delta_ts-get-1",pinfo.delta_ts == 0.000314) testlib.test(GETTER,"Pinfo.delta_dis_ts-get-1",pinfo.delta_dis_ts == 0.000314) end testlib.testing(FRAME,"basic setter tests") local tmp = pinfo.src pinfo.src = pinfo.dst pinfo.dst = tmp testlib.test(SETTER,"Pinfo.src-set-1",tostring(pinfo.src) == dstip) testlib.test(SETTER,"Pinfo.src-set-1",typeof(pinfo.src) == "Address") testlib.test(SETTER,"Pinfo.dst-set-1",tostring(pinfo.dst) == srcip) testlib.test(SETTER,"Pinfo.dst-set-1",typeof(pinfo.dst) == "Address") local dl_dst_val = tostring(pinfo.dl_dst) local dl_src_val = tostring(pinfo.dl_src) tmp = pinfo.dl_src pinfo.dl_src = pinfo.dl_dst pinfo.dl_dst = tmp testlib.test(SETTER,"Pinfo.dl_src-set-1",tostring(pinfo.dl_src) == dl_dst_val) testlib.test(SETTER,"Pinfo.dl_dst-set-1",tostring(pinfo.dl_dst) == dl_src_val) tmp = pinfo.net_src pinfo.net_src = pinfo.net_dst pinfo.net_dst = tmp testlib.test(SETTER,"Pinfo.net_src-set-1",tostring(pinfo.net_src) == dstip) testlib.test(SETTER,"Pinfo.net_src-set-1",typeof(pinfo.net_src) == "Address") testlib.test(SETTER,"Pinfo.net_dst-set-1",tostring(pinfo.net_dst) == srcip) testlib.test(SETTER,"Pinfo.net_dst-set-1",typeof(pinfo.net_dst) == "Address") tmp = pinfo.src_port pinfo.src_port = pinfo.dst_port pinfo.dst_port = tmp testlib.test(SETTER,"Pinfo.src_port-set-1",pinfo.src_port == dstport) testlib.test(SETTER,"Pinfo.dst_port-set-1",pinfo.dst_port == srcport) pinfo.can_desegment = 12 testlib.test(SETTER,"Pinfo.can_desegment-set-1",pinfo.can_desegment == 12) pinfo.desegment_len = 34 testlib.test(SETTER,"Pinfo.desegment_len-set-1",pinfo.desegment_len == 34) pinfo.desegment_offset = 45 testlib.test(SETTER,"Pinfo.desegment_offset-set-1",pinfo.desegment_offset == 45) testlib.testing(FRAME,"Address functions") testlib.test(ADDR,"Address-eq-1", pinfo.lo == pinfo.dst) testlib.test(ADDR,"Address-eq-2", pinfo.lo ~= pinfo.hi) testlib.test(ADDR,"Address-lt-1", pinfo.lo < pinfo.hi) testlib.test(ADDR,"Address-lt-2", pinfo.hi > pinfo.lo) testlib.test(ADDR,"Address-le-1", pinfo.lo <= pinfo.hi) testlib.test(ADDR,"Address-le-2", pinfo.lo <= pinfo.dst) testlib.pass(FRAME) end function tap.draw() testlib.getResults() end
Lua
wireshark/test/lua/proto.lua
---------------------------------------- -- script-name: proto.lua -- Test the Proto/ProtoField API ---------------------------------------- ------------- general test helper funcs ------------ local testlib = require("testlib") local OTHER = "other" -- expected number of runs per type local taptests = { [OTHER]=48 } testlib.init(taptests) --------- -- the following are so we can use pcall (which needs a function to call) local function callFunc(func,...) func(...) end local function callObjFuncGetter(vart,varn,tobj,name,...) vart[varn] = tobj[name](...) end local function setValue(tobj,name,value) tobj[name] = value end local function getValue(tobj,name) local foo = tobj[name] end ------------- test script ------------ ---------------------------------------- -- creates a Proto object, but doesn't register it yet testlib.testing(OTHER,"Proto creation") testlib.test(OTHER,"Proto.__call", pcall(callFunc,Proto,"foo","Foo Protocol")) testlib.test(OTHER,"Proto.__call", pcall(callFunc,Proto,"foo1","Foo1 Protocol")) testlib.test(OTHER,"Proto.__call", not pcall(callFunc,Proto,"","Bar Protocol")) testlib.test(OTHER,"Proto.__call", not pcall(callFunc,Proto,nil,"Bar Protocol")) testlib.test(OTHER,"Proto.__call", not pcall(callFunc,Proto,"bar","")) testlib.test(OTHER,"Proto.__call", not pcall(callFunc,Proto,"bar",nil)) local dns = Proto("mydns","MyDNS Protocol") testlib.test(OTHER,"Proto.__tostring", tostring(dns) == "Proto: MYDNS") ---------------------------------------- -- multiple ways to do the same thing: create a protocol field (but not register it yet) -- the abbreviation should always have "<myproto>." before the specific abbreviation, to avoid collisions testlib.testing(OTHER,"ProtoField creation") local pfields = {} -- a table to hold fields, so we can pass them back/forth through pcall() --- variable -- what dissector.lua did, so we almost match it local pf_trasaction_id = 1 -- ProtoField.new("Transaction ID", "mydns.trans_id", ftypes.UINT16) local pf_flags = 2 -- ProtoField.new("Flags", "mydns.flags", ftypes.UINT16, nil, base.HEX) local pf_num_questions = 3 -- ProtoField.uint16("mydns.num_questions", "Number of Questions") local pf_num_answers = 4 -- ProtoField.uint16("mydns.num_answers", "Number of Answer RRs") local pf_num_authority_rr = 5 -- ProtoField.uint16("mydns.num_authority_rr", "Number of Authority RRs") local pf_num_additional_rr = 6 -- ProtoField.uint16("mydns.num_additional_rr", "Number of Additional RRs") testlib.test(OTHER,"ProtoField.new",pcall(callObjFuncGetter, pfields,pf_trasaction_id, ProtoField,"new", "Transaction ID", "mydns.trans_id", ftypes.INT16,nil,"base.DEC")) testlib.test(OTHER,"ProtoField.new",pcall(callObjFuncGetter, pfields,pf_flags, ProtoField,"new", "Flags", "mydns.flags", ftypes.UINT16, nil, "base.HEX")) -- tries to register a field that already exists (from the real dns proto dissector) but with incompatible type testlib.test(OTHER,"ProtoField.new_duplicate_bad",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "Flags", "dns.flags", ftypes.INT16, nil, "base.HEX")) testlib.test(OTHER,"ProtoField.int16_duplicate_bad",not pcall(callObjFuncGetter, pfields,10, ProtoField,"int16", "dns.id","Transaction ID")) -- now compatible (but different type) testlib.test(OTHER,"ProtoField.new_duplicate_ok",pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "Flags", "dns.flags", ftypes.UINT32, nil, "base.HEX")) testlib.test(OTHER,"ProtoField.uint16_duplicate_ok",pcall(callObjFuncGetter, pfields,10, ProtoField,"uint16", "dns.id","Transaction ID")) -- invalid 
valuestring arg testlib.test(OTHER,"ProtoField.new_invalid_valuestring",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "Transaction ID", "mydns.trans_id", ftypes.INT16,"howdy","base.DEC")) -- invalid ftype testlib.test(OTHER,"ProtoField.new_invalid_ftype",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "Transaction ID", "mydns.trans_id", 9999)) -- invalid description --testlib.test(OTHER,"ProtoField.new_invalid_description",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "", "mydns.trans_id", ftypes.INT16)) testlib.test(OTHER,"ProtoField.new_invalid_description",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", nil, "mydns.trans_id", ftypes.INT16)) testlib.test(OTHER,"ProtoField.new_invalid_abbr",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "trans id", "", ftypes.INT16)) testlib.test(OTHER,"ProtoField.new_invalid_abbr",not pcall(callObjFuncGetter, pfields,10, ProtoField,"new", "trans id", nil, ftypes.INT16)) testlib.test(OTHER,"ProtoField.int16",pcall(callObjFuncGetter, pfields,pf_num_questions, ProtoField,"int16", "mydns.num_questions", "Number of Questions")) testlib.test(OTHER,"ProtoField.int16",pcall(callObjFuncGetter, pfields,pf_num_answers, ProtoField,"int16", "mydns.num_answers", "Number of Answer RRs",base.DEC)) testlib.test(OTHER,"ProtoField.int16",pcall(callObjFuncGetter, pfields,pf_num_authority_rr, ProtoField,"int16", "mydns.num_authority_rr", "Number of Authority RRs",base.DEC)) testlib.test(OTHER,"ProtoField.int16",pcall(callObjFuncGetter, pfields,pf_num_additional_rr, ProtoField,"int16", "mydns.num_additional_rr", "Number of Additional RRs")) -- now undo the table thingy pf_trasaction_id = pfields[pf_trasaction_id] pf_flags = pfields[pf_flags] pf_num_questions = pfields[pf_num_questions] pf_num_answers = pfields[pf_num_answers] pf_num_authority_rr = pfields[pf_num_authority_rr] pf_num_additional_rr = pfields[pf_num_additional_rr] -- within the flags field, we want to parse/show the bits separately -- note the "base" argument becomes the size of the bitmask'ed field when ftypes.BOOLEAN is used -- the "mask" argument is which bits we want to use for this field (e.g., base=16 and mask=0x8000 means we want the top bit of a 16-bit field) -- again the following shows different ways of doing the same thing basically local pf_flag_response = ProtoField.new("Response", "mydns.flags.response", ftypes.BOOLEAN, {"this is a response","this is a query"}, 16, 0x8000, "is the message a response?") local pf_flag_opcode = ProtoField.new("Opcode", "mydns.flags.opcode", ftypes.UINT16, nil, base.DEC, 0x7800, "operation code") local pf_flag_authoritative = ProtoField.new("Authoritative", "mydns.flags.authoritative", ftypes.BOOLEAN, nil, 16, 0x0400, "is the response authoritative?") local pf_flag_truncated = ProtoField.bool("mydns.flags.truncated", "Truncated", 16, nil, 0x0200, "is the message truncated?") local pf_flag_recursion_desired = ProtoField.bool("mydns.flags.recursion_desired", "Recursion desired", 16, {"yes","no"}, 0x0100, "do the query recursively?") local pf_flag_recursion_available = ProtoField.bool("mydns.flags.recursion_available", "Recursion available", 16, nil, 0x0080, "does the server support recursion?") local pf_flag_z = ProtoField.uint16("mydns.flags.z", "World War Z - Reserved for future use", base.HEX, nil, 0x0040, "when is it the future?") local pf_flag_authenticated = ProtoField.bool("mydns.flags.authenticated", "Authenticated", 16, {"yes","no"}, 0x0020, "did the server DNSSEC authenticate?") local 
pf_flag_checking_disabled = ProtoField.bool("mydns.flags.checking_disabled", "Checking disabled", 16, nil, 0x0010) -- no, these aren't all the DNS response codes - this is just an example local rcodes = { [0] = "No Error", [1] = "Format Error", [2] = "Server Failure", [3] = "Non-Existent Domain", [9] = "Server Not Authoritative for zone" } -- the above rcodes table is used in this next ProtoField local pf_flag_rcode = ProtoField.uint16("mydns.flags.rcode", "Response code", base.DEC, rcodes, 0x000F) local pf_query = ProtoField.new("Query", "mydns.query", ftypes.BYTES) local pf_query_name = ProtoField.new("Name", "mydns.query.name", ftypes.STRING) local pf_query_name_len = ProtoField.new("Name Length", "mydns.query.name.len", ftypes.UINT8) local pf_query_label_count = ProtoField.new("Label Count", "mydns.query.label.count", ftypes.UINT8) local rrtypes = { [1] = "A (IPv4 host address)", [2] = "NS (authoritative name server)", [28] = "AAAA (for geeks only)" } local pf_query_type = ProtoField.uint16("mydns.query.type", "Type", base.DEC, rrtypes) -- again, not all class types are listed here local classes = { [0] = "Reserved", [1] = "IN (Internet)", [2] = "The 1%", [5] = "First class", [6] = "Business class", [65535] = "Cattle class" } local pf_query_class = ProtoField.uint16("mydns.query.class", "Class", base.DEC, classes, nil, "keep it classy folks") testlib.testing(OTHER,"Proto functions") ---------------------------------------- -- this actually registers the ProtoFields above, into our new Protocol -- in a real script I wouldn't do it this way; I'd build a table of fields programaticaly -- and then set dns.fields to it, so as to avoid forgetting a field local myfields = { pf_trasaction_id, pf_flags, pf_num_questions, pf_num_answers, pf_num_authority_rr, pf_num_additional_rr, pf_flag_response, pf_flag_opcode, pf_flag_authoritative, pf_flag_truncated, pf_flag_recursion_desired, pf_flag_recursion_available, pf_flag_z, pf_flag_authenticated, pf_flag_checking_disabled, pf_flag_rcode, pf_query, pf_query_name, pf_query_name_len, pf_query_label_count, pf_query_type, pf_query_class } --dns.fields = myfields testlib.test(OTHER,"Proto.fields-set", pcall(setValue,dns,"fields",myfields)) testlib.test(OTHER,"Proto.fields-get", pcall(getValue,dns,"fields")) testlib.test(OTHER,"Proto.fields-get", #dns.fields == #myfields) local pf_foo = ProtoField.uint16("myfoo.com", "Fooishly", base.DEC, rcodes, 0x000F) local foo = Proto("myfoo","MyFOO Protocol") local bar = Proto("mybar","MyBAR Protocol") testlib.test(OTHER,"Proto.fields-set", pcall(setValue,foo,"fields",pf_foo)) testlib.test(OTHER,"Proto.fields-get", #foo.fields == 1) testlib.test(OTHER,"Proto.fields-get", foo.fields[1] == pf_foo) testlib.test(OTHER,"Proto.fields-set", not pcall(setValue,bar,"fields","howdy")) testlib.test(OTHER,"Proto.fields-set", not pcall(setValue,bar,"fields",nil)) testlib.test(OTHER,"Proto.fields-get", #bar.fields == 0) testlib.test(OTHER,"Proto.name-get", foo.name == "MYFOO") testlib.test(OTHER,"Proto.name-set", not pcall(setValue,foo,"name","howdy")) testlib.test(OTHER,"Proto.description-get", foo.description == "MyFOO Protocol") testlib.test(OTHER,"Proto.description-set", not pcall(setValue,foo,"description","howdy")) testlib.test(OTHER,"Proto.prefs-get", typeof(foo.prefs) == "Prefs") testlib.test(OTHER,"Proto.prefs-set", not pcall(setValue,foo,"prefs","howdy")) local function dummy() setFailed(OTHER) error("dummy function called!") return end -- can't get this because we haven't set it yet 
testlib.test(OTHER,"Proto.dissector-get", not pcall(getValue,foo,"dissector")) -- now set it testlib.test(OTHER,"Proto.dissector-set", pcall(setValue,foo,"dissector",dummy)) testlib.test(OTHER,"Proto.dissector-set", not pcall(setValue,foo,"dissector","howdy")) testlib.test(OTHER,"Proto.dissector-get", pcall(getValue,foo,"dissector")) testlib.test(OTHER,"Proto.prefs_changed-set", pcall(setValue,foo,"prefs_changed",dummy)) testlib.test(OTHER,"Proto.prefs_changed-get", not pcall(getValue,foo,"prefs_changed")) testlib.test(OTHER,"Proto.prefs_changed-set", not pcall(setValue,foo,"prefs_changed","howdy")) local function dummy_init() testlib.test(OTHER,"Proto.init-called",true) end testlib.test(OTHER,"Proto.init-set", pcall(setValue,foo,"init",dummy_init)) testlib.test(OTHER,"Proto.init-set", pcall(setValue,bar,"init",dummy_init)) testlib.test(OTHER,"Proto.init-get", not pcall(getValue,foo,"init")) testlib.test(OTHER,"Proto.init-set", not pcall(setValue,foo,"init","howdy")) testlib.getResults()
Lua
wireshark/test/lua/protobuf_test_called_by_custom_dissector.lua
do
    local protobuf_dissector = Dissector.get("protobuf")

    -- Create a protobuf dissector based on UDP or TCP.
    -- The UDP dissector will take the whole tvb as a message.
    -- The TCP dissector will parse the tvb in the format:
    --     [4bytes length][a message][4bytes length][a message]...
    -- @param name     The name of the new dissector.
    -- @param desc     The description of the new dissector.
    -- @param for_udp  Register the new dissector in the UDP table (enables 'Decode As').
    -- @param for_tcp  Register the new dissector in the TCP table (enables 'Decode As').
    -- @param msgtype  Message type. This must be the root message defined in your .proto file.
    local function create_protobuf_dissector(name, desc, for_udp, for_tcp, msgtype)
        local proto = Proto(name, desc)
        local f_length = ProtoField.uint32(name .. ".length", "Length", base.DEC)
        proto.fields = { f_length }

        proto.dissector = function(tvb, pinfo, tree)
            local subtree = tree:add(proto, tvb())
            if for_udp and pinfo.port_type == 3 then -- UDP
                if msgtype ~= nil then
                    pinfo.private["pb_msg_type"] = "message," .. msgtype
                end
                pcall(Dissector.call, protobuf_dissector, tvb, pinfo, subtree)
            elseif for_tcp and pinfo.port_type == 2 then -- TCP
                local offset = 0
                local remaining_len = tvb:len()
                while remaining_len > 0 do
                    if remaining_len < 4 then -- head not enough
                        pinfo.desegment_offset = offset
                        pinfo.desegment_len = DESEGMENT_ONE_MORE_SEGMENT
                        return -1
                    end

                    local data_len = tvb(offset, 4):uint()

                    if remaining_len - 4 < data_len then -- data not enough
                        pinfo.desegment_offset = offset
                        pinfo.desegment_len = data_len - (remaining_len - 4)
                        return -1
                    end
                    subtree:add(f_length, tvb(offset, 4))

                    if msgtype ~= nil then
                        pinfo.private["pb_msg_type"] = "message," .. msgtype
                    end
                    pcall(Dissector.call, protobuf_dissector,
                          tvb(offset + 4, data_len):tvb(), pinfo, subtree)

                    offset = offset + 4 + data_len
                    remaining_len = remaining_len - 4 - data_len
                end
            end
            pinfo.columns.protocol:set(name)
        end

        if for_udp then DissectorTable.get("udp.port"):add(0, proto) end
        if for_tcp then DissectorTable.get("tcp.port"):add(0, proto) end
        return proto
    end

    -- default pure protobuf udp and tcp dissector without message type
    create_protobuf_dissector("protobuf_udp", "Protobuf UDP")
    create_protobuf_dissector("protobuf_tcp", "Protobuf TCP")
    -- add more protobuf dissectors with message types
    create_protobuf_dissector("AddrBook", "Tutorial AddressBook", true, true, "tutorial.AddressBook")
end
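-- [Annotation, not part of the original test file]
-- The TCP branch above is plain length-prefixed framing. A sketch of the same
-- desegmentation arithmetic in isolation (the function name is illustrative
-- and nothing calls it): given 'remaining_len' buffered bytes and a 4-byte
-- big-endian length header at 'offset', report how many more bytes tshark
-- must hand us before a whole message can be dissected.
local function bytes_still_needed(tvb, offset, remaining_len)
    if remaining_len < 4 then
        return DESEGMENT_ONE_MORE_SEGMENT      -- cannot even read the header yet
    end
    local data_len = tvb(offset, 4):uint()     -- length of the message body
    if remaining_len - 4 < data_len then
        return data_len - (remaining_len - 4)  -- the exact shortfall
    end
    return 0                                   -- a complete message is buffered
end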
Lua
wireshark/test/lua/protobuf_test_field_subdissector_table.lua
-- Test protobuf_field dissector table
do
    local protobuf_field_table = DissectorTable.get("protobuf_field")
    local png_dissector = Dissector.get("png")
    protobuf_field_table:add("tutorial.Person.portrait_image", png_dissector)
end
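-- [Annotation, not part of the original test file]
-- The "protobuf_field" table maps a fully-qualified protobuf field name to a
-- dissector for that field's bytes. The same pattern would route any other
-- bytes-typed field; e.g. (hypothetical field name, assuming a JPEG payload
-- and the built-in "image-jfif" dissector):
--   DissectorTable.get("protobuf_field"):add("tutorial.Person.photo_jpeg", Dissector.get("image-jfif"))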
Lua
wireshark/test/lua/protofield.lua
----------------------------------------
-- script-name: protofield.lua
-- test the ProtoField API
----------------------------------------
local testlib = require("testlib")

local FRAME = "frame"
local PER_FRAME = "per-frame"
local OTHER = "other"

-- expected number of runs
local n_frames = 4
local taptests = {
    [FRAME]=n_frames,
    [PER_FRAME]=n_frames*8,
    [OTHER]=50,
}
testlib.init(taptests)

------------- test script ------------

----------------------------------------
local test_proto = Proto.new("test", "Test Proto")

test_proto.fields.time_field = ProtoField.uint16("test.time", "Time", base.UNIT_STRING, {" sec", " secs"})
test_proto.fields.dist_field = ProtoField.uint16("test.dist", "Distance", base.UNIT_STRING, {" km"})
test_proto.fields.filtered_field = ProtoField.uint16("test.filtered", "Filtered Field", base.DEC)

-- Field type: CHAR
success = pcall(ProtoField.new, "char", "test.char0", ftypes.CHAR)
testlib.test(OTHER,"ProtoField-char", success)

success = pcall(ProtoField.new, "char base NONE without valuestring", "test.char1", ftypes.CHAR, nil, base.NONE)
testlib.test(OTHER,"ProtoField-char-without-valuestring", not success)

success = pcall(ProtoField.new, "char base NONE with valuestring", "test.char2", ftypes.CHAR, {1, "Value"}, base.NONE)
testlib.test(OTHER,"ProtoField-char-with-valuestring", success)

success = pcall(ProtoField.new, "char base DEC", "test.char3", ftypes.CHAR, nil, base.DEC)
testlib.test(OTHER,"ProtoField-char-base-dec", not success)

success = pcall(ProtoField.new, "char base UNIT_STRING", "test.char4", ftypes.CHAR, {" m"}, base.UNIT_STRING)
testlib.test(OTHER,"ProtoField-char-unit-string", not success)

success = pcall(ProtoField.new, "char base RANGE_STRING", "test.char5", ftypes.CHAR, {{1, 2, "Value"}}, base.RANGE_STRING)
testlib.test(OTHER,"ProtoField-char-range-string", success)

-- Field type: BOOLEAN/UINT64 with (64 bit) mask
success = pcall(ProtoField.new, "boolean", "test.boolean0", ftypes.BOOLEAN, nil, base.HEX, 0x1)
testlib.test(OTHER,"ProtoField-new-bool-mask-trivial", success)

success = pcall(ProtoField.new, "boolean", "test.boolean1", ftypes.BOOLEAN, nil, base.HEX, "1")
testlib.test(OTHER,"ProtoField-new-bool-mask-string", success)

success = pcall(ProtoField.new, "boolean", "test.boolean2", ftypes.BOOLEAN, nil, base.HEX, UInt64(0x00000001, 0x0))
testlib.test(OTHER,"ProtoField-new-bool-mask-uint64", success)

success = pcall(ProtoField.new, "boolean", "test.boolean3", ftypes.BOOLEAN, nil, base.NONE, "invalid") -- 0
testlib.test(OTHER,"ProtoField-new-bool-mask-string-invalid", success)

success = pcall(ProtoField.new, "boolean", "test.boolean4", ftypes.BOOLEAN, nil, base.HEX, "-1") -- 0xFFFFFFFFFFFFFFFF
testlib.test(OTHER,"ProtoField-new-bool-mask-negative", success)

success = pcall(ProtoField.new, "boolean", "test.boolean5", ftypes.BOOLEAN, nil, base.NONE)
testlib.test(OTHER,"ProtoField-new-bool-mask-none", success)

success = pcall(ProtoField.new, "boolean", "test.boolean6", ftypes.BOOLEAN, nil, base.NONE, nil)
testlib.test(OTHER,"ProtoField-new-bool-mask-nil", success)

success = pcall(ProtoField.bool, "test.boolean10", nil, 64, nil, 0x1)
testlib.test(OTHER,"ProtoField-bool-mask-trivial", success)

success = pcall(ProtoField.bool, "test.boolean11", nil, 64, nil, "1")
testlib.test(OTHER,"ProtoField-bool-mask-string", success)

success = pcall(ProtoField.bool, "test.boolean12", nil, 64, nil, UInt64(0x00000001, 0x0))
testlib.test(OTHER,"ProtoField-bool-mask-uint64", success)

success = pcall(ProtoField.bool, "test.boolean13", nil, base.NONE, nil, "invalid") -- 0
testlib.test(OTHER,"ProtoField-bool-mask-string-invalid", success)

success = pcall(ProtoField.bool, "test.boolean14", nil, 64, nil, "-1") -- 0xFFFFFFFFFFFFFFFF
testlib.test(OTHER,"ProtoField-bool-mask-negative", success)

success = pcall(ProtoField.bool, "test.boolean15", nil, base.NONE, nil)
testlib.test(OTHER,"ProtoField-bool-mask-none", success)

success = pcall(ProtoField.bool, "test.boolean16", nil, base.NONE, nil, nil)
testlib.test(OTHER,"ProtoField-bool-mask-nil", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_0", ftypes.UINT64, nil, base.HEX, 0x1)
testlib.test(OTHER,"ProtoField-new-uint64-mask-trivial", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_1", ftypes.UINT64, nil, base.HEX, "1")
testlib.test(OTHER,"ProtoField-new-uint64-mask-string", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_2", ftypes.UINT64, nil, base.HEX, UInt64(0x00000001, 0x0))
testlib.test(OTHER,"ProtoField-new-uint64-mask-uint64", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_3", ftypes.UINT64, nil, base.NONE, "invalid") -- 0
testlib.test(OTHER,"ProtoField-new-uint64-mask-string-invalid", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_4", ftypes.UINT64, nil, base.HEX, "-1") -- 0xFFFFFFFFFFFFFFFF
testlib.test(OTHER,"ProtoField-new-uint64-mask-negative", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_5", ftypes.UINT64, nil, base.NONE)
testlib.test(OTHER,"ProtoField-new-uint64-mask-none", success)

success = pcall(ProtoField.new, "uint64", "test.uint64_6", ftypes.UINT64, nil, base.NONE, nil)
testlib.test(OTHER,"ProtoField-new-uint64-mask-nil", success)

success = pcall(ProtoField.uint64, "test.uint64_10", nil, base.HEX, nil, 0x1)
testlib.test(OTHER,"ProtoField-uint64-mask-trivial", success)

success = pcall(ProtoField.uint64, "test.uint64_11", nil, base.HEX, nil, "1")
testlib.test(OTHER,"ProtoField-uint64-mask-string", success)

success = pcall(ProtoField.uint64, "test.uint64_12", nil, base.HEX, nil, UInt64(0x00000001, 0x0))
testlib.test(OTHER,"ProtoField-uint64-mask-uint64", success)

success = pcall(ProtoField.uint64, "test.uint64_13", nil, base.DEC, nil, "invalid") -- 0
testlib.test(OTHER,"ProtoField-uint64-mask-string-invalid", success)

success = pcall(ProtoField.uint64, "test.uint64_14", nil, base.DEC, nil, "-1") -- 0xFFFFFFFFFFFFFFFF
testlib.test(OTHER,"ProtoField-uint64-mask-negative", success)

success = pcall(ProtoField.uint64, "test.uint64_15", nil, base.DEC, nil)
testlib.test(OTHER,"ProtoField-uint64-mask-none", success)

success = pcall(ProtoField.uint64, "test.uint64_16", nil, base.DEC, nil, nil)
testlib.test(OTHER,"ProtoField-uint64-mask-nil", success)

-- Field name: empty, illegal, incompatible
success = pcall(ProtoField.int8, nil, "empty field name 1")
testlib.test(OTHER,"ProtoField-empty-field-name-1", not success)

success = pcall(ProtoField.int8, "", "empty field name 2")
testlib.test(OTHER,"ProtoField-empty-field-name-2", not success)

success = pcall(ProtoField.int8, "test.$", "illegal field name")
testlib.test(OTHER,"ProtoField-illegal-field-name", not success)

success = pcall(ProtoField.int8, "frame.time", "incompatible field name")
testlib.test(OTHER,"ProtoField-incompatible-field-name", not success)

-- Actual name: empty
success = pcall(ProtoField.int8, "test.empty_name_1")
testlib.test(OTHER,"ProtoField-empty-name-1", success) -- will use abbrev

success = pcall(ProtoField.int8, "test.empty_name_2", "")
testlib.test(OTHER,"ProtoField-empty-name-2", not success)

-- Signed integer base values, only base.DEC should work
success = pcall(ProtoField.int8, "test.int.base_none", "int base NONE", base.NONE)
testlib.test(OTHER,"ProtoField-int-base-none", not success)

success = pcall(ProtoField.int8, "test.int.base_dec", "int base DEC", base.DEC)
testlib.test(OTHER,"ProtoField-int-base-dec", success)

success = pcall(ProtoField.int8, "test.int.base_hex", "int base HEX", base.HEX)
testlib.test(OTHER,"ProtoField-int-base-hex", not success)

success = pcall(ProtoField.int8, "test.int.base_oct", "int base OCT", base.OCT)
testlib.test(OTHER,"ProtoField-int-base-oct", not success)

success = pcall(ProtoField.int8, "test.int.base_dec_hex", "int base DEC_HEX", base.DEC_HEX)
testlib.test(OTHER,"ProtoField-int-base-dec-hex", not success)

success = pcall(ProtoField.int8, "test.int.base_hex_dec", "int base HEX_DEC", base.HEX_DEC)
testlib.test(OTHER,"ProtoField-int-base-hex-dec", not success)

-- Passing no table should not work
success = pcall(ProtoField.uint16, "test.bad0", "Bad0", base.UNIT_STRING)
testlib.test(OTHER,"ProtoField-unitstring-no-table", not success)

-- Passing an empty table should not work
success = pcall(ProtoField.uint16, "test.bad1", "Bad1", base.UNIT_STRING, {})
testlib.test(OTHER,"ProtoField-unitstring-empty-table", not success)

-- Passing userdata should not work
success = pcall(ProtoField.uint16, "test.bad2", "Bad2", base.UNIT_STRING, {test_proto})
testlib.test(OTHER,"ProtoField-unitstring-userdata", not success)

-- Too many items are not supported
success = pcall(ProtoField.uint16, "test.bad3", "Bad3", base.UNIT_STRING, {"too", "many", "items"})
testlib.test(OTHER,"ProtoField-unitstring-too-many-items", not success)

local numinits = 0
function test_proto.init()
    numinits = numinits + 1
    if numinits == 2 then
        testlib.getResults()
    end
end

-- Test expected text with singular and plural forms
function test_proto.dissector(tvb, pinfo, tree)
    local ti
    testlib.countPacket(FRAME)

    local tvb1 = ByteArray.new("00 00"):tvb("Tvb1")
    ti = tree:add(test_proto.fields.time_field, tvb1())
    testlib.test(PER_FRAME,"Time: 0 secs", ti.text == "Time: 0 secs")

    ti = tree:add(test_proto.fields.dist_field, tvb1())
    testlib.test(PER_FRAME,"Distance: 0 km", ti.text == "Distance: 0 km")

    local tvb2 = ByteArray.new("00 01"):tvb("Tvb2")
    ti = tree:add(test_proto.fields.time_field, tvb2())
    testlib.test(PER_FRAME,"Time: 1 sec", ti.text == "Time: 1 sec")

    ti = tree:add(test_proto.fields.dist_field, tvb2())
    testlib.test(PER_FRAME,"Distance: 1 km", ti.text == "Distance: 1 km")

    local tvb3 = ByteArray.new("ff ff"):tvb("Tvb3")
    ti = tree:add(test_proto.fields.time_field, tvb3())
    testlib.test(PER_FRAME,"Time: 65535 secs", ti.text == "Time: 65535 secs")

    ti = tree:add(test_proto.fields.dist_field, tvb3())
    testlib.test(PER_FRAME,"Distance: 65535 km", ti.text == "Distance: 65535 km")

    ti = tree:add(test_proto.fields.filtered_field, tvb2())

    -- Note that this file should be loaded in tshark twice. Once with a visible
    -- tree (-V) and once without a visible tree.
    if tree.visible then
        -- Tree is visible so both fields should be referenced
        testlib.test(PER_FRAME,"Visible tree: Time is referenced",
                     tree:referenced(test_proto.fields.time_field) == true)
        testlib.test(PER_FRAME,"Visible tree: Filtered field is referenced",
                     tree:referenced(test_proto.fields.filtered_field) == true)
    else
        -- Tree is not visible so only the field that appears in a filter should be referenced
        testlib.test(PER_FRAME,"Invisible tree: Time is NOT referenced",
                     tree:referenced(test_proto.fields.time_field) == false)
        testlib.test(PER_FRAME,"Invisible tree: Filtered field is referenced",
                     tree:referenced(test_proto.fields.filtered_field) == true)
    end

    testlib.pass(FRAME)
end

DissectorTable.get("udp.port"):add(65333, test_proto)
DissectorTable.get("udp.port"):add(65346, test_proto)
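-- [Annotation, not part of the original test file]
-- A minimal sketch of the base.UNIT_STRING convention exercised above: with a
-- two-entry table the first string is appended when the value is exactly 1 and
-- the second otherwise, which is why "test.time" renders "Time: 1 sec" but
-- "Time: 0 secs". The field below is illustrative and never registered.
local pf_pkts = ProtoField.uint16("test.pkts", "Packets", base.UNIT_STRING, {" packet", " packets"})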
Lua
wireshark/test/lua/script_args.lua
----------------------------------------
-- This just verifies that the number of args it got is what it expected.
-- The first arg should be a number, for how many total args to expect,
-- including itself.

local testlib = require("testlib")

local ARGS = "args"
testlib.init({ [ARGS]=3 })

-----------------------------

testlib.testing("Command-line args")

local arg={...} -- get passed-in args

testlib.test(ARGS, "arg1", arg ~= nil and #arg > 0)

local numargs = tonumber(arg[1])
testlib.test(ARGS, "arg2", numargs ~= nil)
testlib.test(ARGS, "arg3", #arg == numargs)

testlib.getResults()
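-- [Annotation, not part of the original test file]
-- The args above arrive through tshark's -X options; lua_script1 feeds
-- arguments to the first -X lua_script loaded. For example, the following
-- invocation satisfies this script's three expected tests:
--   tshark -r empty.pcap -X lua_script:script_args.lua \
--          -X lua_script1:3 -X lua_script1:foo -X lua_script1:bar
-- Here arg[1] == "3", tonumber(arg[1]) == 3, and #arg == 3.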
Lua
wireshark/test/lua/struct.lua
-- This is a test script for tshark/wireshark.
-- This script runs inside tshark/wireshark, so to run it do:
-- wireshark -X lua_script:<path_to_testdir>/lua/struct.lua
-- tshark -r bogus.cap -X lua_script:<path_to_testdir>/lua/struct.lua

-- Tests Struct functions

local testlib = require("testlib")
local OTHER = "other"

--
-- auxiliary function to print a hexadecimal `dump' of a given string
-- (not used by the test)
--
local function tohex(s, sep)
    local patt = "%02x" .. (sep or "")
    s = string.gsub(s, "(.)", function(c)
        return string.format(patt, string.byte(c))
    end)
    if sep then s = s:sub(1,-(sep:len()+1)) end
    return s
end

local function bp(s)
    s = tohex(s)
    print(s)
end

-----------------------------

print("Lua version: ".._VERSION)

testlib.init({ [OTHER] = 0 })
testlib.testing(OTHER, "Struct library")

local lib = Struct
testlib.test(OTHER,"global",_G.Struct == lib)

for name, val in pairs(lib) do
    print("\t"..name.." = "..type(val))
end

testlib.test(OTHER,"class1",type(lib) == 'table')
testlib.test(OTHER,"class2",type(lib.pack) == 'function')
testlib.test(OTHER,"class3",type(lib.unpack) == 'function')
testlib.test(OTHER,"class4",type(lib.size) == 'function')

local val1 = "\42\00\00\00\00\00\00\01\00\00\00\02\00\00\00\03\00\00\00\04"
local fmt1_le = "<!4biii4i4"
local fmt1_be = ">!4biii4i4"
local fmt1_64le = "<!4ieE"
local fmt1_64be = ">!4ieE"
local fmt2_be = ">!4bi(ii4)i"

testlib.testing(OTHER, "basic size")

testlib.test(OTHER,"basic_size1", lib.size(fmt1_le) == string.len(val1))
testlib.test(OTHER,"basic_size2", lib.size(fmt1_le) == Struct.size(fmt1_be))
testlib.test(OTHER,"basic_size3", lib.size(fmt1_le) == Struct.size(fmt1_64le))
testlib.test(OTHER,"basic_size4", lib.size(fmt2_be) == Struct.size(fmt1_64le))

testlib.testing(OTHER, "basic values")

testlib.test(OTHER,"basic_values1", lib.values(fmt1_le) == 5)
testlib.test(OTHER,"basic_values2", lib.values(fmt1_be) == lib.values(fmt1_le))
testlib.test(OTHER,"basic_values3", lib.values(fmt1_64le) == 3)
testlib.test(OTHER,"basic_values4", lib.values(fmt2_be) == lib.values(fmt1_64le))
testlib.test(OTHER,"basic_values4", lib.values(" (I) s x i XxX c0") == 3)

testlib.testing(OTHER, "tohex")
local val1hex = "2A:00:00:00:00:00:00:01:00:00:00:02:00:00:00:03:00:00:00:04"

testlib.test(OTHER,"tohex1", Struct.tohex(val1) == tohex(val1):upper())
testlib.test(OTHER,"tohex2", Struct.tohex(val1,true) == tohex(val1))
testlib.test(OTHER,"tohex3", Struct.tohex(val1,false,":") == val1hex)
testlib.test(OTHER,"tohex4", Struct.tohex(val1,true,":") == val1hex:lower())

testlib.testing(OTHER, "fromhex")

testlib.test(OTHER,"fromhex1", Struct.fromhex(val1hex,":") == val1)
local val1hex2 = val1hex:gsub(":","")
testlib.test(OTHER,"fromhex2", Struct.fromhex(val1hex2) == val1)
testlib.test(OTHER,"fromhex3", Struct.fromhex(val1hex2:lower()) == val1)

testlib.testing(OTHER, "basic unpack")

local ret1, ret2, ret3, ret4, ret5, pos = lib.unpack(fmt1_le, val1)
testlib.test(OTHER,"basic_unpack1", ret1 == 42 and ret2 == 0x01000000 and ret3 == 0x02000000 and ret4 == 0x03000000 and ret5 == 0x04000000)
testlib.test(OTHER,"basic_unpack_position1", pos == string.len(val1) + 1)

ret1, ret2, ret3, ret4, ret5, pos = lib.unpack(fmt1_be, val1)
testlib.test(OTHER,"basic_unpack2", ret1 == 42 and ret2 == 1 and ret3 == 2 and ret4 == 3 and ret5 == 4)
testlib.test(OTHER,"basic_unpack_position2", pos == string.len(val1) + 1)

ret1, ret2, ret3, pos = lib.unpack(fmt1_64le, val1)
testlib.test(OTHER,"basic_unpack3", ret1 == 42 and ret2 == Int64.new( 0x01000000, 0x02000000) and ret3 == UInt64.new( 0x03000000, 0x04000000))
print(typeof(ret2),typeof(ret3))
testlib.test(OTHER,"basic_unpack3b", typeof(ret2) == "Int64" and typeof(ret3) == "UInt64")
testlib.test(OTHER,"basic_unpack_position3", pos == string.len(val1) + 1)

ret1, ret2, ret3, pos = lib.unpack(fmt1_64be, val1)
testlib.test(OTHER,"basic_unpack4", ret1 == 0x2A000000 and ret2 == Int64.new( 2, 1) and ret3 == UInt64.new( 4, 3))
testlib.test(OTHER,"basic_unpack4b", typeof(ret2) == "Int64" and typeof(ret3) == "UInt64")
testlib.test(OTHER,"basic_unpack_position4", pos == string.len(val1) + 1)

ret1, ret2, ret3, pos = lib.unpack(fmt2_be, val1)
testlib.test(OTHER,"basic_unpack5", ret1 == 42 and ret2 == 1 and ret3 == 4)
testlib.test(OTHER,"basic_unpack_position5", pos == string.len(val1) + 1)

testlib.testing(OTHER, "basic pack")

local pval1 = lib.pack(fmt1_le, lib.unpack(fmt1_le, val1))
testlib.test(OTHER,"basic_pack1", pval1 == val1)
testlib.test(OTHER,"basic_pack2", val1 == lib.pack(fmt1_be, lib.unpack(fmt1_be, val1)))
testlib.test(OTHER,"basic_pack3", val1 == lib.pack(fmt1_64le, lib.unpack(fmt1_64le, val1)))
testlib.test(OTHER,"basic_pack4", val1 == lib.pack(fmt1_64be, lib.unpack(fmt1_64be, val1)))
testlib.test(OTHER,"basic_pack5", lib.pack(fmt2_be, lib.unpack(fmt1_be, val1)) == lib.pack(">!4biiii", 42, 1, 0, 0, 2))

----------------------------------
-- following comes from:
-- https://github.com/LuaDist/struct/blob/master/teststruct.lua
-- unfortunately many of his tests assumed a local machine word
-- size of 4 bytes for long and such, so I had to muck with this
-- to make it handle 64-bit compiles.
-- $Id: teststruct.lua,v 1.2 2008/04/18 20:06:01 roberto Exp $

-- some pack/unpack commands are host-size dependent, so we need to pad
local l_pad, ln_pad = "",""
if lib.size("l") == 8 then
    -- the machine running this script uses a long of 8 bytes
    l_pad = "\00\00\00\00"
    ln_pad = "\255\255\255\255"
end

local a,b,c,d,e,f,x

testlib.testing(OTHER, "pack")

testlib.test(OTHER,"pack_I",#Struct.pack("I", 67324752) == 4)
testlib.test(OTHER,"pack_b1",lib.pack('b', 10) == string.char(10))
testlib.test(OTHER,"pack_b2",lib.pack('bbb', 10, 20, 30) == string.char(10, 20, 30))
testlib.test(OTHER,"pack_h1",lib.pack('<h', 10) == string.char(10, 0))
testlib.test(OTHER,"pack_h2",lib.pack('>h', 10) == string.char(0, 10))
testlib.test(OTHER,"pack_h3",lib.pack('<h', -10) == string.char(256-10, 256-1))
testlib.test(OTHER,"pack_l1",lib.pack('<l', 10) == string.char(10, 0, 0, 0)..l_pad)
testlib.test(OTHER,"pack_l2",lib.pack('>l', 10) == l_pad..string.char(0, 0, 0, 10))
testlib.test(OTHER,"pack_l3",lib.pack('<l', -10) == string.char(256-10, 256-1, 256-1, 256-1)..ln_pad)

testlib.testing(OTHER, "unpack")

testlib.test(OTHER,"unpack_h1",lib.unpack('<h', string.char(10, 0)) == 10)
testlib.test(OTHER,"unpack_h2",lib.unpack('>h', string.char(0, 10)) == 10)
testlib.test(OTHER,"unpack_h3",lib.unpack('<h', string.char(256-10, 256-1)) == -10)
testlib.test(OTHER,"unpack_l1",lib.unpack('<l', string.char(10, 0, 0, 1)..l_pad) == 10 + 2^(3*8))
testlib.test(OTHER,"unpack_l2",lib.unpack('>l', l_pad..string.char(0, 1, 0, 10)) == 10 + 2^(2*8))
testlib.test(OTHER,"unpack_l3",lib.unpack('<l', string.char(256-10, 256-1, 256-1, 256-1)..ln_pad) == -10)

-- limits
lims = {{'B', 255}, {'b', 127}, {'b', -128},
    {'I1', 255}, {'i1', 127}, {'i1', -128},
    {'H', 2^16 - 1}, {'h', 2^15 - 1}, {'h', -2^15},
    {'I2', 2^16 - 1}, {'i2', 2^15 - 1}, {'i2', -2^15},
    {'L', 2^32 - 1}, {'l', 2^31 - 1}, {'l', -2^31},
    {'I4', 2^32 - 1}, {'i4', 2^31 - 1}, {'i4', -2^31},
}

for _, a in pairs{'', '>', '<'} do
    local i = 1
    for _, l in pairs(lims) do
        local fmt = a .. l[1]
        testlib.test(OTHER,"limit"..i.."("..l[1]..")", lib.unpack(fmt, lib.pack(fmt, l[2])) == l[2])
        i = i + 1
    end
end

testlib.testing(OTHER, "fixed-sized ints")

-- tests for fixed-sized ints
local num = 1
for _, i in pairs{1,2,4} do
    x = lib.pack('<i'..i, -3)
    testlib.test(OTHER,"pack_fixedlen"..num, string.len(x) == i)
    testlib.test(OTHER,"pack_fixed"..num, x == string.char(256-3) .. string.rep(string.char(256-1), i-1))
    testlib.test(OTHER,"unpack_fixed"..num, lib.unpack('<i'..i, x) == -3)
    num = num + 1
end

testlib.testing(OTHER, "alignment")

-- alignment
d = lib.pack("d", 5.1)
ali = {[1] = string.char(1)..d,
    [2] = string.char(1, 0)..d,
    [4] = string.char(1, 0, 0, 0)..d,
    [8] = string.char(1, 0, 0, 0, 0, 0, 0, 0)..d,
}

num = 1
for a,r in pairs(ali) do
    testlib.test(OTHER,"pack_align"..num, lib.pack("!"..a.."bd", 1, 5.1) == r)
    local x,y = lib.unpack("!"..a.."bd", r)
    testlib.test(OTHER,"unpack_align"..num, x == 1 and y == 5.1)
    num = num + 1
end

testlib.testing(OTHER, "string")

-- strings
testlib.test(OTHER,"string_pack1",lib.pack("c", "alo alo") == "a")
testlib.test(OTHER,"string_pack2",lib.pack("c4", "alo alo") == "alo ")
testlib.test(OTHER,"string_pack3",lib.pack("c5", "alo alo") == "alo a")
testlib.test(OTHER,"string_pack4",lib.pack("!4b>c7", 1, "alo alo") == "\1alo alo")
testlib.test(OTHER,"string_pack5",lib.pack("!2<s", "alo alo") == "alo alo\0")
testlib.test(OTHER,"string_pack6",lib.pack(" c0 ", "alo alo") == "alo alo")

num = 1
for _, f in pairs{"B", "l", "i2", "f", "d"} do
    for _, s in pairs{"", "a", "alo", string.rep("x", 200)} do
        local x = lib.pack(f.."c0", #s, s)
        testlib.test(OTHER,"string_unpack"..num, lib.unpack(f.."c0", x) == s)
        num = num + 1
    end
end

testlib.testing(OTHER, "indices")

-- indices
x = lib.pack("!>iiiii", 1, 2, 3, 4, 5)
local i = 1
local k = 1
num = 1
while i < #x do
    local v, j = lib.unpack("!>i", x, i)
    testlib.test(OTHER,"index_unpack"..num, j == i + 4 and v == k)
    i = j; k = k + 1
    num = num + 1
end

testlib.testing(OTHER, "absolute")

-- alignments are relative to 'absolute' positions
x = lib.pack("!8 xd", 12)
testlib.test(OTHER,"absolute_unpack1",lib.unpack("!8d", x, 3) == 12)

testlib.test(OTHER,"absolute_pack1",lib.pack("<lhbxxH", -2, 10, -10, 250) == string.char(254, 255, 255, 255) ..ln_pad.. string.char(10, 0, 246, 0, 0, 250, 0))
a,b,c,d = lib.unpack("<lhbxxH", string.char(254, 255, 255, 255) ..ln_pad.. string.char(10, 0, 246, 0, 0, 250, 0))
testlib.test(OTHER,"absolute_unpack2",a == -2 and b == 10 and c == -10 and d == 250)
testlib.test(OTHER,"absolute_pack2",lib.pack(">lBxxH", -20, 10, 250) == ln_pad..string.char(255, 255, 255, 236, 10, 0, 0, 0, 250))

testlib.testing(OTHER, "position")

a, b, c, d = lib.unpack(">lBxxH", ln_pad..string.char(255, 255, 255, 236, 10, 0, 0, 0, 250))
-- the 'd' return val is position in string, so will depend on size of long 'l'
local vald = 10 + string.len(l_pad)
testlib.test(OTHER,"position_unpack1",a == -20 and b == 10 and c == 250 and d == vald)

a,b,c,d,e = lib.unpack(">fdfH", '000'..lib.pack(">fdfH", 3.5, -24e-5, 200.5, 30000), 4)
testlib.test(OTHER,"position_unpack2",a == 3.5 and b == -24e-5 and c == 200.5 and d == 30000 and e == 22)
a,b,c,d,e = lib.unpack("<fdxxfH", '000'..lib.pack("<fdxxfH", -13.5, 24e5, 200.5, 300), 4)
testlib.test(OTHER,"position_unpack3",a == -13.5 and b == 24e5 and c == 200.5 and d == 300 and e == 24)

x = lib.pack(">I2fi4I2", 10, 20, -30, 40001)
testlib.test(OTHER,"position_pack1",string.len(x) == 2+4+4+2)
testlib.test(OTHER,"position_unpack4",lib.unpack(">f", x, 3) == 20)
a,b,c,d = lib.unpack(">i2fi4I2", x)
testlib.test(OTHER,"position_unpack5",a == 10 and b == 20 and c == -30 and d == 40001)

testlib.testing(OTHER, "string length")

local s = "hello hello"
x = lib.pack(" b c0 ", string.len(s), s)
testlib.test(OTHER,"stringlen_unpack1",lib.unpack("bc0", x) == s)
x = lib.pack("Lc0", string.len(s), s)
testlib.test(OTHER,"stringlen_unpack2",lib.unpack(" L c0 ", x) == s)

x = lib.pack("cc3b", s, s, 0)
testlib.test(OTHER,"stringlen_pack1",x == "hhel\0")
testlib.test(OTHER,"stringlen_unpack3",lib.unpack("xxxxb", x) == 0)

testlib.testing(OTHER, "padding")

testlib.test(OTHER,"padding_pack1",lib.pack("<!l", 3) == string.char(3, 0, 0, 0)..l_pad)
testlib.test(OTHER,"padding_pack2",lib.pack("<!xl", 3) == l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad)
testlib.test(OTHER,"padding_pack3",lib.pack("<!xxl", 3) == l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad)
testlib.test(OTHER,"padding_pack4",lib.pack("<!xxxl", 3) == l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad)
testlib.test(OTHER,"padding_unpack1",lib.unpack("<!l", string.char(3, 0, 0, 0)..l_pad) == 3)
testlib.test(OTHER,"padding_unpack2",lib.unpack("<!xl", l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad) == 3)
testlib.test(OTHER,"padding_unpack3",lib.unpack("<!xxl", l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad) == 3)
testlib.test(OTHER,"padding_unpack4",lib.unpack("<!xxxl", l_pad..string.char(0, 0, 0, 0, 3, 0, 0, 0)..l_pad) == 3)

testlib.testing(OTHER, "format")

testlib.test(OTHER,"format_pack1",lib.pack("<!2 b l h", 2, 3, 5) == string.char(2, 0, 3, 0)..l_pad..string.char(0, 0, 5, 0))
a,b,c = lib.unpack("<!2blh", string.char(2, 0, 3, 0)..l_pad..string.char(0, 0, 5, 0))
testlib.test(OTHER,"format_pack2",a == 2 and b == 3 and c == 5)

testlib.test(OTHER,"format_pack3",lib.pack("<!8blh", 2, 3, 5) == string.char(2, 0, 0, 0)..l_pad..string.char(3, 0, 0, 0)..l_pad..string.char(5, 0))
a,b,c = lib.unpack("<!8blh", string.char(2, 0, 0, 0)..l_pad..string.char(3, 0, 0, 0)..l_pad..string.char(5, 0))
testlib.test(OTHER,"format_pack4",a == 2 and b == 3 and c == 5)

testlib.test(OTHER,"format_pack5",lib.pack(">sh", "aloi", 3) == "aloi\0\0\3")
testlib.test(OTHER,"format_pack6",lib.pack(">!sh", "aloi", 3) == "aloi\0\0\0\3")

x = "aloi\0\0\0\0\3\2\0\0"
a, b, c = lib.unpack("<!si4", x)
testlib.test(OTHER,"format_unpack1",a == "aloi" and b == 2*256+3 and c == string.len(x)+1)

x = lib.pack("!4sss", "hi", "hello", "bye")
a,b,c = lib.unpack("sss", x)
testlib.test(OTHER,"format_unpack2",a == "hi" and b == "hello" and c == "bye")

a, i = lib.unpack("s", x, 1)
testlib.test(OTHER,"format_unpack3",a == "hi")
a, i = lib.unpack("s", x, i)
testlib.test(OTHER,"format_unpack4",a == "hello")
a, i = lib.unpack("s", x, i)
testlib.test(OTHER,"format_unpack5",a == "bye")

-- test for weird conditions
testlib.testing(OTHER, "weird conditions")

testlib.test(OTHER,"weird_pack1",lib.pack(">>>h <!!!<h", 10, 10) == string.char(0, 10, 10, 0))
testlib.test(OTHER,"weird_pack2",not pcall(lib.pack, "!3l", 10))
testlib.test(OTHER,"weird_pack3",not pcall(lib.pack, "3", 10))
testlib.test(OTHER,"weird_pack4",not pcall(lib.pack, "i33", 10))
testlib.test(OTHER,"weird_pack5",not pcall(lib.pack, "I33", 10))
testlib.test(OTHER,"weird_pack6",lib.pack("") == "")
testlib.test(OTHER,"weird_pack7",lib.pack(" ") == "")
testlib.test(OTHER,"weird_pack8",lib.pack(">>><<<!!") == "")
testlib.test(OTHER,"weird_unpack1",not pcall(lib.unpack, "c0", "alo"))
testlib.test(OTHER,"weird_unpack2",not pcall(lib.unpack, "s", "alo"))
testlib.test(OTHER,"weird_unpack3",lib.unpack("s", "alo\0") == "alo")
testlib.test(OTHER,"weird_pack9",not pcall(lib.pack, "c4", "alo"))
testlib.test(OTHER,"weird_pack10",pcall(lib.pack, "c3", "alo"))
testlib.test(OTHER,"weird_unpack4",not pcall(lib.unpack, "c4", "alo"))
testlib.test(OTHER,"weird_unpack5",pcall(lib.unpack, "c3", "alo"))
testlib.test(OTHER,"weird_unpack6",not pcall(lib.unpack, "bc0", "\4alo"))
testlib.test(OTHER,"weird_unpack7",pcall(lib.unpack, "bc0", "\3alo"))
testlib.test(OTHER,"weird_unpack8",not pcall(lib.unpack, "b", "alo", 4))
testlib.test(OTHER,"weird_unpack9",lib.unpack("b", "alo\3", 4) == 3)
testlib.test(OTHER,"weird_pack11",not pcall(lib.pack, "\250\22", "alo"))
testlib.test(OTHER,"weird_pack12",not pcall(lib.pack, 1, "alo"))
testlib.test(OTHER,"weird_pack13",not pcall(lib.pack, nil, "alo"))
testlib.test(OTHER,"weird_pack14",not pcall(lib.pack, {}, "alo"))
testlib.test(OTHER,"weird_pack15",not pcall(lib.pack, true, "alo"))
testlib.test(OTHER,"weird_unpack10",not pcall(lib.unpack, "\250\22", "\3alo"))
testlib.test(OTHER,"weird_unpack11",not pcall(lib.unpack, 1, "\3alo"))
testlib.test(OTHER,"weird_unpack12",not pcall(lib.unpack, nil, "\3alo"))
testlib.test(OTHER,"weird_unpack13",not pcall(lib.unpack, {}, "\3alo"))
testlib.test(OTHER,"weird_unpack14",not pcall(lib.unpack, true, "\3alo"))

-- done
testlib.getResults()
Lua
wireshark/test/lua/testlib.lua
----------------------------------------
-- library name: testlib.lua
--
-- Provides common functions for other lua test scripts to use.
----------------------------------------
--[[
    This library aims to codify the most common practices used in testing
    Wireshark's lua features. The intent is to reduce boilerplate code
    so test scripts can focus on test cases.

    Tests are nominally classified into named groups.
    (In practice, most test files just use a single group called "other",
    but this should be tidied up at some point.)

    A test script must call testlib.init() with a table of group names
    and the number of tests expected to be run in each group.
    This number can be zero if you want to declare a group but don't
    need to check that a specific number of tests is run.

    Suggested use (abridged):

        local testlib = require("testlib")
        testlib.init({ other = 3 })
        testlib.testing("other", "example tests")
        testlib.test("other", "firsttest", 1+1 == 2)
        testlib.test("other", "funccall", pcall(my_function, func_args), "function should succeed")
        testlib.test("other", "funccall", not pcall(my_function2, func_args), "function expected to give error")
        testlib.getResults()

    For information on specific functions, keep reading.
--]]

----------------------------------------
-- This is the module object, which will be returned at the end of this file.
local M = {
    ["groups"] = {},
}

----------------------------------------
-- Initialize the test suite. Define one or more testing groups,
-- giving the expected number of tests to run for each.
-- (Telling it to "expect" zero tests for a group just skips
-- the check that a specific number of tests ran in that group.)
-- May be called repeatedly if you want to define group names
-- at runtime.
M.init = function(t)
    for group, expected in pairs(t) do
        M.groups[group] = {
            ["expected"] = expected,
            ["passed"] = 0,
            ["failed"] = 0,
            ["total"] = 0,
            ["packets"] = 0,
        }
    end
end

----------------------------------------
-- Indicate a passed test in the named group.
M.pass = function(group)
    M.groups[group].passed = M.groups[group].passed + 1
    M.groups[group].total = M.groups[group].total + 1
end

----------------------------------------
-- Indicate a failed test in the named group.
M.fail = function(group)
    M.groups[group].failed = M.groups[group].failed + 1
    M.groups[group].total = M.groups[group].total + 1
end

----------------------------------------
-- There are some tests which track the number of packets they're testing.
-- Use this function to count a single packet as being "seen" by a group.
M.countPacket = function(group)
    M.groups[group].packets = M.groups[group].packets + 1
end

----------------------------------------
-- Get the number of packets that have been counted under the named group.
M.getPktCount = function(group)
    return M.groups[group].packets
end

----------------------------------------
-- Print a banner reporting test progress.
-- Has no material effect on test progression, but is useful for
-- understanding the test results.
M.testing = function(group, msg)
    if msg == nil then
        msg, group = group, nil
    end
    if group then
        if M.groups[group].packets > 0 then
            print(string.format("\n-------- Testing %s -- %s for packet # %d --------\n",
                                group, msg, M.groups[group].packets))
        else
            print(string.format("\n-------- Testing %s -- %s --------\n", group, msg))
        end
    else
        print(string.format("\n-------- Testing %s --------\n", msg))
    end
end

----------------------------------------
-- Core function: test a condition, report and track its status.
-- The output format shown here is what was commonly used in test scripts,
-- but can be changed.
M.test = function(group, name, cond, msg)
    -- io.stdout:write() doesn't add a newline like print() does
    io.stdout:write(string.format("test %s --> %s-%d-%d...",
                                  group, name, M.groups[group].total, M.groups[group].packets))
    if cond then
        io.stdout:write("passed\n")
        M.pass(group)
        return true
    else
        io.stdout:write("failed!\n")
        M.fail(group)
        if msg then
            print(string.format("Got the following error: '%s'", msg))
        end
        -- Using error() causes the entire test script to abort.
        -- This is how the lua test suite typically operates.
        -- If a test script wants to continue with subsequent tests
        -- after a failed test, this behaviour could be made
        -- configurable in this module.
        error(name .. " test failed!")
        return false
    end
end

----------------------------------------
-- Call this at the finale of a test script to output the results of testing.
-- This is where the number of tests run is compared to what was expected,
-- if applicable.
-- Scripts which run over empty.pcap will usually call this at the end of
-- the file.
-- Scripts which test by creating a protocol object will call this from
-- the object's .init() method *the second time it is called*.
-- Others usually call it in a tap listener's .draw() method,
-- which tshark calls once when it reaches the end of the pcap.
M.getResults = function()
    local rv = true
    print("\n===== Test Results =====")
    for group, num in pairs(M.groups) do
        if num.expected > 0 and num.total ~= num.expected then
            rv = false
            print("Something didn't run or ran too much... tests failed!")
            print(string.format("%s: expected %d tests but ran %d tests", group, num.expected, num.total))
        end
        if num.failed > 0 then
            rv = false
            print(string.format("%s: passed %d/%d, FAILED %d/%d", group, num.passed, num.total, num.failed, num.total))
        else
            print(string.format("%s: passed %d/%d", group, num.passed, num.total))
        end
    end
    if rv then
        -- The python wrapper which performs our lua testing
        -- expects to see this string in the output if there were no failures.
        print("All tests passed!")
    else
        print("Some tests failed!")
    end
    return rv
end

----------------------------------------
-- That's the end of this library. Return the module we've created.
return M
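-- [Annotation, not part of the original library]
-- For reference, the bookkeeping structure M.init() builds: a call such as
--   testlib.init({ other = 2 })
-- yields
--   M.groups = { other = { expected = 2, passed = 0, failed = 0, total = 0, packets = 0 } }
-- and M.getResults() later compares each group's total against its expected.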
Lua
wireshark/test/lua/try_heuristics.lua
-- Define a new protocol that runs TCP heuristics and on failure runs UDP heuristics
--
-- This expects to be run against dns_port.pcap, so it should end up resolving all packets to DNS with the UDP heuristic
local test_proto = Proto("test", "Test Protocol")

-- Have all tests passed so far?
-- Anything that fails should set this to false, which will suppress the
-- final "All tests passed!" message.
all_ok = true

-- The number of frames expected
-- Final test status is output with last frame
LAST_FRAME = 4

function test_proto.dissector(buf, pinfo, root)
    print("Dissector function run")

    orig_proto_name = tostring(pinfo.cols.protocol)

    -- Run TCP heuristic dissectors
    -- Dissection should fail, and the protocol name should be unchanged
    tcp_success = DissectorTable.try_heuristics("tcp", buf, pinfo, root)
    curr_proto_name = tostring(pinfo.cols.protocol)

    if tcp_success then
        all_ok = false
        print("tcp heuristics were not expected to report success, but did!")
    end
    if curr_proto_name ~= orig_proto_name then
        all_ok = false
        print("after tcp heuristics were run, protocol " .. orig_proto_name ..
              " was not expected to change, but became " .. curr_proto_name .. "!")
    end

    -- Run UDP heuristic dissectors
    -- Dissection should succeed, and the protocol name should be changed to DNS
    udp_success = DissectorTable.try_heuristics("udp", buf, pinfo, root)
    curr_proto_name = tostring(pinfo.cols.protocol)

    if not udp_success then
        all_ok = false
        print("udp heuristics were expected to report success, but did not!")
    end
    if curr_proto_name ~= "DNS" then
        all_ok = false
        print("after udp heuristics were run, protocol should be changed to DNS, but became " .. curr_proto_name .. "!")
    end

    -- If we're on the last frame, report success or failure
    if pinfo.number == LAST_FRAME then
        if all_ok then
            print("All tests passed!")
        else
            print("Some tests failed!")
        end
    end
end

-- Invoke test_proto on the expected UDP traffic
DissectorTable.get("udp.port"):add(65333, test_proto)
DissectorTable.get("udp.port"):add(65346, test_proto)
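-- [Annotation, not part of the original test file]
-- The fall-through shape used above, reduced to its core (assuming, as the
-- tests do, that DissectorTable.try_heuristics() returns true as soon as one
-- heuristic dissector claims the packet):
--   if not DissectorTable.try_heuristics("tcp", buf, pinfo, root) then
--       DissectorTable.try_heuristics("udp", buf, pinfo, root)
--   end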
Lua
wireshark/test/lua/tvb.lua
----------------------------------------
-- script-name: tvb.lua
-- This tests the Tvb/TvbRange and proto_add_XXX_item API.
----------------------------------------
local testlib = require("testlib")

local FRAME = "frame"
local OTHER = "other"

-- expected number of runs per type
--
-- CHANGE THIS TO MATCH HOW MANY TESTS THERE ARE
--
-- The number of tests in a specific category (other than FRAME) is the
-- number of times testlib.test() is called by any function below testing().
-- From the user's perspective, it can be calculated with the following
-- formula:
--
-- N = number of testlib.test() calls you make directly +
--     number of verifyFields() * (1 + number of fields) +
--     number of verifyResults() * (1 + 2 * number of values)
--
local n_frames = 1
local taptests = { [FRAME]=n_frames, [OTHER]=413*n_frames }
testlib.init(taptests)

------------- test script ------------

----------------------------------------
-- creates a Proto object for our testing
local test_proto = Proto("test","Test Protocol")

local numinits = 0
function test_proto.init()
    numinits = numinits + 1
    if numinits == 2 then
        testlib.getResults()
    end
end

----------------------------------------
-- a table of all of our Protocol's fields
range_string = {
    { 0, 200, "The first part" },
    { 201, 233, "The second part" },
    { 234, 255, "The last part" },
}

local testfield = {
    basic = {
        STRING         = ProtoField.string ("test.basic.string",  "Basic string"),
        BOOLEAN        = ProtoField.bool   ("test.basic.boolean", "Basic boolean", 16, {"yes","no"}, 0x0001),
        UINT8          = ProtoField.uint8  ("test.basic.uint8",   "Basic uint8 with range string", base.RANGE_STRING, range_string ),
        UINT16         = ProtoField.uint16 ("test.basic.uint16",  "Basic uint16"),
        UINT32         = ProtoField.uint32 ("test.basic.uint32",  "Basic uint32 test with a unit string", base.UINT_STRING, { "femtoFarads" }),
        INT24          = ProtoField.int24  ("test.basic.uint24",  "Basic uint24"),
        BYTES          = ProtoField.bytes  ("test.basic.bytes",   "Basic Bytes"),
        UINT_BYTES     = ProtoField.ubytes ("test.basic.ubytes",  "Basic Uint Bytes"),
        OID            = ProtoField.oid    ("test.basic.oid",     "Basic OID"),
        REL_OID        = ProtoField.rel_oid("test.basic.rel_oid", "Basic Relative OID"),
        ABSOLUTE_LOCAL = ProtoField.absolute_time("test.basic.absolute.local","Basic absolute local"),
        ABSOLUTE_UTC   = ProtoField.absolute_time("test.basic.absolute.utc",  "Basic absolute utc", base.UTC),
        IPv4           = ProtoField.ipv4   ("test.basic.ipv4",    "Basic ipv4 address"),
        IPv6           = ProtoField.ipv6   ("test.basic.ipv6",    "Basic ipv6 address"),
        ETHER          = ProtoField.ether  ("test.basic.ether",   "Basic ethernet address"),
        -- GUID        = ProtoField.guid   ("test.basic.guid",    "Basic GUID"),
    },

    time = {
        ABSOLUTE_LOCAL = ProtoField.absolute_time("test.time.absolute.local","Time absolute local"),
        ABSOLUTE_UTC   = ProtoField.absolute_time("test.time.absolute.utc",  "Time absolute utc", base.UTC),
    },

    bytes = {
        BYTES      = ProtoField.bytes  ("test.bytes.bytes",   "Bytes"),
        UINT_BYTES = ProtoField.ubytes ("test.bytes.ubytes",  "Uint Bytes"),
        OID        = ProtoField.oid    ("test.bytes.oid",     "OID"),
        REL_OID    = ProtoField.rel_oid("test.bytes.rel_oid", "Relative OID"),
        -- GUID    = ProtoField.guid   ("test.bytes.guid",    "GUID"),
    },
}

-- create a flat array table of the above that can be registered
local pfields = {}
for _,t in pairs(testfield) do
    for k,v in pairs(t) do
        pfields[#pfields+1] = v
    end
end

-- register them
test_proto.fields = pfields

print("test_proto ProtoFields registered")

local getfield = {
    basic = {
        STRING         = Field.new ("test.basic.string"),
        BOOLEAN        = Field.new ("test.basic.boolean"),
        UINT8          = Field.new ("test.basic.uint8"),
        UINT16         = Field.new ("test.basic.uint16"),
        INT24          = Field.new ("test.basic.uint24"),
        BYTES          = Field.new ("test.basic.bytes"),
        UINT_BYTES     = Field.new ("test.basic.ubytes"),
        OID            = Field.new ("test.basic.oid"),
        REL_OID        = Field.new ("test.basic.rel_oid"),
        ABSOLUTE_LOCAL = Field.new ("test.basic.absolute.local"),
        ABSOLUTE_UTC   = Field.new ("test.basic.absolute.utc"),
        IPv4           = Field.new ("test.basic.ipv4"),
        IPv6           = Field.new ("test.basic.ipv6"),
        ETHER          = Field.new ("test.basic.ether"),
        -- GUID        = Field.new ("test.basic.guid"),
    },

    time = {
        ABSOLUTE_LOCAL = Field.new ("test.time.absolute.local"),
        ABSOLUTE_UTC   = Field.new ("test.time.absolute.utc"),
    },

    bytes = {
        BYTES      = Field.new ("test.bytes.bytes"),
        UINT_BYTES = Field.new ("test.bytes.ubytes"),
        OID        = Field.new ("test.bytes.oid"),
        REL_OID    = Field.new ("test.bytes.rel_oid"),
        -- GUID    = Field.new ("test.bytes.guid"),
    },
}

print("test_proto Fields created")

local function addMatchFields(match_fields, ... )
    match_fields[#match_fields + 1] = { ... }
end

local function getFieldInfos(name)
    local base, field = name:match("([^.]+)%.(.+)")
    if not base or not field then
        error("failed to get base.field from '" .. name .. "'")
    end
    local t = { getfield[base][field]() }
    return t
end

local function verifyFields(name, match_fields)
    local finfos = getFieldInfos(name)

    testlib.test(OTHER, "verify-fields-size-" .. name, #finfos == #match_fields,
                 "#finfos=" .. #finfos .. ", #match_fields=" .. #match_fields)

    for i, t in ipairs(match_fields) do
        if type(t) ~= 'table' then
            error("verifyFields didn't get a table inside the matches table")
        end
        if #t ~= 1 then
            error("verifyFields matches table's table is not size 1")
        end
        local result = finfos[i]()
        local value = t[1]
        print(
            name .. " got:",
            "\n\tfinfos [" .. i .. "]='" .. tostring( result ) .. "'",
            "\n\tmatches[" .. i .. "]='" .. tostring( value ) .. "'"
        )
        testlib.test(OTHER, "verify-fields-value-" .. name .. "-" .. i, result == value )
    end
end

local function addMatchValues(match_values, ... )
    match_values[#match_values + 1] = { ... }
end

local function addMatchFieldValues(match_fields, match_values, match_both, ...)
    addMatchFields(match_fields, match_both)
    addMatchValues(match_values, match_both, ...)
end

local result_values = {}
local function resetResults()
    result_values = {}
end

local function treeAddPField(...)
    local t = { pcall ( TreeItem.add_packet_field, ... ) }
    if not t[1] then  -- pcall returns false (not nil) on failure
        return nil, t[2]
    end
    -- it gives back a TreeItem, then the results
    if typeof(t[2]) ~= 'TreeItem' then
        return nil, "did not get a TreeItem returned from TreeItem.add_packet_field, "..
                    "got a '" .. typeof(t[2]) .."'"
    end
    if #t ~= 4 then
        return nil, "did not get 3 return values from TreeItem.add_packet_field"
    end
    result_values[#result_values + 1] = { t[3], t[4] }
    return true
end

local function verifyResults(name, match_values)
    testlib.test(OTHER, "verify-results-size-" .. name, #result_values == #match_values,
                 "#result_values=" .. #result_values .. ", #match_values=" .. #match_values)

    for j, t in ipairs(match_values) do
        if type(t) ~= 'table' then
            error("verifyResults didn't get a table inside the matches table")
        end
        for i, match in ipairs(t) do
            local r = result_values[j][i]
            print(
                name .. " got:",
                "\n\tresults[" .. j .. "][" .. i .. "]='" .. tostring( r ) .. "'",
                "\n\tmatches[" .. j .. "][" .. i .. "]='" .. tostring( match ) .. "'"
            )
            local result_type, match_type
            if type(match) == 'userdata' then
                match_type = typeof(match)
            else
                match_type = type(match)
            end
            if type(r) == 'userdata' then
                result_type = typeof(r)
            else
                result_type = type(r)
            end
            testlib.test(OTHER, "verify-results-type-" .. name .. "-" .. i, result_type == match_type )
            testlib.test(OTHER, "verify-results-value-" .. name .. "-" .. i, r == match )
        end
    end
end

-- Compute the difference in seconds between local time and UTC
-- from http://lua-users.org/wiki/TimeZone
local function get_timezone()
    local now = os.time()
    return os.difftime(now, os.time(os.date("!*t", now)))
end
local timezone = get_timezone()
print ("timezone = " .. timezone)

----------------------------------------
-- The following creates the callback function for the dissector.
-- The 'tvbuf' is a Tvb object, 'pktinfo' is a Pinfo object, and 'root' is a TreeItem object.
function test_proto.dissector(tvbuf,pktinfo,root)
    testlib.countPacket(FRAME)
    testlib.countPacket(OTHER)

    testlib.testing(OTHER, "Basic string")

    local tree = root:add(test_proto, tvbuf:range(0,tvbuf:len()))

    -- create a fake Tvb to use for testing
    local teststring = "this is the string for the first test"
    local bytearray = ByteArray.new(teststring, true)
    local tvb_string = bytearray:tvb("Basic string")

    local function callTreeAdd(tree,...)
        tree:add(...)
    end

    local string_match_fields = {}

    testlib.test(OTHER, "basic-tvb_get_string", tvb_string:range():string() == teststring )

    testlib.test(OTHER, "basic-string", tree:add(testfield.basic.STRING, tvb_string:range(0,tvb_string:len())) ~= nil )
    addMatchFields(string_match_fields, teststring)

    testlib.test(OTHER, "basic-string", pcall (callTreeAdd, tree, testfield.basic.STRING, tvb_string:range() ) )
    addMatchFields(string_match_fields, teststring)

    verifyFields("basic.STRING", string_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic boolean")

    local barray_bytes_hex  = "00FF00018000"
    local barray_bytes      = ByteArray.new(barray_bytes_hex)
    local tvb_bytes         = barray_bytes:tvb("Basic bytes")
    local bool_match_fields = {}

    testlib.test(OTHER, "basic-boolean", pcall (callTreeAdd, tree, testfield.basic.BOOLEAN, tvb_bytes:range(0,2)) )
    addMatchFields(bool_match_fields, true)

    testlib.test(OTHER, "basic-boolean", pcall (callTreeAdd, tree, testfield.basic.BOOLEAN, tvb_bytes:range(2,2)) )
    addMatchFields(bool_match_fields, true)

    testlib.test(OTHER, "basic-boolean", pcall (callTreeAdd, tree, testfield.basic.BOOLEAN, tvb_bytes:range(4,2)) )
    addMatchFields(bool_match_fields, false)

    verifyFields("basic.BOOLEAN", bool_match_fields )

    ----------------------------------------
    testlib.testing(OTHER, "Basic uint16")

    local uint16_match_fields = {}

    testlib.test(OTHER, "basic-uint16", pcall (callTreeAdd, tree, testfield.basic.UINT16, tvb_bytes:range(0,2)) )
    addMatchFields(uint16_match_fields, 255)

    testlib.test(OTHER, "basic-uint16", pcall (callTreeAdd, tree, testfield.basic.UINT16, tvb_bytes:range(2,2)) )
    addMatchFields(uint16_match_fields, 1)

    testlib.test(OTHER, "basic-uint16", pcall (callTreeAdd, tree, testfield.basic.UINT16, tvb_bytes:range(4,2)) )
    addMatchFields(uint16_match_fields, 32768)

    verifyFields("basic.UINT16", uint16_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic uint16-le")

    local function callTreeAddLE(tree,...)
        tree:add_le(...)
    end

    testlib.test(OTHER, "basic-uint16-le", pcall (callTreeAddLE, tree, testfield.basic.UINT16, tvb_bytes:range(0,2)) )
    addMatchFields(uint16_match_fields, 65280)

    testlib.test(OTHER, "basic-uint16-le", pcall (callTreeAddLE, tree, testfield.basic.UINT16, tvb_bytes:range(2,2)) )
    addMatchFields(uint16_match_fields, 256)

    testlib.test(OTHER, "basic-uint16-le", pcall (callTreeAddLE, tree, testfield.basic.UINT16, tvb_bytes:range(4,2)) )
    addMatchFields(uint16_match_fields, 128)

    verifyFields("basic.UINT16", uint16_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic int24")

    local int24_match_fields = {}

    testlib.test(OTHER, "basic-int24", pcall (callTreeAdd, tree, testfield.basic.INT24, tvb_bytes:range(0,3)) )
    addMatchFields(int24_match_fields, 65280)

    testlib.test(OTHER, "basic-int24", pcall (callTreeAdd, tree, testfield.basic.INT24, tvb_bytes:range(3,3)) )
    addMatchFields(int24_match_fields, 98304)

    verifyFields("basic.INT24", int24_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic int24-le")

    testlib.test(OTHER, "basic-int24", pcall (callTreeAddLE, tree, testfield.basic.INT24, tvb_bytes:range(0,3)) )
    addMatchFields(int24_match_fields, 65280)

    testlib.test(OTHER, "basic-int24", pcall (callTreeAddLE, tree, testfield.basic.INT24, tvb_bytes:range(3,3)) )
    addMatchFields(int24_match_fields, 32769)

    verifyFields("basic.INT24", int24_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic bytes")

    local bytes_match_fields = {}

    testlib.test(OTHER, "basic-tvb_get_string_bytes",
                 string.lower(tostring(tvb_bytes:range():bytes())) == string.lower(barray_bytes_hex))

    testlib.test(OTHER, "basic-bytes", pcall (callTreeAdd, tree, testfield.basic.BYTES, tvb_bytes:range()) )
    addMatchFields(bytes_match_fields, barray_bytes)

    -- TODO: it's silly that tree:add_packet_field() requires an encoding argument
    --       need to fix that separately in a bug fix
    testlib.test(OTHER, "add_pfield-bytes", treeAddPField(tree, testfield.basic.BYTES, tvb_bytes:range(), ENC_BIG_ENDIAN))
    addMatchFields(bytes_match_fields, barray_bytes)

    verifyFields("basic.BYTES", bytes_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic uint bytes")

    local len_string = string.format("%02x", barray_bytes:len())
    local barray_uint_bytes = ByteArray.new(len_string) .. barray_bytes
    local tvb_uint_bytes = barray_uint_bytes:tvb("Basic UINT_BYTES")
    local uint_bytes_match_fields = {}

    testlib.test(OTHER, "basic-uint-bytes", pcall (callTreeAdd, tree, testfield.basic.UINT_BYTES, tvb_uint_bytes:range(0,1)) )
    addMatchFields(uint_bytes_match_fields, barray_bytes)

    testlib.test(OTHER, "add_pfield-uint-bytes", treeAddPField(tree, testfield.basic.UINT_BYTES, tvb_uint_bytes:range(0,1), ENC_BIG_ENDIAN) )
    addMatchFields(uint_bytes_match_fields, barray_bytes)

    verifyFields("basic.UINT_BYTES", uint_bytes_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic OID")

    -- note: the tvb being dissected and compared isn't actually a valid OID.
    -- tree:add() and tree:add_packet_field() don't care about its validity right now.
    local oid_match_fields = {}

    testlib.test(OTHER, "basic-oid", pcall(callTreeAdd, tree, testfield.basic.OID, tvb_bytes:range()) )
    addMatchFields(oid_match_fields, barray_bytes)

    testlib.test(OTHER, "add_pfield-oid", treeAddPField(tree, testfield.basic.OID, tvb_bytes:range(), ENC_BIG_ENDIAN) )
    addMatchFields(oid_match_fields, barray_bytes)

    verifyFields("basic.OID", oid_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "Basic REL_OID")

    -- note: the tvb being dissected and compared isn't actually a valid OID.
    -- tree:add() and tree:add_packet_field() don't care about its validity right now.
    local rel_oid_match_fields = {}

    testlib.test(OTHER, "basic-rel-oid", pcall(callTreeAdd, tree, testfield.basic.REL_OID, tvb_bytes:range()))
    addMatchFields(rel_oid_match_fields, barray_bytes)

    testlib.test(OTHER, "add_pfield-rel_oid", treeAddPField(tree, testfield.basic.REL_OID, tvb_bytes:range(), ENC_BIG_ENDIAN) )
    addMatchFields(rel_oid_match_fields, barray_bytes)

    verifyFields("basic.REL_OID", rel_oid_match_fields)

    -- TODO: a FT_GUID is not really a ByteArray, so we can't simply treat it as one
    -- local barray_guid = ByteArray.new("00FF0001 80001234 567890AB CDEF00FF")
    -- local tvb_guid = barray_guid:tvb("Basic GUID")
    -- local guid_match_fields = {}

    -- testlib.test(OTHER, "basic-guid", pcall(callTreeAdd, tree, testfield.basic.GUID, tvb_guid:range()) )
    -- addMatchFields(guid_match_fields, barray_guid)

    -- testlib.test(OTHER, "add_pfield-guid", treeAddPField(tree, testfield.basic.GUID,
    --                                                      tvb_guid:range(), ENC_BIG_ENDIAN) )
    -- addMatchFields(guid_match_fields, barray_guid)

    -- verifyFields("basic.GUID", guid_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add ipv6")

    local tvb = ByteArray.new("20010db8 00000000 0000ff00 00428329"):tvb("IPv6")
    local IPv6 = testfield.basic.IPv6
    local ipv6_match_fields = {}

    testlib.test(OTHER, "ipv6", pcall (callTreeAdd, tree, IPv6, tvb:range(0,16)))
    addMatchFields(ipv6_match_fields, Address.ipv6('2001:0db8:0000:0000:0000:ff00:0042:8329'))

    verifyFields("basic.IPv6", ipv6_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add ipv4")

    local tvb = ByteArray.new("7f000001"):tvb("IPv4")
    local IPv4 = testfield.basic.IPv4
    local ipv4_match_fields = {}

    testlib.test(OTHER, "ipv4", pcall (callTreeAdd, tree, IPv4, tvb:range(0,4)))
    addMatchFields(ipv4_match_fields, Address.ip('127.0.0.1'))

    -- TODO: currently, tree:add_le only works for numeric values, not IPv4
    -- addresses. Test this in the future.
    -- testlib.test(OTHER, "ipv4", pcall (callTreeAddLE, tree, IPv4, tvb:range(0,4)))
    -- addMatchFields(ipv4_match_fields, Address.ip('1.0.0.127'))

    verifyFields("basic.IPv4", ipv4_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add ether")

    local tvb = ByteArray.new("010203040506"):tvb("Ether")
    local tvb0 = ByteArray.new("000000000000"):tvb("Ether0")
    local ether = testfield.basic.ETHER
    local ether_match_fields = {}

    testlib.test(OTHER, "ether", pcall (callTreeAdd, tree, ether, tvb:range(0,6)))
    addMatchFields(ether_match_fields, Address.ether('01:02:03:04:05:06'))

    testlib.test(OTHER, "ether0", pcall (callTreeAdd, tree, ether, tvb0:range(0,6)))
    addMatchFields(ether_match_fields, Address.ether('00:00:00:00:00:00')) -- tvb0 is all zeros

    verifyFields("basic.ETHER", ether_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add_packet_field Bytes")

    resetResults()
    bytes_match_fields = {}
    local bytes_match_values = {}

    -- something to make this easier to read
    local function addMatch(...)
        addMatchFieldValues(bytes_match_fields, bytes_match_values, ...)
    end

    local bytesstring1 = "deadbeef0123456789DEADBEEFabcdef"
    local bytesstring = ByteArray.new(bytesstring1) -- the binary version of above, for comparing
    local bytestvb1 = ByteArray.new(bytesstring1, true):tvb("Bytes hex-string 1")
    local bytesstring2 = " de:ad:be:ef:01:23:45:67:89:DE:AD:BE:EF:ab:cd:ef"
    local bytestvb2 = ByteArray.new(bytesstring2 .. "-f0-00 foobar", true):tvb("Bytes hex-string 2")

    local bytestvb1_decode = bytestvb1:range():bytes(ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH)
    testlib.test(OTHER, "tvb_get_string_bytes", string.lower(tostring(bytestvb1_decode)) == string.lower(tostring(bytesstring1)))

    testlib.test(OTHER, "add_pfield-bytes1", treeAddPField(tree, testfield.bytes.BYTES, bytestvb1:range(),
                                                           ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring1))

    testlib.test(OTHER, "add_pfield-bytes2", treeAddPField(tree, testfield.bytes.BYTES, bytestvb2:range(),
                                                           ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring2))

    verifyResults("add_pfield-bytes", bytes_match_values)
    verifyFields("bytes.BYTES", bytes_match_fields)

    -- extra test of ByteArray
    local b64padded = ByteArray.new("dGVzdA==", true):base64_decode():raw()
    local b64unpadded = ByteArray.new("dGVzdA", true):base64_decode():raw()
    testlib.test(OTHER, "bytearray_base64_padded", b64padded == "test")
    testlib.test(OTHER, "bytearray_base64_unpadded", b64unpadded == "test")

    ----------------------------------------
    testlib.testing(OTHER, "tree:add_packet_field OID")

    resetResults()
    bytes_match_fields = {}
    bytes_match_values = {}

    testlib.test(OTHER, "add_pfield-oid1", treeAddPField(tree, testfield.bytes.OID, bytestvb1:range(),
                                                         ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring1))

    testlib.test(OTHER, "add_pfield-oid2", treeAddPField(tree, testfield.bytes.OID, bytestvb2:range(),
                                                         ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring2))

    verifyResults("add_pfield-oid", bytes_match_values)
    verifyFields("bytes.OID", bytes_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add_packet_field REL_OID")

    resetResults()
    bytes_match_fields = {}
    bytes_match_values = {}

    testlib.test(OTHER, "add_pfield-rel_oid1", treeAddPField(tree, testfield.bytes.REL_OID, bytestvb1:range(),
                                                             ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring1))

    testlib.test(OTHER, "add_pfield-rel_oid2", treeAddPField(tree, testfield.bytes.REL_OID, bytestvb2:range(),
                                                             ENC_STR_HEX + ENC_SEP_NONE + ENC_SEP_COLON + ENC_SEP_DASH))
    addMatch(bytesstring, string.len(bytesstring2))

    verifyResults("add_pfield-rel_oid", bytes_match_values)
    verifyFields("bytes.REL_OID", bytes_match_fields)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add Time")

    local tvb = ByteArray.new("00000000 00000000 0000FF0F 00FF000F"):tvb("Time")
    local ALOCAL = testfield.time.ABSOLUTE_LOCAL
    local alocal_match_fields = {}

    testlib.test(OTHER, "time-local", pcall (callTreeAdd, tree, ALOCAL, tvb:range(0,8)) )
    addMatchFields(alocal_match_fields, NSTime())

    testlib.test(OTHER, "time-local", pcall (callTreeAdd, tree, ALOCAL, tvb:range(8,8)) )
    addMatchFields(alocal_match_fields, NSTime( 0x0000FF0F, 0x00FF000F) )

    testlib.test(OTHER, "time-local-le", pcall (callTreeAddLE, tree, ALOCAL, tvb:range(0,8)) )
    addMatchFields(alocal_match_fields, NSTime())

    testlib.test(OTHER, "time-local-le", pcall (callTreeAddLE, tree, ALOCAL, tvb:range(8,8)) )
    addMatchFields(alocal_match_fields, NSTime( 0x0FFF0000, 0x0F00FF00 ) )

    verifyFields("time.ABSOLUTE_LOCAL", alocal_match_fields)

    local AUTC = testfield.time.ABSOLUTE_UTC
    local autc_match_fields = {}

    testlib.test(OTHER, "time-utc", pcall (callTreeAdd, tree, AUTC, tvb:range(0,8)) )
    addMatchFields(autc_match_fields, NSTime())

    testlib.test(OTHER, "time-utc", pcall (callTreeAdd, tree, AUTC, tvb:range(8,8)) )
    addMatchFields(autc_match_fields, NSTime( 0x0000FF0F, 0x00FF000F) )

    testlib.test(OTHER, "time-utc-le", pcall (callTreeAddLE, tree, AUTC, tvb:range(0,8)) )
    addMatchFields(autc_match_fields, NSTime())

    testlib.test(OTHER, "time-utc-le", pcall (callTreeAddLE, tree, AUTC, tvb:range(8,8)) )
    addMatchFields(autc_match_fields, NSTime( 0x0FFF0000, 0x0F00FF00 ) )

    verifyFields("time.ABSOLUTE_UTC", autc_match_fields )

    ----------------------------------------
    testlib.testing(OTHER, "tree:add_packet_field Time bytes")

    resetResults()
    local autc_match_values = {}

    -- something to make this easier to read
    addMatch = function(...)
        addMatchFieldValues(autc_match_fields, autc_match_values, ...)
    end

    -- tree:add_packet_field(ALOCAL, tvb:range(0,8), ENC_BIG_ENDIAN)
    testlib.test(OTHER, "add_pfield-time-bytes-local", treeAddPField ( tree, AUTC, tvb:range(0,8), ENC_BIG_ENDIAN) )
    addMatch( NSTime(), 8)

    testlib.test(OTHER, "add_pfield-time-bytes-local", treeAddPField ( tree, AUTC, tvb:range(8,8), ENC_BIG_ENDIAN) )
    addMatch( NSTime( 0x0000FF0F, 0x00FF000F), 16)

    testlib.test(OTHER, "add_pfield-time-bytes-local-le", treeAddPField ( tree, AUTC, tvb:range(0,8), ENC_LITTLE_ENDIAN) )
    addMatch( NSTime(), 8)

    testlib.test(OTHER, "add_pfield-time-bytes-local-le", treeAddPField ( tree, AUTC, tvb:range(8,8), ENC_LITTLE_ENDIAN) )
    addMatch( NSTime( 0x0FFF0000, 0x0F00FF00 ), 16)

    verifyFields("time.ABSOLUTE_UTC", autc_match_fields)
    verifyResults("add_pfield-time-bytes-local", autc_match_values)

    ----------------------------------------
    testlib.testing(OTHER, "tree:add_packet_field Time string ENC_ISO_8601_DATE_TIME")

    resetResults()
    autc_match_values = {}

    local datetimestring1 = "2013-03-01T22:14:48+00:00" -- this is 1362176088 seconds epoch time
    local tvb1 = ByteArray.new(datetimestring1, true):tvb("Date_Time string 1")
    local datetimestring2 = " 2013-03-02T03:14:48+05:00" -- this is 1362176088 seconds epoch time
    local tvb2 = ByteArray.new(datetimestring2 ..
" foobar", true):tvb("Date_Time string 2") local datetimestring3 = " 2013-03-01T16:44-05:30" -- this is 1362176040 seconds epoch time local tvb3 = ByteArray.new(datetimestring3, true):tvb("Date_Time string 3") local datetimestring4 = "2013-03-02T01:44:00+03:30" -- this is 1362176040 seconds epoch time local tvb4 = ByteArray.new(datetimestring4, true):tvb("Date_Time string 4") local datetimestring5 = "2013-03-01T22:14:48Z" -- this is 1362176088 seconds epoch time local tvb5 = ByteArray.new(datetimestring5, true):tvb("Date_Time string 5") local datetimestring6 = "2013-03-01T22:14Z" -- this is 1362176040 seconds epoch time local tvb6 = ByteArray.new(datetimestring6, true):tvb("Date_Time string 6") testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb1:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring1)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb2:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring2)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb3:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring3)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb4:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring4)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb5:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring5)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb6:range(), ENC_ISO_8601_DATE_TIME) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring6)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-datetime-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "tree:add_packet_field Time string ENC_ISO_8601_DATE") resetResults() autc_match_values = {} local datestring1 = "2013-03-01" -- this is 1362096000 seconds epoch time local d_tvb1 = ByteArray.new(datestring1, true):tvb("Date string 1") local datestring2 = " 2013-03-01" -- this is 1362096000 seconds epoch time local d_tvb2 = ByteArray.new(datestring2 .. " foobar", true):tvb("Date string 2") testlib.test(OTHER, "add_pfield-date-local", treeAddPField ( tree, AUTC, d_tvb1:range(), ENC_ISO_8601_DATE) ) addMatch( NSTime( 1362096000, 0), string.len(datestring1)) testlib.test(OTHER, "add_pfield-date-local", treeAddPField ( tree, AUTC, d_tvb2:range(), ENC_ISO_8601_DATE) ) addMatch( NSTime( 1362096000, 0), string.len(datestring2)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-date-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "tree:add_packet_field Time string ENC_ISO_8601_TIME") resetResults() autc_match_values = {} local timestring1 = "22:14:48" -- this is 80088 seconds local t_tvb1 = ByteArray.new(timestring1, true):tvb("Time string 1") local timestring2 = " 22:14:48" -- this is 80088 seconds local t_tvb2 = ByteArray.new(timestring2 .. " foobar", true):tvb("Time string 2") local now = os.date("!*t") now.hour = 22 now.min = 14 now.sec = 48 local timebase = os.time( now ) timebase = timebase + timezone print ("timebase = " .. tostring(timebase) .. ", timezone=" .. 
timezone) testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, t_tvb1:range(), ENC_ISO_8601_TIME) ) addMatch( NSTime( timebase, 0), string.len(timestring1)) testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, t_tvb2:range(), ENC_ISO_8601_TIME) ) addMatch( NSTime( timebase, 0), string.len(timestring2)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-time-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "tree:add_packet_field Time string ENC_RFC_822") resetResults() autc_match_values = {} local rfc822string1 = "Fri, 01 Mar 13 22:14:48 GMT" -- this is 1362176088 seconds epoch time local rfc822_tvb1 = ByteArray.new(rfc822string1, true):tvb("RFC 822 Time string 1") local rfc822string2 = " Fri, 01 Mar 13 22:14:48 GMT" -- this is 1362176088 seconds epoch time local rfc822_tvb2 = ByteArray.new(rfc822string2 .. " foobar", true):tvb("RFC 822 Time string 2") testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, rfc822_tvb1:range(), ENC_RFC_822) ) addMatch( NSTime( 1362176088, 0), string.len(rfc822string1)) testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, rfc822_tvb2:range(), ENC_RFC_822) ) addMatch( NSTime( 1362176088, 0), string.len(rfc822string2)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-rfc822-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "tree:add_packet_field Time string ENC_RFC_1123") resetResults() autc_match_values = {} local rfc1123string1 = "Fri, 01 Mar 2013 22:14:48 GMT" -- this is 1362176088 seconds epoch time local rfc1123_tvb1 = ByteArray.new(rfc1123string1, true):tvb("RFC 1123 Time string 1") local rfc1123string2 = " Fri, 01 Mar 2013 22:14:48 GMT" -- this is 1362176088 seconds epoch time local rfc1123_tvb2 = ByteArray.new(rfc1123string2 .. " foobar", true):tvb("RFC 1123 Time string 2") testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, rfc1123_tvb1:range(), ENC_RFC_1123) ) addMatch( NSTime( 1362176088, 0), string.len(rfc1123string1)) testlib.test(OTHER, "add_pfield-time-local", treeAddPField ( tree, AUTC, rfc1123_tvb2:range(), ENC_RFC_1123) ) addMatch( NSTime( 1362176088, 0), string.len(rfc1123string2)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-rfc1123-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "tree:add_packet_field Time string ENC_ISO_8601_DATE_TIME_BASIC") resetResults() autc_match_values = {} local datetimestring1 = "20130301T221448+0000" -- this is 1362176088 seconds epoch time local tvb1 = ByteArray.new(datetimestring1, true):tvb("Date_Time string 1") local datetimestring2 = " 20130301171448-0500" -- this is 1362176088 seconds epoch time local tvb2 = ByteArray.new(datetimestring2 .. 
" foobar", true):tvb("Date_Time string 2") local datetimestring3 = " 20130301T1644-0530" -- this is 1362176040 seconds epoch time local tvb3 = ByteArray.new(datetimestring3, true):tvb("Date_Time string 3") local datetimestring4 = "20130302 014400+0330" -- this is 1362176040 seconds epoch time local tvb4 = ByteArray.new(datetimestring4, true):tvb("Date_Time string 4") local datetimestring5 = "20130301T221448Z" -- this is 1362176088 seconds epoch time local tvb5 = ByteArray.new(datetimestring5, true):tvb("Date_Time string 5") local datetimestring6 = "201303012214Z" -- this is 1362176040 seconds epoch time local tvb6 = ByteArray.new(datetimestring6, true):tvb("Date_Time string 6") testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb1:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring1)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb2:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring2)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb3:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring3)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb4:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring4)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb5:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176088, 0), string.len(datetimestring5)) testlib.test(OTHER, "add_pfield-datetime-local", treeAddPField ( tree, AUTC, tvb6:range(), ENC_ISO_8601_DATE_TIME_BASIC) ) addMatch( NSTime( 1362176040, 0), string.len(datetimestring6)) verifyFields("time.ABSOLUTE_UTC", autc_match_fields) verifyResults("add_pfield-datetime-local", autc_match_values) ---------------------------------------- testlib.testing(OTHER, "TvbRange subsets") resetResults() local offset = 5 local len = 10 local b_offset = 3 local b_len = 2 local range local range_raw local expected -- This is the same data from the "tree:add_packet_field Bytes" test -- copied here for clarity local bytesstring1 = "deadbeef0123456789DEADBEEFabcdef" local bytestvb1 = ByteArray.new(bytesstring1, true):tvb("Bytes hex-string 1") -- tvbrange with no offset or length (control test case) range = bytestvb1() range_raw = range:raw() expected = range:bytes():raw() testlib.test(OTHER, "tvbrange_raw", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset) expected = range:bytes():raw(b_offset) testlib.test(OTHER, "tvbrange_raw_offset", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(0, b_len) expected = range:bytes():raw(0, b_len) testlib.test(OTHER, "tvbrange_raw_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset, b_len) expected = range:bytes():raw(b_offset, b_len) testlib.test(OTHER, "tvbrange_raw_offset_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) -- tvbrange with len only range = bytestvb1(0, len) range_raw = range:raw() expected = range:bytes():raw() testlib.test(OTHER, "tvbrange_len_raw", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset) expected = range:bytes():raw(b_offset) 
testlib.test(OTHER, "tvbrange_len_raw_offset", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(0, b_len) expected = range:bytes():raw(0, b_len) testlib.test(OTHER, "tvbrange_len_raw_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset, b_len) expected = range:bytes():raw(b_offset, b_len) testlib.test(OTHER, "tvbrange_len_raw_offset_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) -- tvbrange with offset only range = bytestvb1(offset) range_raw = range:raw() expected = range:bytes():raw() testlib.test(OTHER, "tvbrange_offset_raw", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset) expected = range:bytes():raw(b_offset) testlib.test(OTHER, "tvbrange_offset_raw_offset", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(0, b_len) expected = range:bytes():raw(0, b_len) testlib.test(OTHER, "tvbrange_offset_raw_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset, b_len) expected = range:bytes():raw(b_offset, b_len) testlib.test(OTHER, "tvbrange_offset_raw_offset_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) -- tvbrange with offset and len range = bytestvb1(offset, len) range_raw = range:raw() expected = range:bytes():raw() testlib.test(OTHER, "tvbrange_offset_len_raw", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset) expected = range:bytes():raw(b_offset) testlib.test(OTHER, "tvbrange_offset_len_raw_offset", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(0, b_len) expected = range:bytes():raw(0, b_len) testlib.test(OTHER, "tvbrange_offset_len_raw_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) range_raw = range:raw(b_offset, b_len) expected = range:bytes():raw(b_offset, b_len) testlib.test(OTHER, "tvbrange_offset_len_raw_offset_len", range_raw == expected, string.format('range_raw="%s" expected="%s"', range_raw, expected)) ---------------------------------------- testlib.pass(FRAME) end ---------------------------------------- -- we want to have our protocol dissection invoked for a specific UDP port, -- so get the udp dissector table and add our protocol to it DissectorTable.get("udp.port"):add(65333, test_proto) DissectorTable.get("udp.port"):add(65346, test_proto) print("test_proto dissector registered")
Lua
wireshark/test/lua/unicode.lua
--
-- Unicode tests
--

local errors = 0

function assertEqual(what, a, b)
    if a == b then
        return true
    end
    print('ERROR:', what)
    print('Expected:', tostring(a))
    print(' Actual:', tostring(b))
    errors = errors + 1
end

-- script name check
local scriptname = (debug.getinfo(1, 'S').source or ''):gsub("^@.*[/\\]", "")
assertEqual('script name', 'script-Ф-€-中.lua', scriptname)

-- loadfile
local code, err = loadfile('load-Ф-€-中.lua')
assertEqual('loadfile', nil, err)
assertEqual('loadfile contents', 'Contents of Ф-€-中', code and code())

-- dofile
local ok, result = pcall(dofile, 'load-Ф-€-中.lua')
assertEqual('dofile pcall', true, ok)
assertEqual('dofile contents', 'Contents of Ф-€-中', result)

-- io.open (read)
local fr, err = io.open('load-Ф-€-中.lua')
assertEqual('io.open (read)', nil, err)
assertEqual('io.read', 'return "Contents of Ф-€-中"\n', fr and fr:read('*a'))
if fr then fr:close() end

-- io.open (write)
local fw, err = io.open('written-by-lua-Ф-€-中.txt', 'w')
assertEqual('io.open (write)', nil, err)
if fw then
    local _, err = fw:write('Feedback from Lua: Ф-€-中\n')
    assertEqual('io.write', nil, err)
end
if fw then fw:close() end

-- Check for Unicode in personal plugins directory path.
local pdir_expected = 'unicode-Ф-€-中-testcases'
local pdir = Dir.personal_plugins_path()
pdir = pdir:gsub('.*[/\\]unicode-.*-.*-testcases[/\\].*', pdir_expected)
assertEqual('Unicode in Dir.personal_plugins_path', pdir_expected, pdir)

if errors ~= 0 then
    error('Failed tests: ' .. errors)
end

print("All tests passed!")
Lua
wireshark/test/lua/util.lua
-- test script for wslua utility functions

local testlib = require("testlib")

local GET_PREF = "get"
local SET_PREF = "set"
local RESET_PREF = "reset"
local OTHER = "other"

testlib.init( { [GET_PREF] = 14, [SET_PREF] = 37, [RESET_PREF] = 11, [OTHER] = 0 } )

local console_open

--------------------------

-- Note: this test expects some specific default values
testlib.testing("get_preference")

success = pcall(get_preference)
testlib.test(GET_PREF,"get_preference-empty-0", not success)
testlib.test(GET_PREF,"get_preference-empty-1",get_preference("") == nil)
testlib.test(GET_PREF,"get_preference-unknown-0",get_preference("g") == nil)
testlib.test(GET_PREF,"get_preference-unknown-1",get_preference("gui") == nil)
testlib.test(GET_PREF,"get_preference-unknown-2",get_preference("gui.") == nil)
testlib.test(GET_PREF,"get_preference-unknown-3",get_preference("gui.ask") == nil)
testlib.test(GET_PREF,"get_preference-unknown-4",get_preference("ugi.ask_unsaved") == nil)
testlib.test(GET_PREF,"get_preference-uint-0",get_preference("gui.fileopen.preview") == 3)
testlib.test(GET_PREF,"get_preference-bool-0",get_preference("gui.ask_unsaved") == true)
testlib.test(GET_PREF,"get_preference-bool-1",get_preference("gui.interfaces_show_hidden") == false)

-- gui.console_open is persistent (in the Windows registry) and for that
-- reason does not have a default value.
console_open = get_preference("gui.console_open")
testlib.test(GET_PREF,"get_preference-enum-0",console_open == "NEVER" or console_open == "AUTOMATIC" or console_open == "ALWAYS")

testlib.test(GET_PREF,"get_preference-string-0",get_preference("gui.window_title") == "")
testlib.test(GET_PREF,"get_preference-range-0",get_preference("http.tls.port") == "443")

success = pcall(get_preference, "user_dlt.encaps_table")
testlib.test(GET_PREF,"get_preference-uat-0", not success)

--------------------------

testlib.testing("set_preference")

success = pcall(set_preference)
testlib.test(SET_PREF,"set_preference-empty-0", not success)
testlib.test(SET_PREF,"set_preference-empty-1",set_preference("") == nil)
testlib.test(SET_PREF,"set_preference-unknown-0",set_preference("g") == nil)
testlib.test(SET_PREF,"set_preference-unknown-1",set_preference("gui") == nil)
testlib.test(SET_PREF,"set_preference-unknown-2",set_preference("gui.") == nil)
testlib.test(SET_PREF,"set_preference-unknown-3",set_preference("gui.ask") == nil)
testlib.test(SET_PREF,"set_preference-unknown-4",set_preference("ugi.ask_unsaved") == nil)

success = pcall(set_preference,"gui.fileopen.preview")
testlib.test(SET_PREF,"set_preference-uint-0", not success)
success = pcall(set_preference,"gui.fileopen.preview",true)
testlib.test(SET_PREF,"set_preference-uint-1", not success)
success = pcall(set_preference,"gui.fileopen.preview","string")
testlib.test(SET_PREF,"set_preference-uint-2", not success)
testlib.test(SET_PREF,"set_preference-uint-3",set_preference("gui.fileopen.preview",3) == false)
testlib.test(SET_PREF,"set_preference-uint-4",set_preference("gui.fileopen.preview",42) == true)
testlib.test(SET_PREF,"set_preference-uint-4-get",get_preference("gui.fileopen.preview") == 42)

success = pcall(set_preference,"gui.ask_unsaved")
testlib.test(SET_PREF,"set_preference-bool-0", not success)
success = pcall(set_preference,"gui.ask_unsaved",42)
testlib.test(SET_PREF,"set_preference-bool-1", not success)
success = pcall(set_preference,"gui.ask_unsaved","string")
testlib.test(SET_PREF,"set_preference-bool-2", not success)
testlib.test(SET_PREF,"set_preference-bool-3",set_preference("gui.ask_unsaved", true) == false)
testlib.test(SET_PREF,"set_preference-bool-4",set_preference("gui.ask_unsaved", false) == true)

success = pcall(set_preference,"gui.console_open")
testlib.test(SET_PREF,"set_preference-enum-0", not success)
success = pcall(set_preference,"gui.console_open",true)
testlib.test(SET_PREF,"set_preference-enum-1", not success)
-- false means unchanged
testlib.test(SET_PREF,"set_preference-enum-2",set_preference("gui.console_open",console_open) == false)

success = pcall(set_preference,"gui.window_title")
testlib.test(SET_PREF,"set_preference-string-0", not success)
success = pcall(set_preference,"gui.window_title",true)
testlib.test(SET_PREF,"set_preference-string-1", not success)
testlib.test(SET_PREF,"set_preference-string-2",set_preference("gui.window_title","Title") == true)
testlib.test(SET_PREF,"set_preference-string-2-get",get_preference("gui.window_title") == "Title")
testlib.test(SET_PREF,"set_preference-string-3",set_preference("gui.window_title","Title") == false)
testlib.test(SET_PREF,"set_preference-string-4",set_preference("gui.window_title","") == true)
testlib.test(SET_PREF,"set_preference-string-4-get",get_preference("gui.window_title") == "")
testlib.test(SET_PREF,"set_preference-string-5",set_preference("gui.window_title","") == false)

success = pcall(set_preference,"http.tls.port")
testlib.test(SET_PREF,"set_preference-range-0", not success)
success = pcall(set_preference,"http.tls.port","65536") -- Number too big
testlib.test(SET_PREF,"set_preference-range-1", not success)
success = pcall(set_preference,"http.tls.port","http") -- Syntax error
testlib.test(SET_PREF,"set_preference-range-2", not success)
testlib.test(SET_PREF,"set_preference-range-3",set_preference("http.tls.port","443") == false)
testlib.test(SET_PREF,"set_preference-range-4",set_preference("http.tls.port","443-444") == true)
testlib.test(SET_PREF,"set_preference-range-4-get",get_preference("http.tls.port") == "443-444")
testlib.test(SET_PREF,"set_preference-range-5",set_preference("http.tls.port","443-444") == false)

success = pcall(set_preference, "user_dlt.encaps_table")
testlib.test(SET_PREF,"set_preference-uat-0", not success)

--------------------------

testlib.testing("reset_preference")

success = pcall(set_preference)
testlib.test(RESET_PREF,"reset_preference-empty-0", not success)
testlib.test(RESET_PREF,"reset_preference-empty-1",reset_preference("") == nil)
testlib.test(RESET_PREF,"reset_preference-unknown-0",reset_preference("unknown") == nil)
testlib.test(RESET_PREF,"reset_preference-uint-0",reset_preference("gui.fileopen.preview") == true)
testlib.test(RESET_PREF,"reset_preference-uint-0-get",get_preference("gui.fileopen.preview") == 3)
testlib.test(RESET_PREF,"reset_preference-bool-0",reset_preference("gui.ask_unsaved") == true)
testlib.test(RESET_PREF,"reset_preference-bool-0-get",get_preference("gui.ask_unsaved") == true)
testlib.test(RESET_PREF,"reset_preference-string-0",reset_preference("gui.window_title") == true)
testlib.test(RESET_PREF,"reset_preference-string-0-get",get_preference("gui.window_title") == "")
testlib.test(RESET_PREF,"reset_preference-range-0",reset_preference("http.tls.port") == true)
testlib.test(RESET_PREF,"reset_preference-range-0-get",get_preference("http.tls.port") == "443")

testlib.getResults()
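-- A minimal usage sketch of the preference API exercised above
-- (illustrative, not part of the test run): set_preference() returns true
-- only when the value actually changed, so a caller can skip the
-- apply_preferences() call when nothing changed.
--
--   if set_preference("gui.window_title", "My Capture") then
--       apply_preferences()
--   end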
Lua
wireshark/test/lua/verify_dissector.lua
-- This is a test script for tshark.
-- This script runs inside tshark.
-- FIRST run tshark with the "dns_dissector.lua" plugin, with the dns_port.pcap file,
-- and with full tree output (-V switch). Pipe that to a file named testin.txt.
-- This verify script then reads in that testin.txt. The filename can be specified
-- using the "verify_file" argument.
--
-- tshark -r bogus.cap -X lua_script:<path_to_testdir>/lua/verify_dns_dissector.lua

local function testing(...)
    print("---- Testing "..tostring(...).." ----")
end

local lines = {
    {
        "MyDNS Protocol",
        "Transaction ID: 42",
        "Flags: 0x0100",
        "0... .... .... .... = Response: this is a query",
        "[Expert Info (Chat/Request): DNS query message]",
        "[DNS query message]",
        "[Severity level: Chat]",
        "[Group: Request]",
        ".000 0... .... .... = Opcode: 0",
        ".... ..0. .... .... = Truncated: False",
        ".... ...1 .... .... = Recursion desired: yes",
        ".... .... .0.. .... = World War Z - Reserved for future use: 0x0",
        ".... .... ...0 .... = Checking disabled: False",
        "Number of Questions: 1",
        "Number of Answer RRs: 0",
        "Number of Authority RRs: 0",
        "Number of Additional RRs: 0",
        "Queries",
        "us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)",
        "Name: us.pool.ntp.org",
        "[Name Length: 17]",
        "[Label Count: 4]",
        "Type: A (IPv4 host address) (1)",
        "Class: IN (Internet) (1)",
    },
    {
        "MyDNS Protocol",
        "Transaction ID: 42",
        "Flags: 0x8180",
        "1... .... .... .... = Response: this is a response",
        "[Expert Info (Chat/Response): It's a response!]",
        "[It's a response!]",
        "[Severity level: Chat]",
        "[Group: Response]",
        ".000 0... .... .... = Opcode: 0",
        ".... .0.. .... .... = Authoritative: False",
        ".... ..0. .... .... = Truncated: False",
        ".... .... 1... .... = Recursion available: True",
        ".... .... .0.. .... = World War Z - Reserved for future use: 0x0",
        ".... .... ..0. .... = Authenticated: no",
        ".... .... .... 0000 = Response code: No Error (0)",
        ".... .... ...0 .... = Checking disabled: False",
        "DNS answer to life, the universe, and everything",
        "[Expert Info (Note/Comment): DNS answer to life, the universe, and everything]",
        "[DNS answer to life, the universe, and everything]",
        "[Severity level: Note]",
        "[Group: Comment]",
        "Number of Questions: 1",
        "Number of Answer RRs: 15",
        "Number of Authority RRs: 6",
        "Number of Additional RRs: 2",
        "Queries",
        "us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)",
        "Name: us.pool.ntp.org",
        "[Name Length: 17]",
        "[Label Count: 4]",
        "Type: A (IPv4 host address) (1)",
        "Class: IN (Internet) (1)",
    },
    {
        "MyDNS Protocol",
        "Transaction ID: 43",
        "Flags: 0x0100",
        "0... .... .... .... = Response: this is a query",
        "[Expert Info (Chat/Request): DNS query message]",
        "[DNS query message]",
        "[Severity level: Chat]",
        "[Group: Request]",
        ".000 0... .... .... = Opcode: 0",
        ".... ..0. .... .... = Truncated: False",
        ".... ...1 .... .... = Recursion desired: yes",
        ".... .... .0.. .... = World War Z - Reserved for future use: 0x0",
        ".... .... ...0 .... = Checking disabled: False",
        "Number of Questions: 1",
        "Number of Answer RRs: 0",
        "Number of Authority RRs: 0",
        "Number of Additional RRs: 0",
        "Queries",
        "us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)",
        "Name: us.pool.ntp.org",
        "[Name Length: 17]",
        "[Label Count: 4]",
        "Type: A (IPv4 host address) (1)",
        "Class: IN (Internet) (1)",
    },
    {
        "MyDNS Protocol",
        "Transaction ID: 43",
        "Flags: 0x8180",
        "1... .... .... .... = Response: this is a response",
        "[Expert Info (Chat/Response): It's a response!]",
        "[It's a response!]",
        "[Severity level: Chat]",
        "[Group: Response]",
        ".000 0... .... .... = Opcode: 0",
        ".... .0.. .... .... = Authoritative: False",
        ".... ..0. .... .... = Truncated: False",
        ".... .... 1... .... = Recursion available: True",
        ".... .... .0.. .... = World War Z - Reserved for future use: 0x0",
        ".... .... ..0. .... = Authenticated: no",
        ".... .... .... 0000 = Response code: No Error (0)",
        ".... .... ...0 .... = Checking disabled: False",
        "Number of Questions: 1",
        "Number of Answer RRs: 15",
        "Number of Authority RRs: 6",
        "Number of Additional RRs: 2",
        "Queries",
        "us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)",
        "Name: us.pool.ntp.org",
        "[Name Length: 17]",
        "[Label Count: 4]",
        "Type: A (IPv4 host address) (1)",
        "Class: IN (Internet) (1)",
    },
}

-- we're going to see those two sets of output twice: both by the normal
-- dissector, then the first one by the heuristic, then the second one by
-- a conversation match
local numtests = 1 + #lines[1] + #lines[2] + #lines[3] + #lines[4]

local hasHeuristic = true
local verify_file = "testin.txt"

-- grab passed-in arguments
local args = { ... }
if #args > 0 then
    for _, arg in ipairs(args) do
        local name, value = arg:match("(.+)=(.+)")
        if arg == "no_heur" then
            numtests = numtests - 1
        elseif name == "verify_file" and value then
            verify_file = value
        end
    end
end

print("going to run "..numtests.." tests")

-- for an example of what we're reading through to verify, look at end of this file
print("opening file "..verify_file)
local file = io.open(verify_file, "r")
local line = file:read()

local pktidx = 1
local total = 0
local found = false

while line do
    -- eat beginning whitespace
    line = line:gsub("^%s+","",1)
    if line:find("^Frame %d+:") then
        pktidx = line:match("^Frame (%d+):")
        testing("Frame "..pktidx)
        pktidx = tonumber(pktidx)
        if pktidx > 4 then pktidx = pktidx - 4 end
        line = file:read()
    elseif line:find("%[Heuristic dissector used%]") then
        -- start again, because it now repeats
        -- but we should not see this [Heuristic dissector used] line again
        -- or it's an error in setting the conversation
        if found then
            error("Heuristic dissector ran twice - conversation setting not working?")
            return
        end
        found = true
        total = total + 1
        line = file:read()
    elseif line == lines[pktidx][1] then
        -- we've matched the first line of our section
        -- now verify the rest is sequential
        for i, v in ipairs(lines[pktidx]) do
            io.stdout:write("testing Frame "..pktidx..", line "..i.."...")
            if not line then
                -- ended too soon
                io.stdout:write("failed!\n")
                error("Ran out of file lines!")
                return
            end
            -- eat beginning whitespace
            line = line:gsub("^%s+","",1)
            if line ~= v then
                io.stdout:write("failed!\n")
                print("Got this:'"..line.."', expected this:'"..v.."'")
                error("mismatched lines!")
                return
            end
            io.stdout:write("passed\n")
            total = total + 1
            line = file:read()
        end
    else
        line = file:read()
    end
end

print(total.." of "..numtests.." tests run and passed")

if total ~= numtests then
    error("Did not find all our lines to test!")
    return
end

print("\n-----------------------------\n")

-- must print out the following for success (the test shell script looks for this)
print("All tests passed!\n\n")

----------------------------------------------------------
-- We should see something like this:
--[[
Frame 1: 75 bytes on wire (600 bits), 75 bytes captured (600 bits)
    Encapsulation type: Ethernet (1)
    Arrival Time: Sep 26, 2004 23:18:04.938672000 EDT
    [Time shift for this packet: 0.000000000 seconds]
    Epoch Time: 1096255084.938672000 seconds
    [Time delta from previous captured frame: 0.000000000 seconds]
    [Time delta from previous displayed frame: 0.000000000 seconds]
    [Time since reference or first frame: 0.000000000 seconds]
    Frame Number: 1
    Frame Length: 75 bytes (600 bits)
    Capture Length: 75 bytes (600 bits)
    [Frame is marked: False]
    [Frame is ignored: False]
    [Protocols in frame: eth:ethertype:ip:udp:mydns]
Ethernet II, Src: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e), Dst: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53)
    Destination: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53)
        Address: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53)
        .... ..0. .... .... .... .... = LG bit: Globally unique address (factory default)
        .... ...0 .... .... .... .... = IG bit: Individual address (unicast)
    Source: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e)
        Address: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e)
        .... ..0. .... .... .... .... = LG bit: Globally unique address (factory default)
        .... ...0 .... .... .... .... = IG bit: Individual address (unicast)
    Type: IP (0x0800)
Internet Protocol Version 4, Src: 192.168.50.50 (192.168.50.50), Dst: 192.168.0.1 (192.168.0.1)
    Version: 4
    Header Length: 20 bytes
    Differentiated Services Field: 0x00 (DSCP 0x00: Default; ECN: 0x00: Not-ECT (Not ECN-Capable Transport))
        0000 00.. = Differentiated Services Codepoint: Default (0x00)
        .... ..00 = Explicit Congestion Notification: Not-ECT (Not ECN-Capable Transport) (0x00)
    Total Length: 61
    Identification: 0x0a41 (2625)
    Flags: 0x00
        0... .... = Reserved bit: Not set
        .0.. .... = Don't fragment: Not set
        ..0. .... = More fragments: Not set
    Fragment offset: 0
    Time to live: 128
    Protocol: UDP (17)
    Header checksum: 0x7ceb [correct]
        [Good: True]
        [Bad: False]
    Source: 192.168.50.50 (192.168.50.50)
    Destination: 192.168.0.1 (192.168.0.1)
User Datagram Protocol, Src Port: 65282 (65282), Dst Port: 65333 (65333)
    Source Port: 65282 (65282)
    Destination Port: 65333 (65333)
    Length: 41
    Checksum: 0x07a9 [validation disabled]
        [Good Checksum: False]
        [Bad Checksum: False]
    [Stream index: 0]
MyDNS Protocol
    Transaction ID: 43
    Flags: 0x0100
        0... .... .... .... = Response: this is a query
        .000 0... .... .... = Opcode: 0
        .... ..0. .... .... = Truncated: False
        .... ...1 .... .... = Recursion desired: yes
        .... .... .0.. .... = World War Z - Reserved for future use: 0x0
        .... .... ...0 .... = Checking disabled: False
    Number of Questions: 1
    Number of Answer RRs: 0
    Number of Authority RRs: 0
    Number of Additional RRs: 0
    Queries
        us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)
            Name: us.pool.ntp.org
            [Name Length: 17]
            [Label Count: 4]
            Type: A (IPv4 host address) (1)
            Class: IN (Internet) (1)

Frame 2: 540 bytes on wire (4320 bits), 540 bytes captured (4320 bits)
    Encapsulation type: Ethernet (1)
    Arrival Time: Sep 26, 2004 23:18:04.945618000 EDT
    [Time shift for this packet: 0.000000000 seconds]
    Epoch Time: 1096255084.945618000 seconds
    [Time delta from previous captured frame: 0.006946000 seconds]
    [Time delta from previous displayed frame: 0.006946000 seconds]
    [Time since reference or first frame: 0.006946000 seconds]
    Frame Number: 2
    Frame Length: 540 bytes (4320 bits)
    Capture Length: 540 bytes (4320 bits)
    [Frame is marked: False]
    [Frame is ignored: False]
    [Protocols in frame: eth:ethertype:ip:udp:mydns]
Ethernet II, Src: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53), Dst: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e)
    Destination: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e)
        Address: AmbitMic_6c:40:4e (00:d0:59:6c:40:4e)
        .... ..0. .... .... .... .... = LG bit: Globally unique address (factory default)
        .... ...0 .... .... .... .... = IG bit: Individual address (unicast)
    Source: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53)
        Address: Cisco-Li_82:b2:53 (00:0c:41:82:b2:53)
        .... ..0. .... .... .... .... = LG bit: Globally unique address (factory default)
        .... ...0 .... .... .... .... = IG bit: Individual address (unicast)
    Type: IP (0x0800)
Internet Protocol Version 4, Src: 192.168.0.1 (192.168.0.1), Dst: 192.168.50.50 (192.168.50.50)
    Version: 4
    Header Length: 20 bytes
    Differentiated Services Field: 0x00 (DSCP 0x00: Default; ECN: 0x00: Not-ECT (Not ECN-Capable Transport))
        0000 00.. = Differentiated Services Codepoint: Default (0x00)
        .... ..00 = Explicit Congestion Notification: Not-ECT (Not ECN-Capable Transport) (0x00)
    Total Length: 526
    Identification: 0x2153 (8531)
    Flags: 0x00
        0... .... = Reserved bit: Not set
        .0.. .... = Don't fragment: Not set
        ..0. .... = More fragments: Not set
    Fragment offset: 0
    Time to live: 63
    Protocol: UDP (17)
    Header checksum: 0xa508 [correct]
        [Good: True]
        [Bad: False]
    Source: 192.168.0.1 (192.168.0.1)
    Destination: 192.168.50.50 (192.168.50.50)
User Datagram Protocol, Src Port: 65333 (65333), Dst Port: 65282 (65282)
    Source Port: 65333 (65333)
    Destination Port: 65282 (65282)
    Length: 506
    Checksum: 0xf9d5 [validation disabled]
        [Good Checksum: False]
        [Bad Checksum: False]
    [Stream index: 0]
MyDNS Protocol
    Transaction ID: 43
    Flags: 0x8180
        1... .... .... .... = Response: this is a response
        .000 0... .... .... = Opcode: 0
        .... .0.. .... .... = Authoritative: False
        .... ..0. .... .... = Truncated: False
        .... .... 1... .... = Recursion available: True
        .... .... .0.. .... = World War Z - Reserved for future use: 0x0
        .... .... ..0. .... = Authenticated: no
        .... .... .... 0000 = Response code: No Error (0)
        .... .... ...0 .... = Checking disabled: False
    Number of Questions: 1
    Number of Answer RRs: 15
    Number of Authority RRs: 6
    Number of Additional RRs: 2
    Queries
        us.pool.ntp.org: type A (IPv4 host address) (1), class IN (Internet) (1)
            Name: us.pool.ntp.org
            [Name Length: 17]
            [Label Count: 4]
            Type: A (IPv4 host address) (1)
            Class: IN (Internet) (1)
]]
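-- A hypothetical end-to-end invocation matching the header comment at the
-- top of this file (paths and capture names are illustrative):
--
--   tshark -r dns_port.pcap -V -X lua_script:dns_dissector.lua > testin.txt
--   tshark -r dns_port.pcap -X lua_script:verify_dns_dissector.lua \
--          -X lua_script1:verify_file=testin.txt
--
-- tshark's "-X lua_script1:<arg>" option hands <arg> to the first loaded
-- Lua script, which is how the "verify_file" and "no_heur" arguments reach
-- this script's varargs.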
Lua
wireshark/test/lua/verify_globals.lua
-- verify_globals.lua

-- ignore things that change on different machines or every release
-- the following items still have to exist, but their values don't have to match
local filter = {
    -- differences by machine
    "DATA_DIR",
    "USER_DIR",
    "package.cpath",
    "package.path",
    "package.loaded",
    "run_user_scripts_when_superuser",
    "running_superuser",
    -- differences in Lua versions
    "_VERSION",
    "package.config",
    -- differences caused by changes in wireshark 1.11
    "NSTime",
    "Proto",
    'Listener["<metatable>"].__index',
    ".__index"
}

-- the following items don't have to exist
local ignore = {
    -- not sure why this was removed in wireshark 1.11, but it was
    "TreeItem.set_expert_flags",
    -- in Lua 5.1 only
    "debug.getfenv",
    "debug.setfenv",
    "gcinfo",
    "getfenv",
    "io.gfind",
    "setfenv",
    "math.mod",
    "newproxy",
    "string.gfind",
    "table.foreach",
    "table.foreachi",
    "table.getn",
    "table.setn",
    -- in Lua 5.2+ only
    "bit32",
    "debug.getuservalue",
    "debug.setuservalue",
    "debug.upvalueid",
    "debug.upvaluejoin",
    "package.searchers",
    "package.searchpath",
    "rawlen",
    "table.pack",
    "table.unpack",
}

local arg={...} -- get passed-in args
-- arg1 = path to find inspect
-- arg2 = filename to read in (optional, unless 'verify' is set)
-- arg3 = 'verify' to verify all of read-in file is in _G (default); 'new' to output all items in _G that are not in read-in file
-- arg4 = 'nometa' to ignore metatables; 'meta' otherwise (default)

local add_path = "lua/?.lua;"
if #arg > 0 then
    add_path = arg[1].."?.lua;"
end

print("package.path = " .. package.path)

-- need the path to find inspect.lua
local old_path = package.path
package.path = add_path .. package.path

local inspect = require("inspect")

package.path = old_path -- return path to original

print("-- Wireshark version: " .. get_version())

if #arg == 1 then
    -- no more args, so just output globals
    print(inspect(_G, { serialize = true, filter = inspect.makeFilter(filter) }))
    return
end

local file = assert(io.open(arg[2], "r"))
local input = file:read("*all")
input = inspect.marshal(input)

local nometa = false
if #arg > 3 and arg[4] == "nometa" then
    nometa = true
end

if #arg == 2 or arg[3] == "verify" then
    print(string.rep("\n", 2))
    print("Verifying input file '"..arg[2].."' is contained within the global table")
    local ret, diff = inspect.compare(input, _G, {
        ['filter'] = inspect.makeFilter(filter),
        ['ignore'] = inspect.makeFilter(ignore),
        ['nonumber'] = true,
        ['nometa'] = nometa
    })
    if not ret then
        print("Comparison failed - global table does not have all the items in the input file!")
        print(string.rep("\n", 2))
        print(string.rep("-", 80))
        print("Differences are:")
        print(inspect(diff))
    else
        print("\n-----------------------------\n")
        print("All tests passed!\n\n")
    end
    return
elseif #arg > 2 and arg[3] == "new" then
    local ret, diff = inspect.compare(_G, input, {
        ['filter'] = inspect.makeFilter(filter),
        ['ignore'] = inspect.makeFilter(ignore),
        ['nonumber'] = true,
        ['keep'] = true,
        ['nometa'] = nometa
    })
    if not ret then
        print(inspect(diff))
    else
        print("\n-----------------------------\n")
        print("No new items!\n\n")
    end
end
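-- A hypothetical pair of invocations for the argument scheme documented
-- above (file and path names are illustrative): first dump the globals to a
-- baseline file, then verify a later build against that baseline.
--
--   tshark -r empty.pcap -X lua_script:verify_globals.lua \
--          -X lua_script1:lua/ > globals_baseline.txt
--   tshark -r empty.pcap -X lua_script:verify_globals.lua \
--          -X lua_script1:lua/ -X lua_script1:globals_baseline.txt \
--          -X lua_script1:verify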
Protocol Buffers
wireshark/test/protobuf_lang_files/complex_proto_files/complex_syntax.proto
// Test more complex syntax of *.proto files.
syntax = "proto3";

package wireshark.protobuf.test.complex.syntax;

import "google/protobuf/descriptor.proto";

// equal to "testing.multiline.strings"
option java_package = "testing." 'multiline.' "strings";

// user defined options for messages
extend google.protobuf.MessageOptions {
    bool disabled = 1071;
    bool ignored = 1072;
    TestMultiLinesOption mlinemsg = 1073;
}

// user defined options for oneof types
extend google.protobuf.OneofOptions {
    bool required = 1071;
}

// user defined options for fields
extend google.protobuf.FieldOptions {
    FieldRules rules = 1071;
}

// test extend google.protobuf.FieldOptions twice
extend google.protobuf.FieldOptions {
    string multilines = 1072;
}

message FieldRules {
    oneof type {
        BoolRules bool = 13;
        StringRules string = 14;
    }
    repeated uint32 repeated_uint = 15;
}

message StringRules {
    uint64 min_bytes = 2;
    BoolRules morebool = 3;
    string astr = 4;
}

message BoolRules {
    bool const = 1;
    repeated bool repeated_bool = 2;
    repeated int32 repeated_int = 3;
}

message TestMultiLinesOption {
    string mlines = 1;
}

message ComplexDefinedMessage {
    option (mlinemsg).mlines = "first line" "second line";

    // test complex field options
    string fieldWithComplexOption1 = 1 [(wireshark.protobuf.test.complex.syntax.rules).string = {min_bytes: 1}];
    string fieldWithComplexOption2 = 2 [(rules).string = {min_bytes: 2 astr: "abc" }];
    string fieldWithComplexOption3 = 3 [(rules).string.morebool = {const: true, repeated_bool: [false, true], repeated_int: [1, 2]}];
    string fieldWithComplexOption4 = 4 [(rules).string = {min_bytes: 1; morebool { const: true }}];
    string fieldWithComplexOption5 = 5 [(rules).repeated_uint = 1, (rules).repeated_uint = 2];

    // test oneof custom option
    oneof oneofWithOption {
        option (wireshark.protobuf.test.complex.syntax.required) = true;
        int32 field1 = 11;
        string field2 = 12;
    }

    // test multilines strings
    uint32 fieldWithMultilineStringOption = 20 [(wireshark.protobuf.test.complex.syntax.multilines) =
        "first line"
        'Second line'
    ];
}

// add this message for testing whether this file was successfully parsed
message TestFileParsed {
    optional int32 last_field_for_wireshark_test = 1;
}
Protocol Buffers
wireshark/test/protobuf_lang_files/complex_proto_files/unittest_custom_options.proto
// This file is from https://github.com/protocolbuffers/protobuf/blob/3.14.x/src/google/protobuf/unittest_custom_options.proto
// To reduce the file size, some comments have been removed.
// Message 'TestFileParsed' is added at the end of file for testing whether this file was successfully parsed.
syntax = "proto2";

option cc_generic_services = true;
option java_generic_services = true;
option py_generic_services = true;
option (file_opt1) = 9876543210;

import "google/protobuf/any.proto";
import "google/protobuf/descriptor.proto";

package protobuf_unittest;

// Some simple test custom options of various types.

extend google.protobuf.FileOptions {
    optional uint64 file_opt1 = 7736974;
}

extend google.protobuf.MessageOptions {
    optional int32 message_opt1 = 7739036;
}

extend google.protobuf.FieldOptions {
    optional fixed64 field_opt1 = 7740936;
    optional int32 field_opt2 = 7753913 [default = 42];
}

extend google.protobuf.OneofOptions {
    optional int32 oneof_opt1 = 7740111;
}

extend google.protobuf.EnumOptions {
    optional sfixed32 enum_opt1 = 7753576;
}

extend google.protobuf.EnumValueOptions {
    optional int32 enum_value_opt1 = 1560678;
}

extend google.protobuf.ServiceOptions {
    optional sint64 service_opt1 = 7887650;
}

enum MethodOpt1 {
    METHODOPT1_VAL1 = 1;
    METHODOPT1_VAL2 = 2;
}

extend google.protobuf.MethodOptions {
    optional MethodOpt1 method_opt1 = 7890860;
}

message TestMessageWithCustomOptions {
    option message_set_wire_format = false;
    option (message_opt1) = -56;

    optional string field1 = 1 [ctype = CORD, (field_opt1) = 8765432109];

    oneof AnOneof {
        option (oneof_opt1) = -99;
        int32 oneof_field = 2;
    }

    enum AnEnum {
        option (enum_opt1) = -789;
        ANENUM_VAL1 = 1;
        ANENUM_VAL2 = 2 [(enum_value_opt1) = 123];
    }
}

message CustomOptionFooRequest {}
message CustomOptionFooResponse {}
message CustomOptionFooClientMessage {}
message CustomOptionFooServerMessage {}

service TestServiceWithCustomOptions {
    option (service_opt1) = -9876543210;

    rpc Foo(CustomOptionFooRequest) returns (CustomOptionFooResponse) {
        option (method_opt1) = METHODOPT1_VAL2;
    }
}

message DummyMessageContainingEnum {
    enum TestEnumType {
        TEST_OPTION_ENUM_TYPE1 = 22;
        TEST_OPTION_ENUM_TYPE2 = -23;
    }
}

message DummyMessageInvalidAsOptionType {}

extend google.protobuf.MessageOptions {
    optional bool bool_opt = 7706090;
    optional int32 int32_opt = 7705709;
    optional int64 int64_opt = 7705542;
    optional uint32 uint32_opt = 7704880;
    optional uint64 uint64_opt = 7702367;
    optional sint32 sint32_opt = 7701568;
    optional sint64 sint64_opt = 7700863;
    optional fixed32 fixed32_opt = 7700307;
    optional fixed64 fixed64_opt = 7700194;
    optional sfixed32 sfixed32_opt = 7698645;
    optional sfixed64 sfixed64_opt = 7685475;
    optional float float_opt = 7675390;
    optional double double_opt = 7673293;
    optional string string_opt = 7673285;
    optional bytes bytes_opt = 7673238;
    optional DummyMessageContainingEnum.TestEnumType enum_opt = 7673233;
    optional DummyMessageInvalidAsOptionType message_type_opt = 7665967;
}

message CustomOptionMinIntegerValues {
    option (bool_opt) = false;
    option (int32_opt) = -0x80000000;
    option (int64_opt) = -0x8000000000000000;
    option (uint32_opt) = 0;
    option (uint64_opt) = 0;
    option (sint32_opt) = -0x80000000;
    option (sint64_opt) = -0x8000000000000000;
    option (fixed32_opt) = 0;
    option (fixed64_opt) = 0;
    option (sfixed32_opt) = -0x80000000;
    option (sfixed64_opt) = -0x8000000000000000;
}

message CustomOptionMaxIntegerValues {
    option (bool_opt) = true;
    option (int32_opt) = 0x7FFFFFFF;
    option (int64_opt) = 0x7FFFFFFFFFFFFFFF;
    option (uint32_opt) = 0xFFFFFFFF;
    option (uint64_opt) = 0xFFFFFFFFFFFFFFFF;
    option (sint32_opt) = 0x7FFFFFFF;
    option (sint64_opt) = 0x7FFFFFFFFFFFFFFF;
    option (fixed32_opt) = 0xFFFFFFFF;
    option (fixed64_opt) = 0xFFFFFFFFFFFFFFFF;
    option (sfixed32_opt) = 0x7FFFFFFF;
    option (sfixed64_opt) = 0x7FFFFFFFFFFFFFFF;
}

message CustomOptionOtherValues {
    option (int32_opt) = -100;  // To test sign-extension.
    option (float_opt) = 12.3456789;
    option (double_opt) = 1.234567890123456789;
    option (string_opt) = "Hello, \"World\"";
    option (bytes_opt) = "Hello\0World";
    option (enum_opt) = TEST_OPTION_ENUM_TYPE2;
}

message SettingRealsFromPositiveInts {
    option (float_opt) = 12;
    option (double_opt) = 154;
}

message SettingRealsFromNegativeInts {
    option (float_opt) = -12;
    option (double_opt) = -154;
}

message ComplexOptionType1 {
    optional int32 foo = 1;
    optional int32 foo2 = 2;
    optional int32 foo3 = 3;
    repeated int32 foo4 = 4;

    extensions 100 to max;
}

message ComplexOptionType2 {
    optional ComplexOptionType1 bar = 1;
    optional int32 baz = 2;

    message ComplexOptionType4 {
        optional int32 waldo = 1;

        extend google.protobuf.MessageOptions {
            optional ComplexOptionType4 complex_opt4 = 7633546;
        }
    }

    optional ComplexOptionType4 fred = 3;
    repeated ComplexOptionType4 barney = 4;

    extensions 100 to max;
}

message ComplexOptionType3 {
    optional int32 qux = 1;

    optional group ComplexOptionType5 = 2 {
        optional int32 plugh = 3;
    }
}

extend ComplexOptionType1 {
    optional int32 quux = 7663707;
    optional ComplexOptionType3 corge = 7663442;
}

extend ComplexOptionType2 {
    optional int32 grault = 7650927;
    optional ComplexOptionType1 garply = 7649992;
}

extend google.protobuf.MessageOptions {
    optional protobuf_unittest.ComplexOptionType1 complex_opt1 = 7646756;
    optional ComplexOptionType2 complex_opt2 = 7636949;
    optional ComplexOptionType3 complex_opt3 = 7636463;
    optional group ComplexOpt6 = 7595468 {
        optional int32 xyzzy = 7593951;
    }
}

message VariousComplexOptions {
    option (.protobuf_unittest.complex_opt1).foo = 42;
    option (protobuf_unittest.complex_opt1).(.protobuf_unittest.quux) = 324;
    option (.protobuf_unittest.complex_opt1).(protobuf_unittest.corge).qux = 876;
    option (protobuf_unittest.complex_opt1).foo4 = 99;
    option (protobuf_unittest.complex_opt1).foo4 = 88;
    option (complex_opt2).baz = 987;
    option (complex_opt2).(grault) = 654;
    option (complex_opt2).bar.foo = 743;
    option (complex_opt2).bar.(quux) = 1999;
    option (complex_opt2).bar.(protobuf_unittest.corge).qux = 2008;
    option (complex_opt2).(garply).foo = 741;
    option (complex_opt2).(garply).(.protobuf_unittest.quux) = 1998;
    option (complex_opt2).(protobuf_unittest.garply).(corge).qux = 2121;
    option (ComplexOptionType2.ComplexOptionType4.complex_opt4).waldo = 1971;
    option (complex_opt2).fred.waldo = 321;
    option (complex_opt2).barney = { waldo: 101 };
    option (complex_opt2).barney = { waldo: 212 };
    option (protobuf_unittest.complex_opt3).qux = 9;
    option (complex_opt3).complexoptiontype5.plugh = 22;
    option (complexopt6).xyzzy = 24;
}

message AggregateMessageSet {
    option message_set_wire_format = true;

    extensions 4 to max;
}

message AggregateMessageSetElement {
    extend AggregateMessageSet {
        optional AggregateMessageSetElement message_set_extension = 15447542;
    }
    optional string s = 1;
}

message Aggregate {
    optional int32 i = 1;
    optional string s = 2;

    optional Aggregate sub = 3;

    optional google.protobuf.FileOptions file = 4;
    extend google.protobuf.FileOptions {
        optional Aggregate nested = 15476903;
    }

    optional AggregateMessageSet mset = 5;

    optional google.protobuf.Any any = 6;
}

extend google.protobuf.FileOptions {
    optional Aggregate fileopt = 15478479;
}
extend google.protobuf.MessageOptions {
    optional Aggregate msgopt = 15480088;
}
extend google.protobuf.FieldOptions {
    optional Aggregate fieldopt = 15481374;
}
extend google.protobuf.EnumOptions {
    optional Aggregate enumopt = 15483218;
}
extend google.protobuf.EnumValueOptions {
    optional Aggregate enumvalopt = 15486921;
}
extend google.protobuf.ServiceOptions {
    optional Aggregate serviceopt = 15497145;
}
extend google.protobuf.MethodOptions {
    optional Aggregate methodopt = 15512713;
}

option (fileopt) = {
    s: 'FileAnnotation'
    i: 100

    sub { s: 'NestedFileAnnotation' }

    file {
        [protobuf_unittest.fileopt] {
            s: 'FileExtensionAnnotation'
        }
    }

    mset {
        [protobuf_unittest.AggregateMessageSetElement.message_set_extension] {
            s: 'EmbeddedMessageSetElement'
        }
    }

    any {
        [type.googleapis.com/protobuf_unittest.AggregateMessageSetElement] {
            s: 'EmbeddedMessageSetElement'
        }
    }
};

message AggregateMessage {
    option (msgopt) = { i: 101 s: 'MessageAnnotation' };

    optional int32 fieldname = 1 [(fieldopt) = { s: 'FieldAnnotation' }];
}

service AggregateService {
    option (serviceopt) = { s: 'ServiceAnnotation' };

    rpc Method(AggregateMessage) returns (AggregateMessage) {
        option (methodopt) = { s: 'MethodAnnotation' };
    }
}

enum AggregateEnum {
    option (enumopt) = { s: 'EnumAnnotation' };

    VALUE = 1 [(enumvalopt) = { s: 'EnumValueAnnotation' }];
}

message NestedOptionType {
    message NestedMessage {
        option (message_opt1) = 1001;
        optional int32 nested_field = 1 [(field_opt1) = 1002];
    }
    enum NestedEnum {
        option (enum_opt1) = 1003;
        NESTED_ENUM_VALUE = 1 [(enum_value_opt1) = 1004];
    }
    extend google.protobuf.FileOptions {
        optional int32 nested_extension = 7912573 [(field_opt2) = 1005];
    }
}

message OldOptionType {
    enum TestEnum {
        OLD_VALUE = 0;
    }
    required TestEnum value = 1;
}

message NewOptionType {
    enum TestEnum {
        OLD_VALUE = 0;
        NEW_VALUE = 1;
    }
    required TestEnum value = 1;
}

extend google.protobuf.MessageOptions {
    optional OldOptionType required_enum_opt = 106161807;
}

message TestMessageWithRequiredEnumOption {
    option (required_enum_opt) = { value: OLD_VALUE };
}

// add this message for testing whether this file was successfully parsed
message TestFileParsed {
    optional int32 last_field_for_wireshark_test = 1;
}
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/addressbook.proto
// This file comes from the official Protobuf example with a little modification.
syntax = "proto3";

package tutorial;

import "google/protobuf/timestamp.proto";

message Person {
    string name = 1;
    int32 id = 2; // Unique ID number for this person.
    string email = 3;

    enum PhoneType {
        MOBILE = 0;
        HOME = 1;
        WORK = 2;
    }

    message PhoneNumber {
        string number = 1;
        PhoneType type = 2;
    }

    repeated PhoneNumber phone = 4;

    google.protobuf.Timestamp last_updated = 5;

    bytes portrait_image = 6;
}

message AddressBook {
    repeated Person people = 1;
}
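// Reader's note (not part of the original example): when eyeballing raw
// captures of this message, the first fields are easy to spot by their tag
// bytes: "name" begins with 0x0A ((field 1 << 3) | wire type 2,
// length-delimited) and "id" with 0x10 ((field 2 << 3) | wire type 0, varint).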
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/greet.proto
// This file is from https://github.com/grpc/grpc-dotnet/blob/v2.42.x/examples/Browser/Proto/greet.proto
syntax = "proto3";

package greet;

// The greeting service definition.
service Greeter {
    // Sends a greeting
    rpc SayHello (HelloRequest) returns (HelloReply);
    rpc SayHellos (HelloRequest) returns (stream HelloReply);
}

// The request message containing the user's name.
message HelloRequest {
    string name = 1;
}

// The response message containing the greetings
message HelloReply {
    string message = 1;
}
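// Reader's note (not part of the upstream file): each HelloReply in the
// SayHellos response stream travels in its own gRPC length-prefixed frame
// over HTTP/2 (a 1-byte compressed flag, a 4-byte big-endian message length,
// then the serialized HelloReply bytes), so a dissector sees one protobuf
// message per frame rather than one blob for the whole stream.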
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/person_search_service.proto
// A gRPC service that searches for persons based on certain attributes.
syntax = "proto3";

package tutorial;

import "addressbook.proto";

message PersonSearchRequest {
    repeated string name = 1;
    repeated int32 id = 2;
    repeated string phoneNumber = 3;
}

service PersonSearchService {
    rpc Search (PersonSearchRequest) returns (stream Person) {}
}
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/test_default_value.proto
// Test default values of Protobuf fields
syntax = "proto2";

package wireshark.protobuf.test;

message TestDefaultValueMessage {

    enum EnumFoo {
        ENUM_FOO_V_FIRST = 1;
        ENUM_FOO_V_SECOND = 0x2;
        ENUM_FOO_V_THIRD = 3;
        ENUM_FOO_V_FOURTH = - 4;
    }

    // The format of field name is:
    //   <type> "With" ( "Value" | "DefaultValue" | "NoValue" ) [ "_" <correct_value_in_wireshark> ]
    // The "DefaultValue" fields should be wrapped with generated mark ("[" and "]") of Wireshark tree item.
    // The "NoValue" fields should not appear in Wireshark.

    // The default value is overridden to 8 at running time.
    required int32 int32WithValue_8 = 1 [ default = 2 ];
    // The default value is overridden to ENUM_FOO_V_THIRD at running time.
    optional EnumFoo enumFooWithValue_Third = 2 [ default = ENUM_FOO_V_SECOND ];

    // default values of bool
    optional bool boolWithDefaultValue_False = 11;
    optional bool boolWithDefaultValue_True = 12 [ default = true ];

    // default values of enum
    optional EnumFoo enumFooWithDefaultValue_First = 21;
    optional EnumFoo enumFooWithDefaultValue_Second = 22 [ default = ENUM_FOO_V_SECOND ];
    optional EnumFoo enumFooWithDefaultValue_Fouth = 23 [ default = ENUM_FOO_V_FOURTH ];

    // default values of integer number
    optional int32 int32WithDefaultValue_0 = 31;
    optional int64 int64WithDefaultValue_Negative1152921504606846976 = 32 [ default = - 1152921504606846976 ];
    optional uint32 uint32WithDefaultValue_11 = 33 [ default = 11 ];
    optional uint64 uint64WithDefaultValue_1152921504606846976 = 34 [ default = 1152921504606846976 ]; // equals to 2^60
    optional sint32 sint32WithDefaultValue_Negative12 = 35 [ default = -12 ];
    optional sint64 sint64WithDefaultValue_0 = 36; // default value is zero
    optional fixed64 fixed64WithDefaultValue_1152921504606846976 = 37 [ default = 1152921504606846976 ];
    optional sfixed32 sfixed32WithDefaultValue_Negative31 = 38 [ default = -0X1f ]; // -31

    // default values of float and double
    optional float floatWithDefaultValue_0point23 = 41 [ default = 0.23 ];
    optional double doubleWithDefaultValue_Negative0point12345678 = 42 [ default = -0.12345678 ];

    // default values of string and bytes
    optional string stringWithNoValue = 51; // default value must not appear because not declared
    optional string stringWithDefaultValue_SymbolPi = 52 [ default = "The symbol \'\xF0\x9D\x9B\x91\' is mathematical bold small Pi."];
    optional bytes bytesWithNoValue = 53; // default value must not appear because not declared
    // '\'nnn is octal value of a byte, '\x'nn is hex value of a byte
    optional bytes bytesWithDefaultValue_1F2F890D0A00004B = 54 [ default = "\x1F\x2F\211\r\n\000\x0\x4B" ];

    // others
    repeated int32 repeatedFieldWithNoValue = 81; // should not appear
    required int32 missingRequiredField = 82; // for testing required field. (comment this line if you regenerated stub code)

    // test taking keywords as identifiers
    optional int32 message = 83;
    optional int32 optional = 84;
}
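// Worked example for the escape rules above (added note, not test data):
// the default "\x1F\x2F\211\r\n\000\x0\x4B" decodes byte by byte to
// 1F 2F 89 0D 0A 00 00 4B (octal 211 is hex 89; \r is 0D; \n is 0A),
// which is exactly the 1F2F890D0A00004B spelled out in the field name.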
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/test_leading_dot.proto
syntax="proto3"; package a.b; message a { string param1 = 1; message b { string param2 = 2; message c { string param3 = 3; } } } message msg { a.b.c param4 = 4; /* the full name of the type is a.b.a.b.c */ .a.b.c param5 = 5; /* the full name of the type is a.b.c */ } message c { string param6 = 6; }
Protocol Buffers
wireshark/test/protobuf_lang_files/user_defined_types/test_map.proto
syntax="proto3"; package test.map; message Foo { int32 param1 = 1; } message MapMaster { oneof Abc { string param2 = 2; string param3 = 3; } map<string, int64> param4 = 4; map<sint32, Foo> param5 = 5; }
Protocol Buffers
wireshark/test/protobuf_lang_files/well_know_types/google/protobuf/any.proto
// This file is from https://github.com/protocolbuffers/protobuf/blob/3.14.x/src/google/protobuf/any.proto
// To reduce the file size, some comments have been removed.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "google.golang.org/protobuf/types/known/anypb";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

message Any {
  // A URL/resource name that uniquely identifies the type of the serialized
  // protocol buffer message.
  string type_url = 1;

  // Must be a valid serialized protocol buffer of the above specified type.
  bytes value = 2;
}
Protocol Buffers
wireshark/test/protobuf_lang_files/well_know_types/google/protobuf/descriptor.proto
// This file is from https://github.com/protocolbuffers/protobuf/blob/3.14.x/src/google/protobuf/descriptor.proto
// To reduce the file size, some comments have been removed.

syntax = "proto2";

package google.protobuf;

option go_package = "google.golang.org/protobuf/types/descriptorpb";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;
option optimize_for = SPEED;

message FileDescriptorSet {
  repeated FileDescriptorProto file = 1;
}

message FileDescriptorProto {
  optional string name = 1;
  optional string package = 2;
  repeated string dependency = 3;
  repeated int32 public_dependency = 10;
  repeated int32 weak_dependency = 11;
  repeated DescriptorProto message_type = 4;
  repeated EnumDescriptorProto enum_type = 5;
  repeated ServiceDescriptorProto service = 6;
  repeated FieldDescriptorProto extension = 7;
  optional FileOptions options = 8;
  optional SourceCodeInfo source_code_info = 9;
  optional string syntax = 12;
}

message DescriptorProto {
  optional string name = 1;
  repeated FieldDescriptorProto field = 2;
  repeated FieldDescriptorProto extension = 6;
  repeated DescriptorProto nested_type = 3;
  repeated EnumDescriptorProto enum_type = 4;
  message ExtensionRange {
    optional int32 start = 1;
    optional int32 end = 2;
    optional ExtensionRangeOptions options = 3;
  }
  repeated ExtensionRange extension_range = 5;
  repeated OneofDescriptorProto oneof_decl = 8;
  optional MessageOptions options = 7;
  message ReservedRange {
    optional int32 start = 1;
    optional int32 end = 2;
  }
  repeated ReservedRange reserved_range = 9;
  repeated string reserved_name = 10;
}

message ExtensionRangeOptions {
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message FieldDescriptorProto {
  enum Type {
    TYPE_DOUBLE = 1;
    TYPE_FLOAT = 2;
    TYPE_INT64 = 3;
    TYPE_UINT64 = 4;
    TYPE_INT32 = 5;
    TYPE_FIXED64 = 6;
    TYPE_FIXED32 = 7;
    TYPE_BOOL = 8;
    TYPE_STRING = 9;
    TYPE_GROUP = 10;
    TYPE_MESSAGE = 11;
    TYPE_BYTES = 12;
    TYPE_UINT32 = 13;
    TYPE_ENUM = 14;
    TYPE_SFIXED32 = 15;
    TYPE_SFIXED64 = 16;
    TYPE_SINT32 = 17;
    TYPE_SINT64 = 18;
  }
  enum Label {
    LABEL_OPTIONAL = 1;
    LABEL_REQUIRED = 2;
    LABEL_REPEATED = 3;
  }
  optional string name = 1;
  optional int32 number = 3;
  optional Label label = 4;
  optional Type type = 5;
  optional string type_name = 6;
  optional string extendee = 2;
  optional string default_value = 7;
  optional int32 oneof_index = 9;
  optional string json_name = 10;
  optional FieldOptions options = 8;
  optional bool proto3_optional = 17;
}

message OneofDescriptorProto {
  optional string name = 1;
  optional OneofOptions options = 2;
}

message EnumDescriptorProto {
  optional string name = 1;
  repeated EnumValueDescriptorProto value = 2;
  optional EnumOptions options = 3;
  message EnumReservedRange {
    optional int32 start = 1;
    optional int32 end = 2;
  }
  repeated EnumReservedRange reserved_range = 4;
  repeated string reserved_name = 5;
}

message EnumValueDescriptorProto {
  optional string name = 1;
  optional int32 number = 2;
  optional EnumValueOptions options = 3;
}

message ServiceDescriptorProto {
  optional string name = 1;
  repeated MethodDescriptorProto method = 2;
  optional ServiceOptions options = 3;
}

message MethodDescriptorProto {
  optional string name = 1;
  optional string input_type = 2;
  optional string output_type = 3;
  optional MethodOptions options = 4;
  optional bool client_streaming = 5 [default = false];
  optional bool server_streaming = 6 [default = false];
}

message FileOptions {
  optional string java_package = 1;
  optional string java_outer_classname = 8;
  optional bool java_multiple_files = 10 [default = false];
  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
  optional bool java_string_check_utf8 = 27 [default = false];
  enum OptimizeMode {
    SPEED = 1;
    CODE_SIZE = 2;
    LITE_RUNTIME = 3;
  }
  optional OptimizeMode optimize_for = 9 [default = SPEED];
  optional string go_package = 11;
  optional bool cc_generic_services = 16 [default = false];
  optional bool java_generic_services = 17 [default = false];
  optional bool py_generic_services = 18 [default = false];
  optional bool php_generic_services = 42 [default = false];
  optional bool deprecated = 23 [default = false];
  optional bool cc_enable_arenas = 31 [default = true];
  optional string objc_class_prefix = 36;
  optional string csharp_namespace = 37;
  optional string swift_prefix = 39;
  optional string php_class_prefix = 40;
  optional string php_namespace = 41;
  optional string php_metadata_namespace = 44;
  optional string ruby_package = 45;
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
  reserved 38;
}

message MessageOptions {
  optional bool message_set_wire_format = 1 [default = false];
  optional bool no_standard_descriptor_accessor = 2 [default = false];
  optional bool deprecated = 3 [default = false];
  optional bool map_entry = 7;
  reserved 8;
  reserved 9;
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message FieldOptions {
  optional CType ctype = 1 [default = STRING];
  enum CType {
    STRING = 0;
    CORD = 1;
    STRING_PIECE = 2;
  }
  optional bool packed = 2;
  optional JSType jstype = 6 [default = JS_NORMAL];
  enum JSType {
    JS_NORMAL = 0;
    JS_STRING = 1;
    JS_NUMBER = 2;
  }
  optional bool lazy = 5 [default = false];
  optional bool deprecated = 3 [default = false];
  optional bool weak = 10 [default = false];
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
  reserved 4;
}

message OneofOptions {
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message EnumOptions {
  optional bool allow_alias = 2;
  optional bool deprecated = 3 [default = false];
  reserved 5;
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message EnumValueOptions {
  optional bool deprecated = 1 [default = false];
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message ServiceOptions {
  optional bool deprecated = 33 [default = false];
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message MethodOptions {
  optional bool deprecated = 33 [default = false];
  enum IdempotencyLevel {
    IDEMPOTENCY_UNKNOWN = 0;
    NO_SIDE_EFFECTS = 1;
    IDEMPOTENT = 2;
  }
  optional IdempotencyLevel idempotency_level = 34 [default = IDEMPOTENCY_UNKNOWN];
  repeated UninterpretedOption uninterpreted_option = 999;
  extensions 1000 to max;
}

message UninterpretedOption {
  message NamePart {
    required string name_part = 1;
    required bool is_extension = 2;
  }
  repeated NamePart name = 2;
  optional string identifier_value = 3;
  optional uint64 positive_int_value = 4;
  optional int64 negative_int_value = 5;
  optional double double_value = 6;
  optional bytes string_value = 7;
  optional string aggregate_value = 8;
}

message SourceCodeInfo {
  repeated Location location = 1;
  message Location {
    repeated int32 path = 1 [packed = true];
    repeated int32 span = 2 [packed = true];
    optional string leading_comments = 3;
    optional string trailing_comments = 4;
    repeated string leading_detached_comments = 6;
  }
}

message GeneratedCodeInfo {
  repeated Annotation annotation = 1;
  message Annotation {
    repeated int32 path = 1 [packed = true];
    optional string source_file = 2;
    optional int32 begin = 3;
    optional int32 end = 4;
  }
}
Protocol Buffers
wireshark/test/protobuf_lang_files/well_know_types/google/protobuf/timestamp.proto
// This file is from https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
// To reduce the file size, some comments have been removed.

syntax = "proto3";

package google.protobuf;

message Timestamp {
  // Represents seconds of UTC time since Unix epoch
  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
  // 9999-12-31T23:59:59Z inclusive.
  int64 seconds = 1;

  // Non-negative fractions of a second at nanosecond resolution. Negative
  // second values with fractions must still have non-negative nanos values
  // that count forward in time. Must be from 0 to 999,999,999
  // inclusive.
  int32 nanos = 2;
}
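A Timestamp therefore splits an instant into whole seconds and a non-negative nanosecond remainder. A small sketch of the convention in Python (illustrative only; it truncates nanoseconds to microsecond resolution and assumes non-negative seconds):

# Converting a Timestamp (seconds, nanos) to a datetime, illustrative only.
from datetime import datetime, timedelta, timezone

def to_datetime(seconds: int, nanos: int) -> datetime:
    assert 0 <= nanos <= 999_999_999
    base = datetime.fromtimestamp(seconds, tz=timezone.utc)
    return base + timedelta(microseconds=nanos // 1000)

print(to_datetime(0, 500_000_000))  # 1970-01-01 00:00:00.500000+00:00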
Python
wireshark/test/suite_dfilter/dfiltertest.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import subprocess

import pytest


@pytest.fixture
def dfilter_cmd(cmd_tshark, capture_file, request):
    def wrapped(dfilter, frame_number=None, prefs=None, read_filter=False):
        cmd = [
            cmd_tshark,
            "-n",       # No name resolution
            "-r",       # Next arg is trace file to read
            capture_file(request.instance.trace_file),
        ]
        if frame_number:
            cmd.extend([
                "-2",   # two-pass mode
                "--selected-frame={}".format(frame_number)
            ])
        if read_filter:
            cmd.extend([
                "-2",   # two-pass mode
                "-R",   # read filter (requires two-pass mode)
                dfilter
            ])
        else:
            cmd.extend([
                "-Y",   # packet display filter (used to be -R)
                dfilter
            ])
        if prefs:
            cmd.extend([
                "-o",
                prefs
            ])
        return cmd
    return wrapped


@pytest.fixture(scope='session')
def cmd_dftest(program):
    return program('dftest')


@pytest.fixture
def checkDFilterCount(dfilter_cmd, base_env):
    def checkDFilterCount_real(dfilter, expected_count, prefs=None):
        """Run a display filter and expect a certain number of packets."""
        output = subprocess.check_output(dfilter_cmd(dfilter, prefs=prefs),
                                         universal_newlines=True,
                                         stderr=subprocess.STDOUT,
                                         env=base_env)
        dfp_count = output.count("\n")
        msg = "Expected %d, got: %s\noutput: %r" % \
            (expected_count, dfp_count, output)
        assert dfp_count == expected_count, msg
    return checkDFilterCount_real


@pytest.fixture
def checkDFilterCountWithSelectedFrame(dfilter_cmd, base_env):
    def checkDFilterCount_real(dfilter, expected_count, selected_frame, prefs=None):
        """Run a display filter and expect a certain number of packets."""
        output = subprocess.check_output(dfilter_cmd(dfilter, frame_number=selected_frame, prefs=prefs),
                                         universal_newlines=True,
                                         stderr=subprocess.STDOUT,
                                         env=base_env)
        dfp_count = output.count("\n")
        msg = "Expected %d, got: %s\noutput: %r" % \
            (expected_count, dfp_count, output)
        assert dfp_count == expected_count, msg
    return checkDFilterCount_real


@pytest.fixture
def checkDFilterCountReadFilter(dfilter_cmd, base_env):
    def checkDFilterCount_real(dfilter, expected_count):
        """Run a read filter in two-pass mode and expect a certain number of packets."""
        output = subprocess.check_output(dfilter_cmd(dfilter, read_filter=True),
                                         universal_newlines=True,
                                         stderr=subprocess.STDOUT,
                                         env=base_env)
        dfp_count = output.count("\n")
        msg = "Expected %d, got: %s\noutput: %r" % \
            (expected_count, dfp_count, output)
        assert dfp_count == expected_count, msg
    return checkDFilterCount_real


@pytest.fixture
def checkDFilterFail(cmd_dftest, base_env):
    def checkDFilterFail_real(dfilter, error_message):
        """Run a display filter and expect dftest to fail."""
        proc = subprocess.Popen([cmd_dftest, '--', dfilter],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True,
                                env=base_env)
        outs, errs = proc.communicate()
        assert error_message in errs, \
            'Unexpected dftest stderr:\n%s\nstdout:\n%s' % (errs, outs)
        assert proc.returncode == 4, \
            'Unexpected dftest exit code: %d. stdout:\n%s\n' % \
            (proc.returncode, outs)
    return checkDFilterFail_real


@pytest.fixture
def checkDFilterSucceed(cmd_dftest, base_env):
    def checkDFilterSucceed_real(dfilter, expect_stdout=None):
        """Run a display filter and expect dftest to succeed."""
        proc = subprocess.Popen([cmd_dftest, '--', dfilter],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True,
                                env=base_env)
        outs, errs = proc.communicate()
        assert proc.returncode == 0, \
            'Unexpected dftest exit code: %d. stderr:\n%s\n' % \
            (proc.returncode, errs)
        if expect_stdout:
            assert expect_stdout in outs, \
                'Expected the string %s in the output' % expect_stdout
    return checkDFilterSucceed_real
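Taken together, these fixtures give every test module in this suite the same shape: set `trace_file` on the class, request a check fixture, and assert a packet count or an error string. A minimal hypothetical example of a new test module built on them (the class and test names are illustrative; the filter/error pair is borrowed from group_integer.py below):

# Hypothetical example of a test module using the fixtures above.
import pytest
from suite_dfilter.dfiltertest import *

class TestDfilterExample:
    trace_file = "http.pcap"   # resolved via the capture_file fixture

    def test_example_match(self, checkDFilterCount):
        # Expect exactly one packet to satisfy the filter.
        checkDFilterCount("tcp.port == 80", 1)

    def test_example_error(self, checkDFilterFail):
        # Same invalid-filter case exercised in group_integer.py below.
        checkDFilterFail("ip.version = 4", '"=" was unexpected in this context.')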
Python
wireshark/test/suite_dfilter/group_bytes.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterBytes:
    trace_file = "arp.pcap"

    def test_bytes_1(self, checkDFilterCount):
        dfilter = "arp.dst.hw == 00:64"
        checkDFilterCount(dfilter, 1)

    def test_bytes_2(self, checkDFilterCount):
        dfilter = "arp.dst.hw == 00:00"
        checkDFilterCount(dfilter, 0)
Python
wireshark/test/suite_dfilter/group_columns.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterColumns:
    trace_file = "http.pcap"

    def test_exists_1(self, checkDFilterCount):
        dfilter = "_ws.col.info"
        checkDFilterCount(dfilter, 1)

    def test_exists_2(self, checkDFilterFail):
        # Column not in the default configuration
        dfilter = "_ws.col.expert"
        error = f'"{dfilter}" is not a valid protocol or protocol field'
        checkDFilterFail(dfilter, error)

    def test_exists_3(self, checkDFilterFail):
        # Column not registered as a field (it behaves unusually if filtered)
        dfilter = "_ws.col.delta_time_dis"
        error = f'"{dfilter}" is not a valid protocol or protocol field'
        checkDFilterFail(dfilter, error)

    def test_func_1(self, checkDFilterCount):
        dfilter = "len(_ws.col.protocol) == 4"
        checkDFilterCount(dfilter, 1)

    def test_matches_1(self, checkDFilterSucceed):
        dfilter = '_ws.col.info matches "^HEAD"'
        checkDFilterSucceed(dfilter)

    def test_equal_1(self, checkDFilterCount):
        dfilter = '_ws.col.protocol == "HTTP"'
        checkDFilterCount(dfilter, 1)

    def test_equal_2(self, checkDFilterCount):
        dfilter = '_ws.col.def_dst == "207.46.134.94"'
        checkDFilterCount(dfilter, 1)

    def test_not_equal_1(self, checkDFilterCount):
        dfilter = '_ws.col.def_src != "10.0.0.5"'
        checkDFilterCount(dfilter, 0)

    def test_read_filter(self, checkDFilterCountReadFilter):
        dfilter = '_ws.col.protocol == "HTTP"'
        checkDFilterCountReadFilter(dfilter, 1)

    def test_add_column(self, checkDFilterCount):
        # Add a column to the configuration
        dfilter = '_ws.col.expert == "Chat"'
        checkDFilterCount(dfilter, 1, 'gui.column.format:"Expert","%a"')
Python
wireshark/test/suite_dfilter/group_double.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterDouble:
    trace_file = "icmp.pcapng.gz"

    def test_eq_1(self, checkDFilterCount):
        dfilter = "icmp.resptime == 492.204"
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = "icmp.resptime == 492.205"
        checkDFilterCount(dfilter, 0)

    def test_eq_3(self, checkDFilterCount):
        dfilter = "icmp.resptime == 492204e-3"
        checkDFilterCount(dfilter, 1)

    def test_eq_4(self, checkDFilterCount):
        dfilter = "icmp.resptime == 492205e-3"
        checkDFilterCount(dfilter, 0)

    def test_ne_1(self, checkDFilterCount):
        dfilter = "icmp.resptime != 492.204"
        checkDFilterCount(dfilter, 0)

    def test_ne_2(self, checkDFilterCount):
        dfilter = "icmp.resptime != 492.205"
        checkDFilterCount(dfilter, 1)

    def test_ne_3(self, checkDFilterCount):
        dfilter = "icmp.resptime != 492204e-3"
        checkDFilterCount(dfilter, 0)

    def test_ne_4(self, checkDFilterCount):
        dfilter = "icmp.resptime != 492205e-3"
        checkDFilterCount(dfilter, 1)

    def test_gt_1(self, checkDFilterCount):
        dfilter = "icmp.resptime > 492"
        checkDFilterCount(dfilter, 1)

    def test_gt_2(self, checkDFilterCount):
        dfilter = "icmp.resptime > 492.203"
        checkDFilterCount(dfilter, 1)

    def test_gt_3(self, checkDFilterCount):
        dfilter = "icmp.resptime > 493"
        checkDFilterCount(dfilter, 0)

    def test_ge_1(self, checkDFilterCount):
        dfilter = "icmp.resptime >= 493"
        checkDFilterCount(dfilter, 0)

    def test_ge_2(self, checkDFilterCount):
        dfilter = "icmp.resptime >= 492"
        checkDFilterCount(dfilter, 1)

    def test_ge_3(self, checkDFilterCount):
        dfilter = "icmp.resptime >= 492.204"
        checkDFilterCount(dfilter, 1)

    def test_lt_1(self, checkDFilterCount):
        dfilter = "icmp.resptime < 493"
        checkDFilterCount(dfilter, 1)

    def test_lt_2(self, checkDFilterCount):
        dfilter = "icmp.resptime < 492"
        checkDFilterCount(dfilter, 0)

    def test_lt_3(self, checkDFilterCount):
        dfilter = "icmp.resptime < 492.204"
        checkDFilterCount(dfilter, 0)

    def test_le_1(self, checkDFilterCount):
        dfilter = "icmp.resptime <= 492.204"
        checkDFilterCount(dfilter, 1)

    def test_le_2(self, checkDFilterCount):
        dfilter = "icmp.resptime <= 493"
        checkDFilterCount(dfilter, 1)

    def test_le_3(self, checkDFilterCount):
        dfilter = "icmp.resptime <= 492"
        checkDFilterCount(dfilter, 0)
Python
wireshark/test/suite_dfilter/group_ether.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterEther:
    trace_file = "ipx_rip.pcap"

    ### Note: Bytes test does not yet test FT_INT64.

    def test_eq_1(self, checkDFilterCount):
        dfilter = "eth.dst == ff:ff:ff:ff:ff:ff"
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = "eth.src == ff:ff:ff:ff:ff:ff"
        checkDFilterCount(dfilter, 0)

    def test_ne_1(self, checkDFilterCount):
        dfilter = "eth.dst != ff:ff:ff:ff:ff:ff"
        checkDFilterCount(dfilter, 0)

    def test_ne_2(self, checkDFilterCount):
        dfilter = "eth.src != ff:ff:ff:ff:ff:ff"
        checkDFilterCount(dfilter, 1)

    def test_gt_1(self, checkDFilterCount):
        dfilter = "eth.src > 00:aa:00:a3:e3:ff"
        checkDFilterCount(dfilter, 0)

    def test_gt_2(self, checkDFilterCount):
        dfilter = "eth.src > 00:aa:00:a3:e3:a4"
        checkDFilterCount(dfilter, 0)

    def test_gt_3(self, checkDFilterCount):
        dfilter = "eth.src > 00:aa:00:a3:e3:00"
        checkDFilterCount(dfilter, 1)

    def test_ge_1(self, checkDFilterCount):
        dfilter = "eth.src >= 00:aa:00:a3:e3:ff"
        checkDFilterCount(dfilter, 0)

    def test_ge_2(self, checkDFilterCount):
        dfilter = "eth.src >= 00:aa:00:a3:e3:a4"
        checkDFilterCount(dfilter, 1)

    def test_ge_3(self, checkDFilterCount):
        dfilter = "eth.src >= 00:aa:00:a3:e3:00"
        checkDFilterCount(dfilter, 1)

    def test_lt_1(self, checkDFilterCount):
        dfilter = "eth.src < 00:aa:00:a3:e3:ff"
        checkDFilterCount(dfilter, 1)

    def test_lt_2(self, checkDFilterCount):
        dfilter = "eth.src < 00:aa:00:a3:e3:a4"
        checkDFilterCount(dfilter, 0)

    def test_lt_3(self, checkDFilterCount):
        dfilter = "eth.src < 00:aa:00:a3:e3:00"
        checkDFilterCount(dfilter, 0)

    def test_le_1(self, checkDFilterCount):
        dfilter = "eth.src <= 00:aa:00:a3:e3:ff"
        checkDFilterCount(dfilter, 1)

    def test_le_2(self, checkDFilterCount):
        dfilter = "eth.src <= 00:aa:00:a3:e3:a4"
        checkDFilterCount(dfilter, 1)

    def test_le_3(self, checkDFilterCount):
        dfilter = "eth.src <= 00:aa:00:a3:e3:00"
        checkDFilterCount(dfilter, 0)

    def test_slice_1(self, checkDFilterCount):
        dfilter = "eth.src[0:3] == 00:aa:00"
        checkDFilterCount(dfilter, 1)

    def test_slice_2(self, checkDFilterCount):
        dfilter = "eth.src[-3:3] == a3:e3:a4"
        checkDFilterCount(dfilter, 1)

    def test_slice_3(self, checkDFilterCount):
        dfilter = "eth.src[1:4] == aa:00:a3:e3"
        checkDFilterCount(dfilter, 1)

    def test_slice_4(self, checkDFilterCount):
        dfilter = "eth.src[0] == 00"
        checkDFilterCount(dfilter, 1)

    def test_contains_1(self, checkDFilterCount):
        dfilter = "ipx.src.node contains a3"
        checkDFilterCount(dfilter, 1)

    def test_contains_2(self, checkDFilterCount):
        dfilter = "ipx.src.node contains a3:e3"
        checkDFilterCount(dfilter, 1)

    def test_contains_3(self, checkDFilterCount):
        dfilter = "ipx.src.node contains 00:aa:00:a3:e3:a4"
        checkDFilterCount(dfilter, 1)

    def test_contains_4(self, checkDFilterCount):
        dfilter = "ipx.src.node contains aa:e3"
        checkDFilterCount(dfilter, 0)
Python
wireshark/test/suite_dfilter/group_function.py
# Copyright (c) 2019 by Dario Lombardo <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestFunctionString:
    trace_file = "dhcp.pcap"

    def test_matches_1(self, checkDFilterCount):
        dfilter = "string(frame.number) matches \"[13579]$\""
        checkDFilterCount(dfilter, 2)

    def test_contains_1(self, checkDFilterCount):
        dfilter = "string(eth.src) contains \"00:08:74\""
        checkDFilterCount(dfilter, 2)

    def test_fail_1(self, checkDFilterFail):
        # Invalid filter (only non-string fields are supported)
        dfilter = "string(dhcp.server) == hostname"
        error = 'String conversion for field "dhcp.server" is not supported'
        checkDFilterFail(dfilter, error)

    def test_fail_2(self, checkDFilterFail):
        # Invalid field: value
        dfilter = "string(123) == \"123\""
        error = 'Only fields can be used as parameter for string()'
        checkDFilterFail(dfilter, error)

    def test_fail_3(self, checkDFilterFail):
        # Invalid field: protocol
        dfilter = "string(dhcp) == hostname"
        error = 'String conversion for field "dhcp" is not supported'
        checkDFilterFail(dfilter, error)

    def test_fail_4(self, checkDFilterFail):
        # Invalid field: bytes
        dfilter = "string(dhcp.option.value) == \"hostname\""
        error = 'String conversion for field "dhcp.option.value" is not supported'
        checkDFilterFail(dfilter, error)


class TestFunctionMaxMin:
    trace_file = "sip.pcapng"

    def test_min_1(self, checkDFilterCount):
        dfilter = 'min(udp.srcport, udp.dstport) == 5060'
        checkDFilterCount(dfilter, 5)

    def test_min_2(self, checkDFilterCount):
        dfilter = 'min(udp.srcport, udp.dstport) == 5070'
        checkDFilterCount(dfilter, 0)

    def test_max_1(self, checkDFilterCount):
        dfilter = 'max(udp.srcport, udp.dstport) == 5070'
        checkDFilterCount(dfilter, 3)

    def test_max_2(self, checkDFilterCount):
        dfilter = 'max(udp.srcport, udp.dstport) == 5060'
        checkDFilterCount(dfilter, 2)

    def test_max_3(self, checkDFilterCount):
        dfilter = 'max(udp.srcport, udp.dstport) < 5060'
        checkDFilterCount(dfilter, 1)

    def test_max_4(self, checkDFilterCount):
        dfilter = 'max(5060, udp.dstport) == udp.srcport'
        checkDFilterCount(dfilter, 2)

    def test_max_5(self, checkDFilterFail):
        error = 'Constant expression is invalid on the LHS'
        dfilter = 'max(5060, 5070) == udp.srcport'
        checkDFilterFail(dfilter, error)


class TestFunctionAbs:
    trace_file = "dhcp.pcapng"

    def test_function_abs_1(self, checkDFilterCount):
        dfilter = 'udp.dstport == abs(-67)'
        checkDFilterCount(dfilter, 2)
Python
wireshark/test/suite_dfilter/group_integer.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterInteger:
    trace_file = "ntp.pcap"

    def test_eq_1(self, checkDFilterCount):
        dfilter = "ip.version == 4"
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = "ip.version == 6"
        checkDFilterCount(dfilter, 0)

    def test_eq_3(self, checkDFilterFail):
        # Invalid filter (only one equals sign)
        dfilter = "ip.version = 4"
        error = '"=" was unexpected in this context.'
        checkDFilterFail(dfilter, error)

    def test_eq_4(self, checkDFilterFail):
        # Invalid filter
        dfilter = "ip.version == the quick brown fox jumps over the lazy dog"
        error = '"quick" was unexpected in this context.'
        checkDFilterFail(dfilter, error)

    def test_eq_5(self, checkDFilterFail):
        # Invalid filter
        dfilter = "ip.version == 4 the quick brown fox jumps over the lazy dog"
        error = '"the" was unexpected in this context.'
        checkDFilterFail(dfilter, error)

    def test_eq_6(self, checkDFilterCount):
        dfilter = "udp.srcport == 123"
        checkDFilterCount(dfilter, 1)

    def test_eq_7(self, checkDFilterCount):
        dfilter = "udp.srcport == 0173"
        checkDFilterCount(dfilter, 1)

    def test_eq_8(self, checkDFilterCount):
        dfilter = "udp.srcport == 0x7B"
        checkDFilterCount(dfilter, 1)

    def test_eq_9(self, checkDFilterCount):
        dfilter = "udp.srcport == 0b1111011"
        checkDFilterCount(dfilter, 1)

    def test_ne_1(self, checkDFilterCount):
        dfilter = "ip.version != 0"
        checkDFilterCount(dfilter, 1)

    def test_ne_2(self, checkDFilterCount):
        dfilter = "ip.version != 4"
        checkDFilterCount(dfilter, 0)

    def test_u_gt_1(self, checkDFilterCount):
        dfilter = "ip.version > 3"
        checkDFilterCount(dfilter, 1)

    def test_u_gt_2(self, checkDFilterCount):
        dfilter = "ip.version > 4"
        checkDFilterCount(dfilter, 0)

    def test_u_gt_3(self, checkDFilterCount):
        dfilter = "ip.version > 5"
        checkDFilterCount(dfilter, 0)

    def test_u_ge_1(self, checkDFilterCount):
        dfilter = "ip.version >= 3"
        checkDFilterCount(dfilter, 1)

    def test_u_ge_2(self, checkDFilterCount):
        dfilter = "ip.version >= 4"
        checkDFilterCount(dfilter, 1)

    def test_u_ge_3(self, checkDFilterCount):
        dfilter = "ip.version >= 5"
        checkDFilterCount(dfilter, 0)

    def test_u_lt_1(self, checkDFilterCount):
        dfilter = "ip.version < 3"
        checkDFilterCount(dfilter, 0)

    def test_u_lt_2(self, checkDFilterCount):
        dfilter = "ip.version < 4"
        checkDFilterCount(dfilter, 0)

    def test_u_lt_3(self, checkDFilterCount):
        dfilter = "ip.version < 5"
        checkDFilterCount(dfilter, 1)

    def test_u_le_1(self, checkDFilterCount):
        dfilter = "ip.version <= 3"
        checkDFilterCount(dfilter, 0)

    def test_u_le_2(self, checkDFilterCount):
        dfilter = "ip.version <= 4"
        checkDFilterCount(dfilter, 1)

    def test_u_le_3(self, checkDFilterCount):
        dfilter = "ip.version <= 5"
        checkDFilterCount(dfilter, 1)

    def test_s_gt_1(self, checkDFilterCount):
        dfilter = "ntp.precision > -12"
        checkDFilterCount(dfilter, 1)

    def test_s_gt_2(self, checkDFilterCount):
        dfilter = "ntp.precision > -11"
        checkDFilterCount(dfilter, 0)

    def test_s_gt_3(self, checkDFilterCount):
        dfilter = "ntp.precision > -10"
        checkDFilterCount(dfilter, 0)

    def test_s_ge_1(self, checkDFilterCount):
        dfilter = "ntp.precision >= -12"
        checkDFilterCount(dfilter, 1)

    def test_s_ge_2(self, checkDFilterCount):
        dfilter = "ntp.precision >= -11"
        checkDFilterCount(dfilter, 1)

    def test_s_ge_3(self, checkDFilterCount):
        dfilter = "ntp.precision >= -10"
        checkDFilterCount(dfilter, 0)

    def test_s_lt_1(self, checkDFilterCount):
        dfilter = "ntp.precision < -12"
        checkDFilterCount(dfilter, 0)

    def test_s_lt_2(self, checkDFilterCount):
        dfilter = "ntp.precision < -11"
        checkDFilterCount(dfilter, 0)

    def test_s_lt_3(self, checkDFilterCount):
        dfilter = "ntp.precision < -10"
        checkDFilterCount(dfilter, 1)

    def test_s_le_1(self, checkDFilterCount):
        dfilter = "ntp.precision <= -12"
        checkDFilterCount(dfilter, 0)

    def test_s_le_2(self, checkDFilterCount):
        dfilter = "ntp.precision <= -11"
        checkDFilterCount(dfilter, 1)

    def test_s_le_3(self, checkDFilterCount):
        dfilter = "ntp.precision <= -10"
        checkDFilterCount(dfilter, 1)

    def test_bool_eq_1(self, checkDFilterCount):
        dfilter = "ip.flags.df == 0"
        checkDFilterCount(dfilter, 1)

    def test_bool_eq_2(self, checkDFilterCount):
        dfilter = "ip.flags.df == 1"
        checkDFilterCount(dfilter, 0)

    def test_bool_ne_1(self, checkDFilterCount):
        dfilter = "ip.flags.df != 1"
        checkDFilterCount(dfilter, 1)

    def test_bool_ne_2(self, checkDFilterCount):
        dfilter = "ip.flags.df != 0"
        checkDFilterCount(dfilter, 0)


class TestDfilterInteger1Byte:
    trace_file = "ipx_rip.pcap"

    def test_ipx_1(self, checkDFilterCount):
        dfilter = "ipx.src.net == 0x28"
        checkDFilterCount(dfilter, 1)

    def test_ipx_2(self, checkDFilterCount):
        dfilter = "ipx.src.net == 0x29"
        checkDFilterCount(dfilter, 0)


class TestDfilterUint64:
    trace_file = "nfs.pcap"

    def test_uint64_1(self, checkDFilterCount):
        dfilter = "nfs.fattr3.size == 264032"
        checkDFilterCount(dfilter, 1)

    def test_uint64_2(self, checkDFilterCount):
        dfilter = "nfs.fattr3.size == 264000"
        checkDFilterCount(dfilter, 0)
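test_eq_6 through test_eq_9 all match the same packet because `123`, `0173` (C-style octal), `0x7B` (hex) and `0b1111011` (binary) denote the same port number; the display filter syntax accepts the bare-leading-zero octal form, whereas Python spells it `0o173`. A one-line check:

# The four literals used in test_eq_6..test_eq_9 denote the same value.
assert 123 == 0o173 == 0x7B == 0b1111011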
Python
wireshark/test/suite_dfilter/group_ipv4.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterIpv4:
    trace_file = "nfs.pcap"

    def test_eq_1(self, checkDFilterCount):
        dfilter = "ip.src == 172.25.100.14"
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = "ip.src == 255.255.255.255"
        checkDFilterCount(dfilter, 0)

    def test_ne_1(self, checkDFilterCount):
        dfilter = "ip.src != 172.25.100.14"
        checkDFilterCount(dfilter, 1)

    def test_ne_2(self, checkDFilterCount):
        dfilter = "ip.src != 255.255.255.255"
        checkDFilterCount(dfilter, 2)

    def test_gt_1(self, checkDFilterCount):
        dfilter = "ip.dst > 198.95.230.200"
        checkDFilterCount(dfilter, 0)

    def test_gt_2(self, checkDFilterCount):
        dfilter = "ip.dst > 198.95.230.20"
        checkDFilterCount(dfilter, 0)

    def test_gt_3(self, checkDFilterCount):
        dfilter = "ip.dst > 198.95.230.10"
        checkDFilterCount(dfilter, 1)

    def test_ge_1(self, checkDFilterCount):
        dfilter = "ip.dst >= 198.95.230.200"
        checkDFilterCount(dfilter, 0)

    def test_ge_2(self, checkDFilterCount):
        dfilter = "ip.dst >= 198.95.230.20"
        checkDFilterCount(dfilter, 1)

    def test_ge_3(self, checkDFilterCount):
        dfilter = "ip.dst >= 198.95.230.10"
        checkDFilterCount(dfilter, 1)

    def test_lt_1(self, checkDFilterCount):
        dfilter = "ip.src < 172.25.100.140"
        checkDFilterCount(dfilter, 1)

    def test_lt_2(self, checkDFilterCount):
        dfilter = "ip.src < 172.25.100.14"
        checkDFilterCount(dfilter, 0)

    def test_lt_3(self, checkDFilterCount):
        dfilter = "ip.src < 172.25.100.10"
        checkDFilterCount(dfilter, 0)

    def test_le_1(self, checkDFilterCount):
        dfilter = "ip.src <= 172.25.100.140"
        checkDFilterCount(dfilter, 1)

    def test_le_2(self, checkDFilterCount):
        dfilter = "ip.src <= 172.25.100.14"
        checkDFilterCount(dfilter, 1)

    def test_le_3(self, checkDFilterCount):
        dfilter = "ip.src <= 172.25.100.10"
        checkDFilterCount(dfilter, 0)

    def test_cidr_eq_1(self, checkDFilterCount):
        dfilter = "ip.src == 172.25.100.14/32"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_2(self, checkDFilterCount):
        dfilter = "ip.src == 172.25.100.0/24"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_3(self, checkDFilterCount):
        dfilter = "ip.src == 172.25.0.0/16"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_4(self, checkDFilterCount):
        dfilter = "ip.src == 172.0.0.0/8"
        checkDFilterCount(dfilter, 1)

    def test_cidr_ne_1(self, checkDFilterCount):
        dfilter = "ip.src != 172.25.100.14/32"
        checkDFilterCount(dfilter, 1)

    def test_cidr_ne_2(self, checkDFilterCount):
        dfilter = "ip.src != 172.25.100.0/24"
        checkDFilterCount(dfilter, 1)

    def test_cidr_ne_3(self, checkDFilterCount):
        dfilter = "ip.src != 172.25.0.0/16"
        checkDFilterCount(dfilter, 1)

    def test_cidr_ne_4(self, checkDFilterCount):
        dfilter = "ip.src != 200.0.0.0/8"
        checkDFilterCount(dfilter, 2)

    def test_slice_1(self, checkDFilterCount):
        dfilter = "ip.src[0:2] == ac:19"
        checkDFilterCount(dfilter, 1)

    def test_slice_2(self, checkDFilterCount):
        dfilter = "ip.src[0:2] == 00:00"
        checkDFilterCount(dfilter, 0)

    def test_slice_3(self, checkDFilterCount):
        dfilter = "ip.src[2:2] == 64:0e"
        checkDFilterCount(dfilter, 1)

    def test_slice_4(self, checkDFilterCount):
        dfilter = "ip.src[2:2] == ff:ff"
        checkDFilterCount(dfilter, 0)

    def test_count_1(self, checkDFilterCount):
        dfilter = "count(ip.src) == 1"
        checkDFilterCount(dfilter, 2)

    def test_count_2(self, checkDFilterCount):
        dfilter = "count(ip.addr) == 2"
        checkDFilterCount(dfilter, 2)
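The CIDR tests compare a host field against a network: `ip.src == 172.25.0.0/16` asks whether the address falls inside the /16. Python's ipaddress module expresses the same predicate, which makes the expected counts above easy to verify (a model of the semantics, not Wireshark's code):

# Model of the CIDR comparisons in the tests above.
import ipaddress

src = ipaddress.ip_address("172.25.100.14")
assert src in ipaddress.ip_network("172.25.0.0/16")
assert src in ipaddress.ip_network("172.0.0.0/8")
assert src not in ipaddress.ip_network("200.0.0.0/8")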
Python
wireshark/test/suite_dfilter/group_ipv6.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterIpv6:
    trace_file = "ipv6.pcap"

    def test_eq_1(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff05::9999"
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff05::9990"
        checkDFilterCount(dfilter, 0)

    def test_ne_1(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff05::9990"
        checkDFilterCount(dfilter, 1)

    def test_ne_2(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff05::9999"
        checkDFilterCount(dfilter, 0)

    def test_gt_1(self, checkDFilterCount):
        dfilter = "ipv6.dst > ff05::0000"
        checkDFilterCount(dfilter, 1)

    def test_gt_2(self, checkDFilterCount):
        dfilter = "ipv6.dst > ff05::9999"
        checkDFilterCount(dfilter, 0)

    def test_ge_1(self, checkDFilterCount):
        dfilter = "ipv6.dst >= ff05::9999"
        checkDFilterCount(dfilter, 1)

    def test_ge_2(self, checkDFilterCount):
        dfilter = "ipv6.dst >= ff05::a000"
        checkDFilterCount(dfilter, 0)

    def test_lt_1(self, checkDFilterCount):
        dfilter = "ipv6.dst < ff05::a000"
        checkDFilterCount(dfilter, 1)

    def test_lt_2(self, checkDFilterCount):
        dfilter = "ipv6.dst < ff05::9999"
        checkDFilterCount(dfilter, 0)

    def test_le_1(self, checkDFilterCount):
        dfilter = "ipv6.dst <= ff05::9999"
        checkDFilterCount(dfilter, 1)

    def test_le_2(self, checkDFilterCount):
        dfilter = "ipv6.dst <= ff05::9998"
        checkDFilterCount(dfilter, 0)

    def test_cidr_eq_1(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff05::9999/128"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_2(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff05::0/64"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_3(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff05::ffff/112"
        checkDFilterCount(dfilter, 1)

    def test_cidr_eq_4(self, checkDFilterCount):
        dfilter = "ipv6.dst == ff04::0/64"
        checkDFilterCount(dfilter, 0)

    def test_cidr_ne_1(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff05::9999/128"
        checkDFilterCount(dfilter, 0)

    def test_cidr_ne_2(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff05::0/64"
        checkDFilterCount(dfilter, 0)

    def test_cidr_ne_3(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff05::ffff/112"
        checkDFilterCount(dfilter, 0)

    def test_cidr_ne_4(self, checkDFilterCount):
        dfilter = "ipv6.dst != ff04::00/64"
        checkDFilterCount(dfilter, 1)

    def test_slice_1(self, checkDFilterCount):
        dfilter = "ipv6.dst[14:2] == 99:99"
        checkDFilterCount(dfilter, 1)

    def test_slice_2(self, checkDFilterCount):
        dfilter = "ipv6.dst[14:2] == 00:00"
        checkDFilterCount(dfilter, 0)

    def test_slice_3(self, checkDFilterCount):
        dfilter = "ipv6.dst[15:1] == 99"
        checkDFilterCount(dfilter, 1)

    def test_slice_4(self, checkDFilterCount):
        dfilter = "ipv6.dst[15:1] == 00"
        checkDFilterCount(dfilter, 0)

    #
    # Test some addresses are parsed correctly
    #

    def test_unspecified_1(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::"
        checkDFilterSucceed(dfilter)

    def test_unspecified_2(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::/128"
        checkDFilterSucceed(dfilter)

    def test_loopback_1(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::1"
        checkDFilterSucceed(dfilter)

    def test_loopback_2(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::1/128"
        checkDFilterSucceed(dfilter)

    def test_compress_1(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::2000"
        checkDFilterSucceed(dfilter)

    def test_compress_2(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::2000/64"
        checkDFilterSucceed(dfilter)

    def test_compress_3(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::1:2000"
        checkDFilterSucceed(dfilter)

    def test_compress_4(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == 2000::"
        checkDFilterSucceed(dfilter)

    def test_compress_5(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == 2000::/120"
        checkDFilterSucceed(dfilter)

    def test_compress_6(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == 2000:1::"
        checkDFilterSucceed(dfilter)

    def test_ula_1(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == fd93:c15b:7ae0:2e41:0000:0000:0000:0000"
        checkDFilterSucceed(dfilter)

    def test_ula_2(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == fd93:c15b:7ae0:2e41:ffff:ffff:ffff:ffff"
        checkDFilterSucceed(dfilter)

    def test_ula_3(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == fd93:c15b:7ae0:2e41:3f32:35c9:40aa:1243"
        checkDFilterSucceed(dfilter)

    def test_ula_4(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == fd93:c15b:7ae0:2e41::2:1"
        checkDFilterSucceed(dfilter)

    def test_mapped_ipv4_1(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::13.1.68.3"
        checkDFilterSucceed(dfilter)

    def test_mapped_ipv4_2(self, checkDFilterSucceed):
        dfilter = "ipv6.dst == ::FFFF:129.144.52.38"
        checkDFilterSucceed(dfilter)
Python
wireshark/test/suite_dfilter/group_membership.py
# Copyright (c) 2018 Peter Wu <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterMembership:
    trace_file = "http.pcap"

    def test_membership_match_1(self, checkDFilterCount):
        dfilter = 'tcp.port in {80, 3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_match_2(self, checkDFilterCount):
        dfilter = 'tcp.port in {80,3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_match_3(self, checkDFilterCount):
        dfilter = 'tcp.port in {80 ,3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_match_4(self, checkDFilterCount):
        dfilter = 'tcp.port in {80 , 3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_match_5(self, checkDFilterCount):
        dfilter = 'tcp.port in { 80 , 3267 }'
        checkDFilterCount(dfilter, 1)

    def test_membership_any_1(self, checkDFilterCount):
        dfilter = 'any tcp.port in {80, 3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_any_2(self, checkDFilterCount):
        dfilter = 'any tcp.port in {70, 80, 90}'
        checkDFilterCount(dfilter, 1)

    def test_membership_all_1(self, checkDFilterCount):
        dfilter = 'all tcp.port in {80, 3267}'
        checkDFilterCount(dfilter, 1)

    def test_membership_all_2(self, checkDFilterCount):
        dfilter = 'all tcp.port in {70, 80, 90}'
        checkDFilterCount(dfilter, 0)

    def test_membership_range_match_1(self, checkDFilterCount):
        dfilter = 'tcp.port in {80..81}'
        checkDFilterCount(dfilter, 1)

    def test_membership_range_match_2(self, checkDFilterCount):
        dfilter = 'tcp.port in {80 ..81}'
        checkDFilterCount(dfilter, 1)

    def test_membership_range_match_3(self, checkDFilterCount):
        dfilter = 'tcp.port in {80.. 81}'
        checkDFilterCount(dfilter, 1)

    def test_membership_range_match_4(self, checkDFilterCount):
        dfilter = 'tcp.port in {80 .. 81}'
        checkDFilterCount(dfilter, 1)

    def test_membership_3_range_no_match(self, checkDFilterCount):
        dfilter = 'tcp.dstport in {1 .. 79, 81 .. 65535}'
        checkDFilterCount(dfilter, 0)

    def test_membership_4_range_no_match_multiple(self, checkDFilterCount):
        # Verifies that multiple fields cannot satisfy different conditions.
        dfilter = 'tcp.port in {1 .. 79,81 .. 3266,3268 .. 65535}'
        checkDFilterCount(dfilter, 0)

    def test_membership_5_negative_range_float(self, checkDFilterCount):
        dfilter = 'frame.time_delta in {-2.0 .. 0.0}'
        checkDFilterCount(dfilter, 1)

    def test_membership_6_both_negative_range_float(self, checkDFilterCount):
        dfilter = 'frame.time_delta in {-20 .. -0.7}'
        checkDFilterCount(dfilter, 0)

    def test_membership_7_string(self, checkDFilterCount):
        dfilter = 'http.request.method in {"GET", "HEAD"}'
        checkDFilterCount(dfilter, 1)

    def test_membership_8_ip_range(self, checkDFilterCount):
        dfilter = 'ip.addr in { 10.0.0.5 .. 10.0.0.9 , 10.0.0.1..10.0.0.1 }'
        checkDFilterCount(dfilter, 1)

    def test_membership_9_range_invalid_float(self, checkDFilterFail):
        # expression should be parsed as "0.1 .. .7"
        # .7 is the identifier (protocol) named "7"
        dfilter = 'frame.time_delta in {0.1...7}'
        error = '"." was unexpected in this context'
        checkDFilterFail(dfilter, error)

    def test_membership_10_bad_lhs_number(self, checkDFilterFail):
        dfilter = '123 in {ip}'
        error = 'Only a field may be tested for membership in a set.'
        checkDFilterFail(dfilter, error)

    def test_membership_11_bad_rhs_string(self, checkDFilterFail):
        dfilter = 'frame.number in {1, "foo"}'
        error = 'Unsigned integer (32 bits) cannot be converted from a string'
        checkDFilterFail(dfilter, error)

    def test_membership_12_value_string(self, checkDFilterCount):
        dfilter = 'tcp.checksum.status in {"Unverified", "Good"}'
        checkDFilterCount(dfilter, 1)

    def test_membership_arithmetic_1(self, checkDFilterCountWithSelectedFrame):
        dfilter = 'frame.time_epoch in {${frame.time_epoch}-46..${frame.time_epoch}+43}'
        checkDFilterCountWithSelectedFrame(dfilter, 1, 1)
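A field such as `tcp.port` can occur several times in one frame (source and destination), and plain `in` is satisfied when any occurrence falls in any listed value or range, while the `all` quantifier requires every occurrence to. A small Python model of the cases exercised above (illustrative only):

# Model of "tcp.port in {80, 3267}" semantics for a frame with two ports.
ports = [3267, 80]                     # all occurrences of tcp.port in the frame
members = [(80, 80), (3267, 3267)]     # the set, ranges as (lo, hi) pairs

def in_set(v):
    return any(lo <= v <= hi for lo, hi in members)

assert any(in_set(p) for p in ports)      # 'tcp.port in {...}'      -> match
assert all(in_set(p) for p in ports)      # 'all tcp.port in {...}'  -> match
members = [(70, 70), (80, 80), (90, 90)]
assert not all(in_set(p) for p in ports)  # matches test_membership_all_2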
Python
wireshark/test/suite_dfilter/group_scanner.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterScanner:
    trace_file = "http.pcap"

    def test_dquote_1(self, checkDFilterCount):
        dfilter = 'http.request.method == "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_dquote_2(self, checkDFilterCount):
        dfilter = 'http.request.method == "\\x48EAD"'
        checkDFilterCount(dfilter, 1)

    def test_dquote_3(self, checkDFilterCount):
        dfilter = 'http.request.method == "\\x58EAD"'
        checkDFilterCount(dfilter, 0)

    def test_dquote_4(self, checkDFilterCount):
        dfilter = 'http.request.method == "\\110EAD"'
        checkDFilterCount(dfilter, 1)

    def test_dquote_5(self, checkDFilterCount):
        dfilter = 'http.request.method == "\\111EAD"'
        checkDFilterCount(dfilter, 0)

    def test_dquote_6(self, checkDFilterFail):
        dfilter = r'http.request.method == "\HEAD"'
        checkDFilterFail(dfilter, 'not a valid character escape sequence')
Python
wireshark/test/suite_dfilter/group_slice.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterRange:
    trace_file = "ipx_rip.pcap"

    def test_slice_1_pos(self, checkDFilterCount):
        dfilter = "ipx.src.node[1] == aa"
        checkDFilterCount(dfilter, 1)

    def test_slice_1_neg(self, checkDFilterCount):
        dfilter = "ipx.src.node[1] == bb"
        checkDFilterCount(dfilter, 0)

    def test_slice_negative_offset(self, checkDFilterCount):
        dfilter = "ipx[-2:] == 04:53"
        checkDFilterCount(dfilter, 1)

    def test_slice_1_hex_pos(self, checkDFilterCount):
        dfilter = "ipx.src.node[1] == 0xaa"
        checkDFilterCount(dfilter, 1)

    def test_slice_1_hex_neg(self, checkDFilterCount):
        dfilter = "ipx.src.node[1] == 0xbb"
        checkDFilterCount(dfilter, 0)

    def test_slice_2_pos(self, checkDFilterCount):
        dfilter = "ipx.src.node[3:2] == a3:e3"
        checkDFilterCount(dfilter, 1)

    def test_slice_2_neg(self, checkDFilterCount):
        dfilter = "ipx.src.node[3:2] == cc:dd"
        checkDFilterCount(dfilter, 0)

    def test_slice_string_1(self, checkDFilterFail):
        dfilter = "frame == \"00\"[1]"
        checkDFilterFail(dfilter, "Range is not supported for entity")

    def test_slice_unparsed_1(self, checkDFilterFail):
        dfilter = "frame == b[1]"
        checkDFilterFail(dfilter, "Range is not supported for entity")

    def test_slice_func_1(self, checkDFilterSucceed):
        dfilter = "string(ipx.src.node)[3:2] == \"cc:dd\""
        checkDFilterSucceed(dfilter)

    # [i:j]  i = start_offset, j = length
    # [i-j]  i = start_offset, j = end_offset, inclusive.
    # [i]    i = start_offset, length = 1
    # [:j]   start_offset = 0, length = j
    # [i:]   start_offset = i, end_offset = end_of_field

    def test_slice_range_1(self, checkDFilterSucceed):
        # :5 is a length
        dfilter = "frame[5:5] == 11:22:33:44:55"
        checkDFilterSucceed(dfilter)

    def test_slice_range_2(self, checkDFilterSucceed):
        # end offset is inclusive
        dfilter = "frame[5-10] == 11:22:33:44:55:66"
        checkDFilterSucceed(dfilter)

    def test_slice_range_3(self, checkDFilterSucceed):
        dfilter = "frame[5] == 11"
        checkDFilterSucceed(dfilter)

    def test_slice_range_4(self, checkDFilterSucceed):
        dfilter = "frame[:20] contains be:ef"
        checkDFilterSucceed(dfilter)

    def test_slice_range_5(self, checkDFilterSucceed):
        dfilter = "frame[20:] contains :12345678"
        checkDFilterSucceed(dfilter)

    def test_slice_exists_1(self, checkDFilterCount):
        dfilter = "frame[59]"
        checkDFilterCount(dfilter, 1)

    def test_slice_exists_2(self, checkDFilterCount):
        dfilter = "frame[60]"
        checkDFilterCount(dfilter, 0)

    def test_slice_exists_3(self, checkDFilterCount):
        dfilter = "frame[50-59]"
        checkDFilterCount(dfilter, 1)

    def test_slice_exists_4(self, checkDFilterCount):
        dfilter = "frame[50-60]"
        checkDFilterCount(dfilter, 0)
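The comment block inside this file defines the five slice forms; mapping them onto Python's half-open slices is a useful sanity check. Note in particular that the `[i-j]` form is inclusive of the end offset, so it becomes `data[i:j+1]`. A sketch (illustrative only; it ignores the negative-offset forms exercised earlier in the file):

# Mapping the display-filter slice forms onto Python slices (illustrative).
def df_slice(data: bytes, spec: str) -> bytes:
    # "[i:j]" j is a length; "[i-j]" j is an inclusive end offset;
    # "[i]" one byte; "[:j]" from the start; "[i:]" to the end of the field.
    spec = spec.strip("[]")
    if "-" in spec and not spec.startswith("-"):
        i, j = map(int, spec.split("-"))
        return data[i:j + 1]
    if ":" in spec:
        i_s, j_s = spec.split(":")
        i = int(i_s) if i_s else 0
        return data[i:] if j_s == "" else data[i:i + int(j_s)]
    i = int(spec)
    return data[i:i + 1]

data = bytes(range(100))
assert df_slice(data, "[5:5]") == bytes([5, 6, 7, 8, 9])
assert df_slice(data, "[5-10]") == bytes(range(5, 11))   # inclusive end offset
assert df_slice(data, "[5]") == b"\x05"
assert df_slice(data, "[:3]") == b"\x00\x01\x02"
assert df_slice(data, "[20:]") == bytes(range(20, 100))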
Python
wireshark/test/suite_dfilter/group_string.py
#
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterString:
    trace_file = "http.pcap"

    def test_eq_1(self, checkDFilterCount):
        dfilter = 'http.request.method == "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = 'http.request.method == "POST"'
        checkDFilterCount(dfilter, 0)

    def test_gt_1(self, checkDFilterCount):
        dfilter = 'http.request.method > "HEAC"'
        checkDFilterCount(dfilter, 1)

    def test_gt_2(self, checkDFilterCount):
        dfilter = 'http.request.method > "HEAD"'
        checkDFilterCount(dfilter, 0)

    def test_gt_3(self, checkDFilterCount):
        dfilter = 'http.request.method > "HEAE"'
        checkDFilterCount(dfilter, 0)

    def test_ge_1(self, checkDFilterCount):
        dfilter = 'http.request.method >= "HEAC"'
        checkDFilterCount(dfilter, 1)

    def test_ge_2(self, checkDFilterCount):
        dfilter = 'http.request.method >= "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_ge_3(self, checkDFilterCount):
        dfilter = 'http.request.method >= "HEAE"'
        checkDFilterCount(dfilter, 0)

    def test_lt_1(self, checkDFilterCount):
        dfilter = 'http.request.method < "HEAC"'
        checkDFilterCount(dfilter, 0)

    def test_lt_2(self, checkDFilterCount):
        dfilter = 'http.request.method < "HEAD"'
        checkDFilterCount(dfilter, 0)

    def test_lt_3(self, checkDFilterCount):
        dfilter = 'http.request.method < "HEAE"'
        checkDFilterCount(dfilter, 1)

    def test_le_1(self, checkDFilterCount):
        dfilter = 'http.request.method <= "HEAC"'
        checkDFilterCount(dfilter, 0)

    def test_le_2(self, checkDFilterCount):
        dfilter = 'http.request.method <= "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_le_3(self, checkDFilterCount):
        dfilter = 'http.request.method <= "HEAE"'
        checkDFilterCount(dfilter, 1)

    def test_slice_1(self, checkDFilterCount):
        dfilter = 'http.request.method[0] == "H"'
        checkDFilterCount(dfilter, 1)

    def test_slice_2(self, checkDFilterCount):
        dfilter = 'http.request.method[0] == "P"'
        checkDFilterCount(dfilter, 0)

    def test_slice_3(self, checkDFilterCount):
        dfilter = 'http.request.method[0:4] == "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_slice_4(self, checkDFilterCount):
        dfilter = 'http.request.method[0:4] != "HEAD"'
        checkDFilterCount(dfilter, 0)

    def test_slice_5(self, checkDFilterCount):
        dfilter = 'http.request.method[1:2] == "EA"'
        checkDFilterCount(dfilter, 1)

    def test_slice_6(self, checkDFilterCount):
        dfilter = 'http.request.method[1:2] > "EA"'
        checkDFilterCount(dfilter, 0)

    def test_slice_7(self, checkDFilterCount):
        dfilter = 'http.request.method[-1] == "D"'
        checkDFilterCount(dfilter, 1)

    def test_slice_8(self, checkDFilterCount):
        dfilter = 'http.request.method[-2] == "D"'
        checkDFilterCount(dfilter, 0)

    # Disabled ("xxx" prefix keeps pytest from collecting these);
    # the tftp stringz cases are covered by TestDfilterStringz below.
    def xxxtest_stringz_1(self):
        return self.DFilterCount(pkt_tftp, 'tftp.type == "octet"', 1)

    def xxxtest_stringz_2(self):
        return self.DFilterCount(pkt_tftp, 'tftp.type == "junk"', 0)

    def test_contains_1(self, checkDFilterCount):
        dfilter = 'http.request.method contains "E"'
        checkDFilterCount(dfilter, 1)

    def test_contains_2(self, checkDFilterCount):
        dfilter = 'http.request.method contains "EA"'
        checkDFilterCount(dfilter, 1)

    def test_contains_3(self, checkDFilterCount):
        dfilter = 'http.request.method contains "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_contains_4(self, checkDFilterCount):
        dfilter = 'http.request.method contains "POST"'
        checkDFilterCount(dfilter, 0)

    def test_contains_5(self, checkDFilterCount):
        dfilter = 'http.request.method contains "\x50\x4f\x53\x54"'  # "POST"
        checkDFilterCount(dfilter, 0)

    def test_contains_6(self, checkDFilterCount):
        dfilter = 'http.request.method contains "\x48\x45\x41\x44"'  # "HEAD"
        checkDFilterCount(dfilter, 1)

    def test_contains_7(self, checkDFilterCount):
        dfilter = 'http.request.method contains 48:45:41:44'  # "48:45:41:44"
        checkDFilterCount(dfilter, 0)

    def test_contains_fail_0(self, checkDFilterCount):
        dfilter = 'http.user_agent contains "update"'
        checkDFilterCount(dfilter, 0)

    def test_contains_fail_1(self, checkDFilterCount):
        dfilter = 'http.user_agent contains "UPDATE"'
        checkDFilterCount(dfilter, 0)

    def test_contains_upper_0(self, checkDFilterCount):
        dfilter = 'upper(http.user_agent) contains "UPDATE"'
        checkDFilterCount(dfilter, 1)

    def test_contains_upper_1(self, checkDFilterCount):
        dfilter = 'upper(http.user_agent) contains "update"'
        checkDFilterCount(dfilter, 0)

    def test_contains_upper_2(self, checkDFilterFail):
        dfilter = 'upper(tcp.seq) == 4'
        checkDFilterFail(dfilter, 'Only string type fields can be used')

    def test_contains_lower_0(self, checkDFilterCount):
        dfilter = 'lower(http.user_agent) contains "UPDATE"'
        checkDFilterCount(dfilter, 0)

    def test_contains_lower_1(self, checkDFilterCount):
        dfilter = 'lower(http.user_agent) contains "update"'
        checkDFilterCount(dfilter, 1)

    def test_eq_lower_1(self, checkDFilterFail):
        dfilter = 'lower(tcp.seq) == 4'
        checkDFilterFail(dfilter, 'Only string type fields can be used')

    def test_string_len(self, checkDFilterCount):
        dfilter = 'len(http.request.method) == 4'
        checkDFilterCount(dfilter, 1)

    def test_eq_unicode(self, checkDFilterCount):
        dfilter = 'tcp.flags.str == "·······AP···"'
        checkDFilterCount(dfilter, 1)

    def test_contains_unicode(self, checkDFilterCount):
        dfilter = 'tcp.flags.str contains "·······AP···"'
        checkDFilterCount(dfilter, 1)

    def test_value_string_1(self, checkDFilterCount):
        dfilter = 'tcp.checksum.status == "Unverified" || tcp.checksum.status == "Good"'
        checkDFilterCount(dfilter, 1)


class TestDfilterStringz:
    trace_file = "tftp.pcap"

    def test_stringz_1(self, checkDFilterCount):
        dfilter = 'tftp.type == octet'
        checkDFilterCount(dfilter, 1)

    def test_stringz_2(self, checkDFilterCount):
        dfilter = 'tftp.type == "octet"'
        checkDFilterCount(dfilter, 1)

    def test_stringz_3(self, checkDFilterCount):
        dfilter = 'tftp.type == junk'
        checkDFilterCount(dfilter, 0)


class TestDfilterStringIndex:
    trace_file = "data-utf8.pcap"

    def test_index_1(self, checkDFilterCount):
        dfilter = 'data.text[3] == "á"'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)

    def test_index_2(self, checkDFilterCount):
        dfilter = 'data.text[3] == "a"'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 0, prefs)

    def test_index_3(self, checkDFilterCount):
        dfilter = 'data.text[40:] == "cão preguiçoso"'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)

    def test_index_4(self, checkDFilterCount):
        # Byte offset
        dfilter = '@data.text[41:] == "cão preguiçoso"'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)

    def test_index_5(self, checkDFilterCount):
        # Byte offset
        dfilter = '@data.text[41:] == 63:c3:a3:6f:20:70:72:65:67:75:69:c3:a7:6f:73:6f'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)

    def test_strlen_1(self, checkDFilterCount):
        dfilter = 'len(data.text) == 54'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)

    def test_strlen_2(self, checkDFilterCount):
        # Byte length
        dfilter = 'len(@data.text) == 57'
        prefs = "data.show_as_text:true"
        checkDFilterCount(dfilter, 1, prefs)
Python
wireshark/test/suite_dfilter/group_syntax.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterSyntax:
    trace_file = "http.pcap"

    def test_exists_1(self, checkDFilterCount):
        dfilter = "frame"
        checkDFilterCount(dfilter, 1)

    def test_exists_2(self, checkDFilterCount):
        # Identifier using minus
        dfilter = "mac-lte"
        checkDFilterCount(dfilter, 0)

    def test_commute_1(self, checkDFilterCount):
        dfilter = "ip.proto == 6"
        checkDFilterCount(dfilter, 1)

    def test_commute_2(self, checkDFilterCount):
        dfilter = "6 == ip.proto"
        checkDFilterCount(dfilter, 1)

    def test_commute_3(self, checkDFilterFail):
        dfilter = "6 == 7"
        error = "Constant expression is invalid"
        checkDFilterFail(dfilter, error)

    def test_func_1(self, checkDFilterCount):
        dfilter = "len(frame) == 207"
        checkDFilterCount(dfilter, 1)

    def test_value_string_1(self, checkDFilterSucceed):
        dfilter = 'eth.fcs.status=="Bad"'
        checkDFilterSucceed(dfilter)

    def test_matches_1(self, checkDFilterSucceed):
        dfilter = 'http.request.method matches "^HEAD"'
        checkDFilterSucceed(dfilter)

    def test_matches_2(self, checkDFilterFail):
        dfilter = 'http.request.method matches HEAD'
        checkDFilterFail(dfilter, 'requires a double quoted string')

    def test_matches_3(self, checkDFilterFail):
        dfilter = 'http.request.method matches "^HEAD" matches "^POST"'
        checkDFilterFail(dfilter, '"matches" was unexpected in this context.')

    def test_matches_4(self, checkDFilterCount):
        dfilter = r'http.host matches r"update\.microsoft\.c.."'
        checkDFilterCount(dfilter, 1)

    def test_matches_5(self, checkDFilterSucceed):
        # case insensitive
        dfilter = 'http.request.method matches "^head"'
        checkDFilterSucceed(dfilter)

    def test_equal_1(self, checkDFilterCount):
        dfilter = 'ip.addr == 10.0.0.5'
        checkDFilterCount(dfilter, 1)

    def test_equal_2(self, checkDFilterCount):
        dfilter = 'ip.addr == 207.46.134.94'
        checkDFilterCount(dfilter, 1)

    def test_equal_3(self, checkDFilterCount):
        dfilter = 'ip.addr == 10.0.0.5 or ip.addr == 207.46.134.94'
        checkDFilterCount(dfilter, 1)

    def test_equal_4(self, checkDFilterCount):
        dfilter = 'ip.addr == 10.0.0.5 and ip.addr == 207.46.134.94'
        checkDFilterCount(dfilter, 1)

    def test_not_equal_1(self, checkDFilterCount):
        dfilter = 'ip.addr != 10.0.0.5'
        checkDFilterCount(dfilter, 0)

    def test_not_equal_2(self, checkDFilterCount):
        dfilter = 'ip.addr != 207.46.134.94'
        checkDFilterCount(dfilter, 0)

    def test_not_equal_3(self, checkDFilterCount):
        dfilter = 'ip.addr != 10.0.0.5 and ip.addr != 207.46.134.94'
        checkDFilterCount(dfilter, 0)

    def test_not_equal_4(self, checkDFilterCount):
        dfilter = 'ip.addr != 10.0.0.5 or ip.addr != 207.46.134.94'
        checkDFilterCount(dfilter, 0)

    def test_deprecated_1(self, checkDFilterSucceed):
        dfilter = "bootp"
        checkDFilterSucceed(dfilter, "Deprecated token \"bootp\"")

    def test_charconst_bytes_1(self, checkDFilterCount):
        # Bytes as a character constant.
        dfilter = "frame contains 'H'"
        checkDFilterCount(dfilter, 1)

    def test_charconst_bytes_2(self, checkDFilterCount):
        dfilter = "frame[54] == 'H'"
        checkDFilterCount(dfilter, 1)

    def test_charconst_invalid(self, checkDFilterFail):
        dfilter = r"ip.proto == '\Z'"
        checkDFilterFail(dfilter, "isn't a valid character constant")

    def test_bool_1(self, checkDFilterCount):
        dfilter = "tcp.flags.push == 1"
        checkDFilterCount(dfilter, 1)

    def test_bool_2(self, checkDFilterCount):
        dfilter = "tcp.flags.push == True"
        checkDFilterCount(dfilter, 1)

    def test_bool_3(self, checkDFilterCount):
        dfilter = "tcp.flags.push == FALSE"
        checkDFilterCount(dfilter, 0)

    def test_misc_1(self, checkDFilterSucceed):
        # Issue #18418
        dfilter = "icmp and ((icmp.type > 0 and icmp.type < 8) or icmp.type > 8)"
        checkDFilterSucceed(dfilter)

    def test_whitespace(self, checkDFilterSucceed):
        dfilter = '\ttcp.stream \r\n== 1'
        checkDFilterSucceed(dfilter)

    def test_func_name_clash1(self, checkDFilterFail):
        # "tcp" is a (non-existent) function, not a protocol
        error = "Function 'tcp' does not exist"
        dfilter = 'frame == tcp()'
        checkDFilterFail(dfilter, error)


class TestDfilterEquality:
    trace_file = "sip.pcapng"

    def test_all_eq_1(self, checkDFilterCount):
        dfilter = "udp.port === 5060"
        checkDFilterCount(dfilter, 2)

    def test_any_ne_1(self, checkDFilterCount):
        dfilter = "udp.port !== 5060"
        checkDFilterCount(dfilter, 4)

    def test_any_eq_1(self, checkDFilterCount):
        dfilter = "udp.port == 5060"
        checkDFilterCount(dfilter, 5)

    def test_all_ne_1(self, checkDFilterCount):
        dfilter = "udp.port != 5060"
        checkDFilterCount(dfilter, 1)

    def test_root_1(self, checkDFilterCount):
        dfilter = "udp.srcport == .udp.dstport"
        checkDFilterCount(dfilter, 2)

    def test_literal_3(self, checkDFilterCount):
        dfilter = "frame[0:10] contains :00:01:6c"
        checkDFilterCount(dfilter, 1)

    def test_literal_4(self, checkDFilterCount):
        dfilter = "frame[0:10] contains :00016c"
        checkDFilterCount(dfilter, 1)

    def test_literal_5(self, checkDFilterCount):
        dfilter = "frame[0:10] contains :00.01.6c"
        checkDFilterCount(dfilter, 1)

    def test_literal_6(self, checkDFilterCount):
        dfilter = "frame[0:10] contains :00-01-6c"
        checkDFilterCount(dfilter, 1)

    def test_rhs_bias_1(self, checkDFilterCount):
        # Protocol "Fibre Channel" on the RHS
        dfilter = 'frame[37] == fc'
        checkDFilterCount(dfilter, 0)

    def test_rhs_bias_2(self, checkDFilterCount):
        # Byte 0xFC on the RHS
        dfilter = 'frame[37] == :fc'
        checkDFilterCount(dfilter, 1)

    def test_rhs_bias_3(self, checkDFilterCount):
        # Byte 0xFC on the RHS
        dfilter = 'frame[37] == fc:'
        checkDFilterCount(dfilter, 1)

    def test_rhs_bias_4(self, checkDFilterCount):
        # Protocol "Fibre Channel" on the RHS
        dfilter = 'frame[37] == .fc'
        checkDFilterCount(dfilter, 0)

    def test_rhs_bias_5(self, checkDFilterSucceed):
        # Protocol "Fibre Channel" on the RHS (with warning)
        dfilter = 'frame contains fc'
        checkDFilterSucceed(dfilter, 'Interpreting "fc" as Fibre Channel')

    def test_rhs_bias_6(self, checkDFilterSucceed):
        # Protocol "Fibre Channel" on the RHS (without warning)
        dfilter = 'frame contains .fc'
        checkDFilterSucceed(dfilter)

    def test_rhs_bias_7(self, checkDFilterSucceed):
        # Byte 0xFC on the RHS
        dfilter = 'frame contains fc:'
        checkDFilterSucceed(dfilter)


class TestDfilterBitwise:
    trace_file = "http.pcap"

    def test_exists_1(self, checkDFilterCount):
        dfilter = "tcp.flags & 0x8"
        checkDFilterCount(dfilter, 1)

    def test_exists_2(self, checkDFilterCount):
        dfilter = "eth[0] & 1"
        checkDFilterCount(dfilter, 0)

    def test_equal_1(self, checkDFilterCount):
        dfilter = "tcp.flags & 0x0F == 8"
        checkDFilterCount(dfilter, 1)

    def test_equal_2(self, checkDFilterCount):
        dfilter = "tcp.srcport != tcp.dstport & 0x0F"
        checkDFilterCount(dfilter, 1)


class TestDfilterUnaryMinus:
    trace_file = "http.pcap"

    def test_minus_const_1(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == -1"
        checkDFilterCount(dfilter, 1)

    def test_minus_const_2(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == -2"
        checkDFilterCount(dfilter, 0)

    def test_plus_const_1(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == +1"
        checkDFilterCount(dfilter, 0)

    def test_unary_1(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == -tcp.dstport"
        checkDFilterCount(dfilter, 0)

    def test_unary_2(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == +tcp.dstport"
        checkDFilterCount(dfilter, 0)

    def test_unary_3(self, checkDFilterFail):
        error = 'Constant expression is invalid on the LHS'
        dfilter = "-2 == tcp.dstport"
        checkDFilterFail(dfilter, error)

    def test_unary_4(self, checkDFilterCount):
        dfilter = "tcp.window_size_scalefactor == -{tcp.dstport * 20}"
        checkDFilterCount(dfilter, 0)

    def test_unary_invalid_1(self, checkDFilterFail):
        error = 'FT_PROTOCOL cannot be negated'
        dfilter = "-tcp"
        checkDFilterFail(dfilter, error)


class TestDfilterArithmetic:
    trace_file = "dhcp.pcap"

    def test_add_1(self, checkDFilterCount):
        dfilter = "udp.dstport == udp.srcport + 1"
        checkDFilterCount(dfilter, 2)

    def test_add_2(self, checkDFilterCount):
        dfilter = "udp.dstport == 66 + 1"
        checkDFilterCount(dfilter, 2)

    def test_add_3(self, checkDFilterCount):
        dfilter = "udp.dstport == 66+1"
        checkDFilterCount(dfilter, 2)

    def test_add_4(self, checkDFilterFail):
        error = 'Unknown type for left side of +'
        dfilter = "1 + 2 == frame.number"
        checkDFilterFail(dfilter, error)

    def test_add_5(self, checkDFilterFail):
        error = 'Unknown type for left side of +'
        dfilter = "1 + 2 == 2 + 1"
        checkDFilterFail(dfilter, error)

    def test_add_6(self, checkDFilterFail):
        error = 'Unknown type for left side of -'
        dfilter = "1 - 2"
        checkDFilterFail(dfilter, error)

    def test_sub_1(self, checkDFilterCount):
        dfilter = "udp.srcport == udp.dstport - 1"
        checkDFilterCount(dfilter, 2)

    def test_sub_2(self, checkDFilterCount):
        dfilter = "udp.dstport == 68 - 1"
        checkDFilterCount(dfilter, 2)

    def test_sub_3(self, checkDFilterCount):
        dfilter = "udp.dstport == 68-1"
        checkDFilterCount(dfilter, 2)

    def test_sub_4(self, checkDFilterCount):
        dfilter = "udp.length == ip.len - 20"
        checkDFilterCount(dfilter, 4)

    def test_expr_1(self, checkDFilterCount):
        dfilter = 'udp.port * { 10 / {5 - 4} } == udp.port * { {50 + 50} / 2 - 40 }'
        checkDFilterCount(dfilter, 4)

    def test_expr_2(self, checkDFilterCount):
        dfilter = 'udp.dstport * { udp.srcport / {5 - 4} } == udp.srcport * { 2 * udp.dstport - 68 }'
        checkDFilterCount(dfilter, 2)


class TestDfilterFieldReference:
    trace_file = "ipoipoip.pcap"

    def test_ref_1(self, checkDFilterCountWithSelectedFrame):
        dfilter = 'frame.number < ${frame.number}'
        # select frame 2, expect 1 frame out of 2.
        checkDFilterCountWithSelectedFrame(dfilter, 1, 2)

    def test_ref_2(self, checkDFilterCountWithSelectedFrame):
        dfilter = 'ip.src#3 == ${ip.src#4}'
        # select frame 1, expect 1 frame out of 2.
        checkDFilterCountWithSelectedFrame(dfilter, 1, 1)


class TestDfilterLayer:
    trace_file = "ipoipoip.pcap"

    def test_layer_1(self, checkDFilterCount):
        dfilter = 'ip.addr#2 == 4.4.4.4'
        checkDFilterCount(dfilter, 1)

    def test_layer_2(self, checkDFilterCount):
        dfilter = 'ip.addr#5'
        checkDFilterCount(dfilter, 1)

    def test_layer_3(self, checkDFilterCount):
        dfilter = 'ip.addr#6'
        checkDFilterCount(dfilter, 0)

    def test_layer_4(self, checkDFilterCount):
        dfilter = 'ip.dst#[2-4] == 8.8.8.8'
        checkDFilterCount(dfilter, 1)

    def test_layer_5(self, checkDFilterCount):
        dfilter = 'ip.dst#[-1] == 8.8.8.8'
        checkDFilterCount(dfilter, 0)

    def test_layer_6(self, checkDFilterCount):
        dfilter = 'ip.dst#[-1] == 9.9.9.9'
        checkDFilterCount(dfilter, 1)

    def test_layer_7(self, checkDFilterCount):
        dfilter = 'ip.dst#[-5] == 2.2.2.2'
        checkDFilterCount(dfilter, 1)


class TestDfilterQuantifiers:
    trace_file = "ipoipoip.pcap"

    def test_any_1(self, checkDFilterCount):
        dfilter = 'any ip.addr > 1.1.1.1'
        checkDFilterCount(dfilter, 2)

    def test_all_1(self, checkDFilterCount):
        dfilter = 'all ip.addr > 1.1.1.1'
        checkDFilterCount(dfilter, 1)


class TestDfilterRawModifier:
    trace_file = "s7comm-fuzz.pcapng.gz"

    def test_regular(self, checkDFilterCount):
        dfilter = 's7comm.blockinfo.blocktype == "0\uFFFD"'
        checkDFilterCount(dfilter, 3)

    def test_raw1(self, checkDFilterCount):
        dfilter = '@s7comm.blockinfo.blocktype == 30:aa'
        checkDFilterCount(dfilter, 2)

    def test_raw2(self, checkDFilterCount):
        dfilter = '@s7comm.blockinfo.blocktype == 30:fe'
        checkDFilterCount(dfilter, 1)

    def test_raw_ref(self, checkDFilterCountWithSelectedFrame):
        dfilter = '@s7comm.blockinfo.blocktype == ${@s7comm.blockinfo.blocktype}'
        # select frame 3, expect 2 frames out of 3.
        checkDFilterCountWithSelectedFrame(dfilter, 2, 3)


class TestDfilterRawSlice:
    trace_file = "http.pcap"

    def test_raw_slice1(self, checkDFilterFail):
        dfilter = 'tcp.port[1] == 0xc3'
        checkDFilterFail(dfilter, "cannot be sliced")

    def test_raw_slice2(self, checkDFilterCount):
        dfilter = '@tcp.port[1] == 0xc3'
        checkDFilterCount(dfilter, 1)

    def test_raw_slice3(self, checkDFilterFail):
        dfilter = 'tcp.port[0:] == 0c:c3'
        checkDFilterFail(dfilter, "cannot be sliced")

    def test_raw_slice4(self, checkDFilterCount):
        dfilter = '@tcp.port[0:] == 0c:c3'
        checkDFilterCount(dfilter, 1)


class TestDfilterXor:
    trace_file = "ipoipoip.pcap"

    def test_xor_1(self, checkDFilterCount):
        dfilter = 'ip.src == 7.7.7.7 xor ip.dst == 7.7.7.7'
        checkDFilterCount(dfilter, 1)

    def test_xor_2(self, checkDFilterCount):
        dfilter = 'ip.src == 7.7.7.7 ^^ ip.dst == 7.7.7.7'
        checkDFilterCount(dfilter, 1)

    def test_xor_3(self, checkDFilterCount):
        dfilter = 'ip.src == 9.9.9.9 xor ip.dst == 9.9.9.9'
        checkDFilterCount(dfilter, 0)

    def test_xor_4(self, checkDFilterCount):
        dfilter = 'ip.src == 9.9.9.9 ^^ ip.dst == 9.9.9.9'
        checkDFilterCount(dfilter, 0)
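# The checks above run inside the pytest harness, but any dfilter string can
# also be exercised by hand. A minimal sketch (assuming the capture files from
# test/captures are on hand; "dftest" and "tshark" ship with Wireshark):
#
#   dftest 'ip.addr == 10.0.0.5'                    # compile the filter, dump its bytecode
#   tshark -r http.pcap -Y 'ip.addr == 10.0.0.5'    # apply it to the trace used above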
Python
wireshark/test/suite_dfilter/group_time.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterTime:
    trace_file = "http.pcap"

    def test_eq_1(self, checkDFilterCount):
        dfilter = 'frame.time == "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_eq_2(self, checkDFilterCount):
        dfilter = 'frame.time == "Jan 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_eq_3(self, checkDFilterCount):
        dfilter = 'frame.time == "2002-12-31 13:55:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_eq_4(self, checkDFilterCount):
        dfilter = 'frame.time == 1041342931.3'
        checkDFilterCount(dfilter, 1)

    def test_ne_1(self, checkDFilterCount):
        dfilter = 'frame.time != "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_ne_2(self, checkDFilterCount):
        dfilter = 'frame.time != "Jan 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_gt_1(self, checkDFilterCount):
        dfilter = 'frame.time > "Dec 31, 2002 13:54:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_gt_2(self, checkDFilterCount):
        dfilter = 'frame.time > "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_gt_3(self, checkDFilterCount):
        dfilter = 'frame.time > "Dec 31, 2002 13:56:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_ge_1(self, checkDFilterCount):
        dfilter = 'frame.time >= "Dec 31, 2002 13:54:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_ge_2(self, checkDFilterCount):
        dfilter = 'frame.time >= "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_ge_3(self, checkDFilterCount):
        dfilter = 'frame.time >= "Dec 31, 2002 13:56:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_lt_1(self, checkDFilterCount):
        dfilter = 'frame.time < "Dec 31, 2002 13:54:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_lt_2(self, checkDFilterCount):
        dfilter = 'frame.time < "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_lt_3(self, checkDFilterCount):
        dfilter = 'frame.time < "Dec 31, 2002 13:56:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_le_1(self, checkDFilterCount):
        dfilter = 'frame.time <= "Dec 31, 2002 13:54:31.3"'
        checkDFilterCount(dfilter, 0)

    def test_le_2(self, checkDFilterCount):
        dfilter = 'frame.time <= "Dec 31, 2002 13:55:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_le_3(self, checkDFilterCount):
        dfilter = 'frame.time <= "Dec 31, 2002 13:56:31.3"'
        checkDFilterCount(dfilter, 1)

    def test_utc_time_1(self, checkDFilterCount):
        dfilter = 'frame.time == "Dec 31, 2002 13:55:31.3 UTC"'
        checkDFilterCount(dfilter, 1)

    def test_bad_time_1(self, checkDFilterFail):
        # This is an error; only the UTC timezone can be used.
        dfilter = 'frame.time == "Dec 31, 2002 13:56:31.3 WET"'
        error = 'Unexpected data after time value'
        checkDFilterFail(dfilter, error)

    def test_bad_time_2(self, checkDFilterFail):
        # Milliseconds can only occur after seconds.
        dfilter = 'frame.time == "2002-12-31 13:55.3"'
        error = 'requires a seconds field'
        checkDFilterFail(dfilter, error)

    def test_bad_time_3(self, checkDFilterFail):
        # Reject months in a different locale (mrt is March in nl_NL.UTF-8).
        dfilter = 'frame.time == "mrt 1, 2000 00:00:00"'
        error = '"mrt 1, 2000 00:00:00" is not a valid absolute time. Example: "Nov 12, 1999 08:55:44.123" or "2011-07-04 12:34:56"'
        checkDFilterFail(dfilter, error)


class TestDfilterTimeRelative:
    trace_file = "nfs.pcap"

    def test_relative_time_1(self, checkDFilterCount):
        dfilter = "frame.time_delta == 0.7"
        checkDFilterCount(dfilter, 1)

    def test_relative_time_2(self, checkDFilterCount):
        dfilter = "frame.time_delta > 0.7"
        checkDFilterCount(dfilter, 0)

    def test_relative_time_3(self, checkDFilterCount):
        dfilter = "frame.time_delta < 0.7"
        checkDFilterCount(dfilter, 1)
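# Sanity check for the numeric form accepted in test_eq_4 above: the epoch is
# 1970-01-01 UTC, and "Dec 31, 2002 13:55:31 UTC" falls 12052 days later, so
# 12052 * 86400 + 13*3600 + 55*60 + 31 = 1041292800 + 50131 = 1041342931,
# matching the literal 1041342931.3 (with 0.3 s of fraction).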
Python
wireshark/test/suite_dfilter/group_tvb.py
# Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import pytest
from suite_dfilter.dfiltertest import *


class TestDfilterProtocol:
    trace_file = "http.pcap"

    def test_slice_1(self, checkDFilterCount):
        dfilter = "ip[0:2] == 45:00"
        checkDFilterCount(dfilter, 1)

    def test_slice_2(self, checkDFilterCount):
        dfilter = "ip[0:2] == 00:00"
        checkDFilterCount(dfilter, 0)

    def test_slice_3(self, checkDFilterCount):
        dfilter = "ip[2:2] == 00:c1"
        checkDFilterCount(dfilter, 1)

    def test_contains_1(self, checkDFilterCount):
        dfilter = "eth contains 6b"
        checkDFilterCount(dfilter, 1)

    def test_contains_2(self, checkDFilterCount):
        dfilter = "eth contains 09:6b:88"
        checkDFilterCount(dfilter, 1)

    def test_contains_3(self, checkDFilterCount):
        dfilter = "eth contains 00:e0:81:00:b0:28:00:09:6b:88:f5:c9:08:00"
        checkDFilterCount(dfilter, 1)

    def test_contains_4(self, checkDFilterCount):
        dfilter = "eth contains ff:ff:ff"
        checkDFilterCount(dfilter, 0)

    def test_contains_5(self, checkDFilterCount):
        dfilter = 'http contains "HEAD"'
        checkDFilterCount(dfilter, 1)

    def test_protocol_1(self, checkDFilterSucceed):
        dfilter = 'frame contains aa.bb.ff'
        checkDFilterSucceed(dfilter)

    def test_protocol_2(self, checkDFilterFail):
        dfilter = 'frame contains aa.bb.hh'
        checkDFilterFail(dfilter, 'not a valid protocol or protocol field')

    def test_protocol_3(self, checkDFilterFail):
        dfilter = 'ip.port == 5'
        checkDFilterFail(dfilter, '"ip.port" is not a valid protocol or protocol field')
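# Note on the slice syntax used above: in display filters "ip[2:2]" means two
# bytes starting at offset 2 (offset:length), not a Python-style start:stop
# range. In http.pcap those two bytes are 00:c1, the IP total-length field
# (0xc1 = 193 bytes, which plus the 14-byte Ethernet header gives the
# 207-byte frame checked elsewhere in this suite).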
Python
wireshark/test/suite_dissectors/dissectorstest.py
#
# Wireshark dissector tests
# By Atli Guðmundsson <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import inspect
import json
import subprocess

import pytest


class _dissection_validator_real:
    '''
    Collects a set of byte bundles, matching json objects and a protocol
    name and verifies that a byte bundle converts into the matching json
    object using the following execution chain:

        byte bundle -> text2pcap -> tshark <protocol> -> json

    Note: The idea for this approach came about when it was realized that
    calling text2pcap and tshark for each byte bundle resulted in
    unacceptable overhead during execution of the unittests.
    '''

    def __init__(self, protocol, request, cmd_tshark, cmd_text2pcap, result_file, env):
        self.dissection_list = []
        self.protocol = protocol
        self.cmd_tshark = cmd_tshark
        self.cmd_text2pcap = cmd_text2pcap
        self.test_case = request.instance
        self.result_file = result_file
        self.env = env

    def add_dissection(self, byte_list, expected_result, line_no=None):
        '''Adds a byte bundle and an expected result to the set of byte
        bundles to verify. Byte bundles must be iterable.'''

        hex_string = ' '.join('{:02x}'.format(ele) for ele in bytes(byte_list))
        if line_no is None:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            line_no = caller.lineno
        self.dissection_list.append((line_no, hex_string, expected_result))

        # Uncomment the following lines to record in a text file all the
        # dissector byte bundles, in the order they are presented:
        #
        # with open("full.txt", 'a') as f:
        #     f.write("0 {}\n".format(hex_string))
        #
        # Then use the following command to convert full.txt into a pcap file,
        # replacing <port> with the default port of your protocol:
        #
        # text2pcap -u <port>,<port> full.txt out.pcap

    def check_dissections(self):
        '''Processes and verifies all added byte bundles and their expected
        results. At the end of processing the current set is emptied.'''

        text_file = self.result_file('txt')
        pcap_file = self.result_file('pcap')

        # create our text file of hex encoded messages
        with open(text_file, 'w') as f:
            for line_no, hex_string, expected_result in self.dissection_list:
                f.write("0 {}\n".format(hex_string))

        # generate our pcap file by feeding the messages to text2pcap
        subprocess.check_call((
            self.cmd_text2pcap,
            '-u', '1234,1234',
            text_file, pcap_file
        ), env=self.env)

        # generate our dissection from our pcap file
        tshark_stdout = subprocess.check_output((
            self.cmd_tshark,
            '-r', pcap_file,
            '-T', 'json',
            '-d', 'udp.port==1234,{}'.format(self.protocol),
            '-J', self.protocol
        ), encoding='utf-8', env=self.env)

        dissections = json.loads(tshark_stdout)
        for (line_no, hex_string, expected_result), dissection in zip(self.dissection_list, dissections):

            # strip away everything except the protocol
            result = dissection['_source']['layers']
            assert self.protocol in result
            result = result[self.protocol]

            # verify that the dissection is as expected
            assert expected_result == result, \
                "expected != result, while dissecting [{}] from line {}.".format(hex_string, line_no)

        # cleanup for next test
        self.dissection_list = []


@pytest.fixture
def dissection_validator(request, cmd_tshark, cmd_text2pcap, result_file, test_env):

    def generate_validator(protocol):
        retval = _dissection_validator_real(
            protocol,
            request,
            cmd_tshark,
            cmd_text2pcap,
            result_file,
            test_env)
        return retval

    return generate_validator
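# Example usage (a sketch; "myproto" and the byte/field values are only
# illustrative, while the fixture and method names are the real ones above):
#
#   def test_myproto_header(dissection_validator):
#       validator = dissection_validator('myproto')
#       validator.add_dissection(
#           [0x01, 0x02],                  # raw payload bytes
#           {'myproto.version': '1'}       # expected dissection subtree
#       )
#       validator.check_dissections()      # runs text2pcap + tshark once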
Python
wireshark/test/suite_dissectors/group_asterix.py
# # Wireshark ASTERIX dissector tests # By Atli Guðmundsson <[email protected]> # # SPDX-License-Identifier: GPL-2.0-or-later # '''ASTERIX dissector tests''' import inspect import pytest # Wireshark modules from suite_dissectors.dissectorstest import * class TestAsterix: def test_for_asterix(self, dissection_validator): '''Verifies that the asterix dissector is installed and accessible''' tester = dissection_validator('asterix') tester.add_dissection( [0x13, 0x00, 0x03], { "asterix.category": "19", "asterix.length": "3" } ) tester.check_dissections() class _asterix_validator_real: def __init__(self, category, dissection_validator): self.category = category self.validator = dissection_validator("asterix") def add_dissection(self, byte_list, field, expected_message, line_no=None): '''pre-wrap asterix category messages with proper asterix structure''' total_length = len(byte_list) + 3 byte_list = [ self.category, (total_length // 256) % 256, total_length % 256 ] + byte_list expected_result = { "asterix.category": "{}".format(self.category), "asterix.length": "{}".format(total_length), "asterix.message": { "asterix.fspec": "", field: expected_message } } if line_no is None: caller = inspect.getframeinfo(inspect.stack()[1][0]) line_no = caller.lineno self.validator.add_dissection(byte_list, expected_result, line_no) def check_dissections(self): self.validator.check_dissections() @pytest.fixture def asterix_validator(dissection_validator): def generate_asterix_validator(category): retval = _asterix_validator_real(category, dissection_validator) return retval return generate_asterix_validator class _asterix_re_validator_real(_asterix_validator_real): def __init__(self, category, re_byte_list, dissection_validator): super().__init__(category, dissection_validator) self.re_byte_list = re_byte_list def add_re_dissection(self, byte_list, field, expected_message, line_no=None): '''pre-wrap asterix RE messages with proper asterix RE structure''' re_length = len(byte_list) + 1 byte_list = self.re_byte_list + [ re_length % 256 ] + byte_list expected_result = { "asterix.re_field_len": "{}".format(re_length), "asterix.fspec": "", "asterix.{:03}_RE_{}".format(self.category, field): expected_message } if line_no is None: caller = inspect.getframeinfo(inspect.stack()[1][0]) line_no = caller.lineno self.add_dissection(byte_list, "asterix.{:03}_RE".format( self.category), expected_result, line_no) @pytest.fixture def asterix_re_validator(dissection_validator): def generate_re_asterix_validator(category, re_byte_list): retval = _asterix_re_validator_real( category, re_byte_list, dissection_validator) return retval return generate_re_asterix_validator def fspec_local(key, idx, value): result = { "asterix.fspec": "", "asterix.{}".format(key): { "asterix.{}_{}".format(key, idx): value } } return result def fspec_global(key, idx, value): result = { "asterix.fspec": "", "asterix.{}".format(key): { "asterix.{}".format(idx): value } } return result def dict_local(vmap, key, idx, value): result = vmap.copy() result["asterix.{}_{}".format(key, idx)] = value return result def dict_global(vmap, key, value): result = vmap.copy() result["asterix.{}".format(key)] = value return result def dict_fspec_local(vmap, key, idx, value): result = { "asterix.fspec": "", "asterix.{}".format(key): dict_local(vmap, key, idx, value) } return result def dict_fspec_global(vmap, key, idx, value): result = { "asterix.fspec": "", "asterix.{}".format(key): dict_global(vmap, idx, value) } return result def counter_local(vmap, counter, key, idx, 
value): result = { "asterix.fspec": "", "asterix.{}".format(key): { "asterix.counter": counter, "asterix.{}".format(key): dict_local(vmap, key, idx, value) } } return result class TestCategory019: ''' Unittest case for ASTERIX Category 019 Online specification: https://www.eurocontrol.int/publications/cat019-multilateration-system-status-messages-part-18 Part 18 : Category 019 (1.3) Multilateration System Status Messages Standard User Application Profile FRN Data Item Information Length 1 I019/010 Data Source Identifier 2 2 I019/000 Message Type 1 3 I019/140 Time of Day 3 4 I019/550 System Status 1 5 I019/551 Tracking Processor Detailed Status 1 6 I019/552 Remote Sensor Detailed Status 1+ 7 I019/553 Reference Transponder Detailed Status 1+ FX - Field Extension Indicator - 8 I019/600 Position of the MLT System Reference point 8 9 I019/610 Height of the MLT System Reference point 2 10 I019/620 WGS-84 Undulation 1 11 - Spare - 12 - Spare - 13 RE Reserved Expansion Field - 14 SP Special Purpose Field - FX - Field Extension Indicator - ''' def test_for_fields(self, asterix_validator): '''verifies existence of all fields and their maximum value''' validator = asterix_validator(19) validator.add_dissection( [0x80, 0xff, 0x00], "asterix.019_010", { "asterix.019_010_SAC": "0xff", "asterix.019_010_SIC": "0x00" } ) validator.add_dissection( [0x80, 0x00, 0xff], "asterix.019_010", { "asterix.019_010_SAC": "0x00", "asterix.019_010_SIC": "0xff" } ) validator.add_dissection( [0x40, 0x03], "asterix.019_000", { "asterix.019_000_VALUE": "3" } ) validator.add_dissection( [0x20, 0xa8, 0xbf, 0xff], "asterix.019_140", { "asterix.019_140_VALUE": "86399.9921875" } ) validator.add_dissection( [0x10, 0xc0], "asterix.019_550", { "asterix.019_550_NOGO": "3", "asterix.019_550_OVL": "0", "asterix.019_550_TSV": "0", "asterix.019_550_TTF": "0" } ) validator.add_dissection( [0x10, 0x20], "asterix.019_550", { "asterix.019_550_NOGO": "0", "asterix.019_550_OVL": "1", "asterix.019_550_TSV": "0", "asterix.019_550_TTF": "0" } ) validator.add_dissection( [0x10, 0x10], "asterix.019_550", { "asterix.019_550_NOGO": "0", "asterix.019_550_OVL": "0", "asterix.019_550_TSV": "1", "asterix.019_550_TTF": "0" } ) validator.add_dissection( [0x10, 0x08], "asterix.019_550", { "asterix.019_550_NOGO": "0", "asterix.019_550_OVL": "0", "asterix.019_550_TSV": "0", "asterix.019_550_TTF": "1" } ) validator.add_dissection( [0x08, 0x80], "asterix.019_551", { "asterix.019_551_TP1A": "1", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x40], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "1", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x20], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "1", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x10], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "1", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": 
"0" } ) validator.add_dissection( [0x08, 0x08], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "1", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x04], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "1", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x02], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "1", "asterix.019_551_TP4B": "0" } ) validator.add_dissection( [0x08, 0x01], "asterix.019_551", { "asterix.019_551_TP1A": "0", "asterix.019_551_TP1B": "0", "asterix.019_551_TP2A": "0", "asterix.019_551_TP2B": "0", "asterix.019_551_TP3A": "0", "asterix.019_551_TP3B": "0", "asterix.019_551_TP4A": "0", "asterix.019_551_TP4B": "1" } ) validator.add_dissection( [0x04, 0x00], "asterix.019_552", { "asterix.counter": "0" } ) validator.add_dissection( [0x04, 0x01, 0xff, 0x00], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0xff", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x40], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "1", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x20], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "1", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x10], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "1", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x08], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "1", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x04], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "1" } } ) validator.add_dissection( [0x04, 0x03, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x0c], "asterix.019_552", { "asterix.counter": "3", "asterix.019_552": { "asterix.019_552_RSI": "18", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "1", "asterix.019_552_TX1090": "1", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "1" }, "asterix.019_552": { "asterix.019_552_RSI": "86", "asterix.019_552_RS1090": "1", "asterix.019_552_TX1030": "1", 
"asterix.019_552_TX1090": "1", "asterix.019_552_RSS": "1", "asterix.019_552_RSO": "0" }, "asterix.019_552": { "asterix.019_552_RSI": "0x9a", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "1", "asterix.019_552_RSO": "1" } } ) validator.add_dissection( [0x02, 0xc0], "asterix.019_553", { "asterix.019_553_REFTR1": "3", "asterix.019_553_REFTR2": "0", "asterix.FX": "0" } ) validator.add_dissection( [0x02, 0x0c], "asterix.019_553", { "asterix.019_553_REFTR1": "0", "asterix.019_553_REFTR2": "3", "asterix.FX": "0" } ) '''TODO: check this testcase, it has too many subitems validator.add_dissection( [0x02, 0x01, 0x01, 0x0c], "asterix.019_553", { "asterix.019_553_Ref_Trans_1_Status": "0", "asterix.019_553_Ref_Trans_2_Status": "0", "asterix.019_553_Ref_Trans_3_Status": "0", "asterix.019_553_Ref_Trans_4_Status": "0", "asterix.019_553_Ref_Trans_5_Status": "0", "asterix.019_553_Ref_Trans_6_Status": "3", "asterix.FX": "0" } ) ''' validator.add_dissection( [0x01, 0x80, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "90", "asterix.019_600_LON": "0" } ) validator.add_dissection( [0x01, 0x80, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "-90", "asterix.019_600_LON": "0" } ) validator.add_dissection( [0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "0", "asterix.019_600_LON": "180" } ) validator.add_dissection( [0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "0", "asterix.019_600_LON": "-180" } ) validator.add_dissection( [0x01, 0x40, 0x7f, 0xff], "asterix.019_610", { "asterix.019_610_VALUE": "8191.75" } ) validator.add_dissection( [0x01, 0x40, 0x80, 0x00], "asterix.019_610", { "asterix.019_610_VALUE": "-8192" } ) validator.add_dissection( [0x01, 0x20, 0x7f], "asterix.019_620", { "asterix.019_620_VALUE": "127" } ) validator.add_dissection( [0x01, 0x20, 0x81], "asterix.019_620", { "asterix.019_620_VALUE": "-127" } ) validator.check_dissections() def test_undefined_value_handling(self, asterix_validator): '''verifies that the dissector can dissect undefined field values by setting the maximum value of bits or by setting all undefined bits''' validator = asterix_validator(19) validator.add_dissection( [0x40, 0xff], "asterix.019_000", { "asterix.019_000_VALUE": "255" } ) validator.add_dissection( [0x20, 0xff, 0xff, 0xff], "asterix.019_140", { "asterix.019_140_VALUE": "131071.9921875" } ) validator.add_dissection( [0x10, 0x07], "asterix.019_550", { "asterix.019_550_NOGO": "0", "asterix.019_550_OVL": "0", "asterix.019_550_TSV": "0", "asterix.019_550_TTF": "0" } ) validator.add_dissection( [0x04, 0x01, 0x00, 0x83], "asterix.019_552", { "asterix.counter": "1", "asterix.019_552": { "asterix.019_552_RSI": "0x00", "asterix.019_552_RS1090": "0", "asterix.019_552_TX1030": "0", "asterix.019_552_TX1090": "0", "asterix.019_552_RSS": "0", "asterix.019_552_RSO": "0" } } ) validator.add_dissection( [0x02, 0x32], "asterix.019_553", { "asterix.019_553_REFTR1": "0", "asterix.019_553_REFTR2": "0", "asterix.FX": "0" } ) '''TODO: check this testcase, it has too many subitems validator.add_dissection( [0x02, 0x33, 0x33, 0x32], "asterix.019_553", { "asterix.019_553_REFTR1": "0", "asterix.019_553_REFTR2": "0", "asterix.019_553_REFTR3": "0", "asterix.019_553_REFTR4": "0", "asterix.019_553_Ref_Trans_5_Status": "0", "asterix.019_553_Ref_Trans_6_Status": "0", 
"asterix.FX": "0" } ) ''' validator.add_dissection( [0x01, 0x80, 0x7f, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "359.999999832362", "asterix.019_600_LON": "0" } ) validator.add_dissection( [0x01, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "-360", "asterix.019_600_LON": "0" } ) validator.add_dissection( [0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff], "asterix.019_600", { "asterix.019_600_LAT": "0", "asterix.019_600_LON": "359.999999832362" } ) validator.add_dissection( [0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00], "asterix.019_600", { "asterix.019_600_LAT": "0", "asterix.019_600_LON": "-360" } ) validator.add_dissection( [0x01, 0x20, 0x80], "asterix.019_620", { "asterix.019_620_VALUE": "-128" } ) validator.add_dissection( [0x01, 0x10], "asterix.spare", "" ) validator.add_dissection( [0x01, 0x08], "asterix.spare", "" ) '''TODO: re-enable RE and SP tests when implemented validator.add_dissection( [0x01, 0x04, 0x02, 0x00], "asterix.019_RE", { "asterix.re_field_len": "2", "asterix.fspec": "" } ) validator.add_dissection( [0x01, 0x04, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.019_RE", { "asterix.fspec": "", "asterix.re_field_len": "16" } ) validator.add_dissection( [0x01, 0x02, 0x01], "asterix.019_SP", "" ) validator.add_dissection( [0x01, 0x02, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.019_SP", "" ) ''' validator.check_dissections() class TestCategory034: ''' Unittest case for ASTERIX Category 034 Online specification: https://www.eurocontrol.int/publications/cat034-monoradar-service-messages-part-2b-next-version-cat-002 Part 2b Transmission of Monoradar Service Messages Standard User Application Profile FRN Data Item Information Length 1 I034/010 Data Source Identifier 2 2 I034/000 Message Type 1 3 I034/030 Time-of-Day 3 4 I034/020 Sector Number 1 5 I034/041 Antenna Rotation Period 2 6 I034/050 System Configuration and Status 1+ 7 I034/060 System Processing Mode 1+ FX N/A. Field Extension Indicator N/A. 8 I034/070 Message Count Values (1+2*N) 9 I034/100 Generic Polar Window 8 10 I034/110 Data Filter 1 11 I034/120 3D-Position of Data Source 8 12 I034/090 Collimation Error 2 13 RE-Data Item Reserved Expansion Field 1+1+ 14 SP-Data Item Special Purpose Field 1+1+ FX N/A. Field Extension Indicator n.a. 
''' def test_for_fields(self, asterix_validator): '''verifies existence of all fields and their maximum value''' validator = asterix_validator(34) validator.add_dissection( [0x80, 0xff, 0x00], "asterix.034_010", { "asterix.034_010_SAC": "0xff", "asterix.034_010_SIC": "0x00" } ) validator.add_dissection( [0x80, 0x00, 0xff], "asterix.034_010", { "asterix.034_010_SAC": "0x00", "asterix.034_010_SIC": "0xff" } ) validator.add_dissection( [0x40, 0x04], "asterix.034_000", { "asterix.034_000_VALUE": "4" } ) validator.add_dissection( [0x20, 0xa8, 0xbf, 0xff], "asterix.034_030", { "asterix.034_030_VALUE": "86399.9921875" } ) validator.add_dissection( [0x10, 0xff], "asterix.034_020", { "asterix.034_020_VALUE": "358.59375" } ) validator.add_dissection( [0x08, 0xff, 0xff], "asterix.034_041", { "asterix.034_041_VALUE": "511.9921875" } ) x_050_COM = { "asterix.034_050_COM_NOGO": "0", "asterix.034_050_COM_RDPC": "0", "asterix.034_050_COM_RDPR": "0", "asterix.034_050_COM_OVLRDP": "0", "asterix.034_050_COM_OVLXMT": "0", "asterix.034_050_COM_MSC": "0", "asterix.034_050_COM_TSV": "0" } validator.add_dissection( [0x04, 0x80, 0x80], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "NOGO", "1") ) validator.add_dissection( [0x04, 0x80, 0x40], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "RDPC", "1") ) validator.add_dissection( [0x04, 0x80, 0x20], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "RDPR", "1") ) validator.add_dissection( [0x04, 0x80, 0x10], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "OVLRDP", "1") ) validator.add_dissection( [0x04, 0x80, 0x08], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "OVLXMT", "1") ) validator.add_dissection( [0x04, 0x80, 0x04], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "MSC", "1") ) validator.add_dissection( [0x04, 0x80, 0x02], "asterix.034_050", dict_fspec_local(x_050_COM, "034_050_COM", "TSV", "1") ) x_050_PSR = { "asterix.034_050_PSR_ANT": "0", "asterix.034_050_PSR_CHAB": "0", "asterix.034_050_PSR_OVL": "0", "asterix.034_050_PSR_MSC": "0" } validator.add_dissection( [0x04, 0x10, 0x80], "asterix.034_050", dict_fspec_local(x_050_PSR, "034_050_PSR", "ANT", "1") ) validator.add_dissection( [0x04, 0x10, 0x60], "asterix.034_050", dict_fspec_local(x_050_PSR, "034_050_PSR", "CHAB", "3") ) validator.add_dissection( [0x04, 0x10, 0x10], "asterix.034_050", dict_fspec_local(x_050_PSR, "034_050_PSR", "OVL", "1") ) validator.add_dissection( [0x04, 0x10, 0x08], "asterix.034_050", dict_fspec_local(x_050_PSR, "034_050_PSR", "MSC", "1") ) x_050_SSR = { "asterix.034_050_SSR_ANT": "0", "asterix.034_050_SSR_CHAB": "0", "asterix.034_050_SSR_OVL": "0", "asterix.034_050_SSR_MSC": "0" } validator.add_dissection( [0x04, 0x08, 0x80], "asterix.034_050", dict_fspec_local(x_050_SSR, "034_050_SSR", "ANT", "1") ) validator.add_dissection( [0x04, 0x08, 0x60], "asterix.034_050", dict_fspec_local(x_050_SSR, "034_050_SSR", "CHAB", "3") ) validator.add_dissection( [0x04, 0x08, 0x10], "asterix.034_050", dict_fspec_local(x_050_SSR, "034_050_SSR", "OVL", "1") ) validator.add_dissection( [0x04, 0x08, 0x08], "asterix.034_050", dict_fspec_local(x_050_SSR, "034_050_SSR", "MSC", "1") ) x_050_MDS = { "asterix.034_050_MDS_ANT": "0", "asterix.034_050_MDS_CHAB": "0", "asterix.034_050_MDS_OVLSUR": "0", "asterix.034_050_MDS_MSC": "0", "asterix.034_050_MDS_SCF": "0", "asterix.034_050_MDS_DLF": "0", "asterix.034_050_MDS_OVLSCF": "0", "asterix.034_050_MDS_OVLDLF": "0" } validator.add_dissection( [0x04, 0x04, 0x80, 0x00], 
"asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "ANT", "1") ) validator.add_dissection( [0x04, 0x04, 0x60, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "CHAB", "3") ) validator.add_dissection( [0x04, 0x04, 0x10, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "OVLSUR", "1") ) validator.add_dissection( [0x04, 0x04, 0x08, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "MSC", "1") ) validator.add_dissection( [0x04, 0x04, 0x04, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "SCF", "1") ) validator.add_dissection( [0x04, 0x04, 0x02, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "DLF", "1") ) validator.add_dissection( [0x04, 0x04, 0x01, 0x00], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "OVLSCF", "1") ) validator.add_dissection( [0x04, 0x04, 0x00, 0x80], "asterix.034_050", dict_fspec_local(x_050_MDS, "034_050_MDS", "OVLDLF", "1") ) x_060_COM = { "asterix.034_060_COM_REDRDP": "0", "asterix.034_060_COM_REDXMT": "0" } validator.add_dissection( [0x02, 0x80, 0x70], "asterix.034_060", dict_fspec_local(x_060_COM, "034_060_COM", "REDRDP", "7") ) validator.add_dissection( [0x02, 0x80, 0x0e], "asterix.034_060", dict_fspec_local(x_060_COM, "034_060_COM", "REDXMT", "7") ) x_060_PSR = { "asterix.034_060_PSR_POL": "0", "asterix.034_060_PSR_REDRAD": "0", "asterix.034_060_PSR_STC": "0" } validator.add_dissection( [0x02, 0x10, 0x80], "asterix.034_060", dict_fspec_local(x_060_PSR, "034_060_PSR", "POL", "1") ) validator.add_dissection( [0x02, 0x10, 0x70], "asterix.034_060", dict_fspec_local(x_060_PSR, "034_060_PSR", "REDRAD", "7") ) validator.add_dissection( [0x02, 0x10, 0x0c], "asterix.034_060", dict_fspec_local(x_060_PSR, "034_060_PSR", "STC", "3") ) validator.add_dissection( [0x02, 0x08, 0xe0], "asterix.034_060", fspec_local("034_060_SSR", "REDRAD", "7") ) x_060_06 = { "asterix.034_060_MDS_REDRAD": "0", "asterix.034_060_MDS_CLU": "0" } validator.add_dissection( [0x02, 0x04, 0xe0], "asterix.034_060", dict_fspec_local(x_060_06, "034_060_MDS", "REDRAD", "7") ) validator.add_dissection( [0x02, 0x04, 0x10], "asterix.034_060", dict_fspec_local(x_060_06, "034_060_MDS", "CLU", "1") ) x_070 = { "asterix.034_070_TYP": "0", "asterix.034_070_COUNT": "0" } validator.add_dissection( [0x01, 0x80, 0x01, 0x80, 0x00], "asterix.034_070", { "asterix.counter": "1", "asterix.034_070": dict_local(x_070, "034_070", "TYP", "16") } ) validator.add_dissection( [0x01, 0x80, 0x03, 0x80, 0x00, 0x87, 0xff, 0x07, 0xff], "asterix.034_070", { "asterix.counter": "3", "asterix.034_070": dict_local(x_070, "034_070", "TYPE", "16"), "asterix.034_070": { "asterix.034_070_TYP": "16", "asterix.034_070_COUNT": "2047" }, "asterix.034_070": dict_local(x_070, "034_070", "COUNT", "2047"), } ) x_100 = { "asterix.034_100_RHOST": "0", "asterix.034_100_RHOEND": "0", "asterix.034_100_THETAST": "0", "asterix.034_100_THETAEND": "0" } validator.add_dissection( [0x01, 0x40, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.034_100", dict_local(x_100, "034_100", "RHOST", "255.99609375") ) validator.add_dissection( [0x01, 0x40, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00], "asterix.034_100", dict_local(x_100, "034_100", "RHOEND", "255.99609375") ) validator.add_dissection( [0x01, 0x40, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00], "asterix.034_100", dict_local(x_100, "034_100", "THETAST", "359.994506835938") ) validator.add_dissection( [0x01, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff], 
"asterix.034_100", dict_local(x_100, "034_100", "THETAEND", "359.994506835938") ) validator.add_dissection( [0x01, 0x20, 0x09], "asterix.034_110", { "asterix.034_110_VALUE": "9" } ) x_120 = { "asterix.034_120_HGT": "0", "asterix.034_120_LAT": "0", "asterix.034_120_LON": "0" } validator.add_dissection( [0x01, 0x10, 0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.034_120", dict_local(x_120, "034_120", "HGT", "32767") ) validator.add_dissection( [0x01, 0x10, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.034_120", dict_local(x_120, "034_120", "HGT", "32768") ) validator.add_dissection( [0x01, 0x10, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.034_120", dict_local(x_120, "034_120", "LAT", "90") ) validator.add_dissection( [0x01, 0x10, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.034_120", dict_local(x_120, "034_120", "LAT", "-90") ) validator.add_dissection( [0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff], "asterix.034_120", dict_local(x_120, "034_120", "LON", "179.999978542328") ) validator.add_dissection( [0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00], "asterix.034_120", dict_local(x_120, "034_120", "LON", "-180") ) '''TODO: re-enable RE and SP tests when implemented x_090 = { "asterix.034_090_RE": "0", "asterix.034_090_AE": "0" } validator.add_dissection( [0x01, 0x08, 0x7f, 0x00], "asterix.034_090", dict_local(x_090, "034_090", "RE", "0.9921875") ) validator.add_dissection( [0x01, 0x08, 0x80, 0x00], "asterix.034_090", dict_local(x_090, "034_090", "RE", "-1") ) validator.add_dissection( [0x01, 0x08, 0x00, 0x80], "asterix.034_090", dict_local(x_090, "034_090", "AE", "-2.8125") ) ''' validator.check_dissections() def test_undefined_value_handling(self, asterix_validator): '''verifies that the dissector can dissect undefined field values by setting the maximum value of bits or by setting all undefined bits''' validator = asterix_validator(34) validator.add_dissection( [0x40, 0xff], "asterix.034_000", { "asterix.034_000_VALUE": "255" } ) validator.add_dissection( [0x20, 0xff, 0xff, 0xff], "asterix.034_030", { "asterix.034_030_VALUE": "131071.9921875" } ) validator.add_dissection( [0x04, 0x63, 0x00], "asterix.034_050", { "asterix.fspec": "", "asterix.spare": "" } ) validator.add_dissection( [0x04, 0x80, 0x01], "asterix.034_050", { "asterix.fspec": "", "asterix.034_050_COM": { "asterix.034_050_COM_NOGO": "0", "asterix.034_050_COM_RDPC": "0", "asterix.034_050_COM_RDPR": "0", "asterix.034_050_COM_OVLRDP": "0", "asterix.034_050_COM_OVLXMT": "0", "asterix.034_050_COM_MSC": "0", "asterix.034_050_COM_TSV": "0" } } ) validator.add_dissection( [0x04, 0x10, 0x07], "asterix.034_050", { "asterix.fspec": "", "asterix.034_050_PSR": { "asterix.034_050_PSR_ANT": "0", "asterix.034_050_PSR_CHAB": "0", "asterix.034_050_PSR_OVL": "0", "asterix.034_050_PSR_MSC": "0" } } ) validator.add_dissection( [0x04, 0x08, 0x07], "asterix.034_050", { "asterix.fspec": "", "asterix.034_050_SSR": { "asterix.034_050_SSR_ANT": "0", "asterix.034_050_SSR_CHAB": "0", "asterix.034_050_SSR_OVL": "0", "asterix.034_050_SSR_MSC": "0" } } ) validator.add_dissection( [0x04, 0x04, 0x00, 0x7f], "asterix.034_050", { "asterix.fspec": "", "asterix.034_050_MDS": { "asterix.034_050_MDS_ANT": "0", "asterix.034_050_MDS_CHAB": "0", "asterix.034_050_MDS_OVLSUR": "0", "asterix.034_050_MDS_MSC": "0", "asterix.034_050_MDS_SCF": "0", "asterix.034_050_MDS_DLF": "0", "asterix.034_050_MDS_OVLSCF": "0", "asterix.034_050_MDS_OVLDLF": "0" } } ) validator.add_dissection( [0x02, 0x63, 0x00], 
"asterix.034_060", { "asterix.fspec": "", "asterix.spare": "" } ) validator.add_dissection( [0x02, 0x80, 0x81], "asterix.034_060", { "asterix.fspec": "", "asterix.034_060_COM": { "asterix.034_060_COM_REDRDP": "0", "asterix.034_060_COM_REDXMT": "0" } } ) validator.add_dissection( [0x02, 0x10, 0x03], "asterix.034_060", { "asterix.fspec": "", "asterix.034_060_PSR": { "asterix.034_060_PSR_POL": "0", "asterix.034_060_PSR_REDRAD": "0", "asterix.034_060_PSR_STC": "0" } } ) validator.add_dissection( [0x02, 0x08, 0x1f], "asterix.034_060", fspec_local("034_060_SSR", "REDRAD", "0") ) validator.add_dissection( [0x02, 0x04, 0x0f], "asterix.034_060", { "asterix.fspec": "", "asterix.034_060_MDS": { "asterix.034_060_MDS_REDRAD": "0", "asterix.034_060_MDS_CLU": "0" } } ) x_070 = { "asterix.034_070_TYP": "0", "asterix.034_070_COUNT": "0" } validator.add_dissection( [0x01, 0x80, 0x01, 0xf8, 0x00], "asterix.034_070", { "asterix.counter": "1", "asterix.034_070": dict_local(x_070, "034_070", "TYP", "31") } ) validator.add_dissection( [0x01, 0x20, 0xff], "asterix.034_110", { "asterix.034_110_VALUE": "255" } ) '''TODO: re-enable RE and SP tests when implemented validator.add_dissection( [0x01, 0x04, 0x02, 0xfe], "asterix.034_RE", { "asterix.re_field_len": "2", "asterix.fspec": "" } ) validator.add_dissection( [0x01, 0x02, 0x01], "asterix.034_SP", "" ) validator.add_dissection( [0x01, 0x02, 0x11, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.034_SP", "" ) ''' validator.check_dissections() class TestCategory048: ''' Unittest case for ASTERIX Category 048 Online specification: https://www.eurocontrol.int/publications/cat048-monoradar-target-reports-part-4-next-version-cat-001 https://www.eurocontrol.int/publications/cat048-reserved-expansion-field-part-4-appendix Part 4 Category 048 Monoradar Target Reports Standard User Application Profile FRN Data Item Information Length 1 I048/010 Data Source Identifier 2 2 I048/140 Time-of-Day 3 3 I048/020 Target Report Descriptor 1+ 4 I048/040 Measured Position in Slant Polar Coordinates 4 5 I048/070 Mode-3/A Code in Octal Representation 2 6 I048/090 Flight Level in Binary Representation 2 7 I048/130 Radar Plot Characteristics 1+1+ FX n.a. Field Extension Indicator n.a. 8 I048/220 Aircraft Address 3 9 I048/240 Aircraft Identification 6 10 I048/250 Mode S MB Data 1+8*n 11 I048/161 Track Number 2 12 I048/042 Calculated Position in Cartesian Coordinates 4 13 I048/200 Calculated Track Velocity in Polar Representation 4 14 I048/170 Track Status 1+ FX n.a. Field Extension Indicator n.a. 15 I048/210 Track Quality 4 16 I048/030 Warning/Error Conditions 1+ 17 I048/080 Mode-3/A Code Confidence Indicator 2 18 I048/100 Mode-C Code and Confidence Indicator 4 19 I048/110 Height Measured by 3D Radar 2 20 I048/120 Radial Doppler Speed 1+ 21 I048/230 Communications / ACAS Capability and Flight Status 2 FX n.a. Field Extension Indicator n.a. 22 I048/260 ACAS Resolution Advisory Report 7 23 I048/055 Mode-1 Code in Octal Representation 1 24 I048/050 Mode-2 Code in Octal Representation 2 25 I048/065 Mode-1 Code Confidence Indicator 1 26 I048/060 Mode-2 Code Confidence Indicator 2 27 SP-Data Item Special Purpose Field 1+1+ 28 RE-Data Item Reserved Expansion Field 1+1+ FX n.a. Field Extension Indicator n.a. 
''' def test_for_fields(self, asterix_re_validator): '''verifies existence of all fields and their maximum value''' validator = asterix_re_validator(48, [0x01, 0x01, 0x01, 0x02]) validator.add_dissection( [0x80, 0xff, 0x00], "asterix.048_010", { "asterix.048_010_SAC": "0xff", "asterix.048_010_SIC": "0x00" } ) validator.add_dissection( [0x80, 0x00, 0xff], "asterix.048_010", { "asterix.048_010_SAC": "0x00", "asterix.048_010_SIC": "0xff" } ) validator.add_dissection( [0x40, 0xa8, 0xbf, 0xff], "asterix.048_140", { "asterix.048_140_VALUE": "86399.9921875" } ) x_020 = { "asterix.048_020_TYP": "0", "asterix.048_020_SIM": "0", "asterix.048_020_RDP": "0", "asterix.048_020_SPI": "0", "asterix.048_020_RAB": "0", "asterix.FX": "0" } validator.add_dissection( [0x20, 0xe0], "asterix.048_020", dict_local(x_020, "048_020", "TYP", "7") ) validator.add_dissection( [0x20, 0x08], "asterix.048_020", dict_local(x_020, "048_020", "RDP", "1") ) validator.add_dissection( [0x20, 0x04], "asterix.048_020", dict_local(x_020, "048_020", "SPI", "1") ) validator.add_dissection( [0x20, 0x02], "asterix.048_020", dict_local(x_020, "048_020", "RAB", "1") ) x_020.update({ "asterix.048_020_TST": "0", "asterix.048_020_ERR": "0", "asterix.048_020_XPP": "0", "asterix.048_020_ME": "0", "asterix.048_020_MI": "0", "asterix.048_020_FOEFRI": "0" }) validator.add_dissection( [0x20, 0x01, 0x80], "asterix.048_020", dict_local(x_020, "048_020", "TST", "1") ) validator.add_dissection( [0x20, 0x01, 0x40], "asterix.048_020", dict_local(x_020, "048_020", "ERR", "1") ) validator.add_dissection( [0x20, 0x01, 0x20], "asterix.048_020", dict_local(x_020, "048_020", "XPP", "1") ) validator.add_dissection( [0x20, 0x01, 0x10], "asterix.048_020", dict_local(x_020, "048_020", "ME", "1") ) validator.add_dissection( [0x20, 0x01, 0x08], "asterix.048_020", dict_local(x_020, "048_020", "MI", "1") ) validator.add_dissection( [0x20, 0x01, 0x06], "asterix.048_020", dict_local(x_020, "048_020", "FOEFRI", "3") ) x_040 = { "asterix.048_040_RHO": "0", "asterix.048_040_THETA": "0" } validator.add_dissection( [0x10, 0xff, 0xff, 0x00, 0x00], "asterix.048_040", dict_local(x_040, "048_040", "RHO", "255.99609375") ) validator.add_dissection( [0x10, 0x00, 0x00, 0xff, 0xff], "asterix.048_040", dict_local(x_040, "048_040", "THETA", "359.994506835938") ) x_070 = { "asterix.048_070_V": "0", "asterix.048_070_G": "0", "asterix.048_070_L": "0", "asterix.048_070_MODE3A": "0" } validator.add_dissection( [0x08, 0x80, 0x00], "asterix.048_070", dict_local(x_070, "048_070", "V", "1") ) validator.add_dissection( [0x08, 0x40, 0x00], "asterix.048_070", dict_local(x_070, "048_070", "G", "1") ) validator.add_dissection( [0x08, 0x20, 0x00], "asterix.048_070", dict_local(x_070, "048_070", "L", "1") ) validator.add_dissection( [0x08, 0x0e, 0x00], "asterix.048_070", dict_local(x_070, "048_070", "MODE3A", "3584") # 07000 ) validator.add_dissection( [0x08, 0x01, 0xc0], "asterix.048_070", dict_local(x_070, "048_070", "MODE3A", "448") # 0700 ) validator.add_dissection( [0x08, 0x00, 0x38], "asterix.048_070", dict_local(x_070, "048_070", "MODE3A", "56") # 070 ) validator.add_dissection( [0x08, 0x00, 0x07], "asterix.048_070", dict_local(x_070, "048_070", "MODE3A", "7") # 07 ) x_090 = { "asterix.048_090_V": "0", "asterix.048_090_G": "0", "asterix.048_090_FL": "0" } validator.add_dissection( [0x04, 0x80, 0x00], "asterix.048_090", dict_local(x_090, "048_090", "V", "1") ) validator.add_dissection( [0x04, 0x40, 0x00], "asterix.048_090", dict_local(x_090, "048_090", "G", "1") ) validator.add_dissection( 
[0x04, 0x1f, 0xff], "asterix.048_090", dict_local(x_090, "048_090", "FL", "2047.75") ) validator.add_dissection( [0x04, 0x20, 0x00], "asterix.048_090", dict_local(x_090, "048_090", "FL", "2048") ) validator.add_dissection( [0x02, 0x80, 0xff], "asterix.048_130", fspec_local("048_130_SRL", "VALUE", "11.2060546875") ) validator.add_dissection( [0x02, 0x40, 0xff], "asterix.048_130", fspec_local("048_130_SRR", "VALUE", "255") ) validator.add_dissection( [0x02, 0x20, 0x7f], "asterix.048_130", fspec_local("048_130_SAM", "VALUE", "127") ) validator.add_dissection( [0x02, 0x20, 0x80], "asterix.048_130", fspec_local("048_130_SAM", "VALUE", "-128") ) validator.add_dissection( [0x02, 0x10, 0xff], "asterix.048_130", fspec_local("048_130_PRL", "VALUE", "11.2060546875") ) validator.add_dissection( [0x02, 0x08, 0x7f], "asterix.048_130", fspec_local("048_130_PAM", "VALUE", "127") ) validator.add_dissection( [0x02, 0x08, 0x80], "asterix.048_130", fspec_local("048_130_PAM", "VALUE", "-128") ) validator.add_dissection( [0x02, 0x04, 0x7f], "asterix.048_130", fspec_local("048_130_RPD", "VALUE", "0.49609375") ) validator.add_dissection( [0x02, 0x04, 0x80], "asterix.048_130", fspec_local("048_130_RPD", "VALUE", "-0.5") ) validator.add_dissection( [0x02, 0x02, 0x7f], "asterix.048_130", fspec_local("048_130_APD", "VALUE", "2.79052734375") ) validator.add_dissection( [0x02, 0x02, 0x80], "asterix.048_130", fspec_local("048_130_APD", "VALUE", "-2.8125") ) validator.add_dissection( [0x01, 0x80, 0xff, 0xff, 0xff], "asterix.048_220", { "asterix.048_220_VALUE": '0xffffff' } ) validator.add_dissection( [0x01, 0x80, 0xff, 0xff, 0xff], "asterix.048_220", { "asterix.048_220_VALUE": '0xffffff' } ) validator.add_dissection( [0x01, 0x40, 0x04, 0x20, 0xda, 0x83, 0x0c, 0x79], "asterix.048_240", { "asterix.048_240_VALUE": "ABCZ 019" } ) x_250 = { "asterix.048_250_MBDATA": "00:00:00:00:00:00:00", "asterix.048_250_BDS1": "0", "asterix.048_250_BDS2": "0" } validator.add_dissection( [0x01, 0x20, 0x01, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x00], "asterix.048_250", { "asterix.counter": "1", "asterix.048_250": dict_global(x_250, "048_250_MBDATA", '0x0011223344556677'), } ) '''TODO: result seems correct, check dict format validator.add_dissection( [0x01, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0], "asterix.048_250", { "asterix.counter": "1", "asterix.048_250": dict_global(x_250, "048_250_BDS1", "15"), } ) validator.add_dissection( [0x01, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f], "asterix.048_250", { "asterix.counter": "1", "asterix.048_250": dict_global(x_250, "BDS2", "15"), } ) validator.add_dissection( [0x01, 0x20, 0x03, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f], "asterix.048_250", { "asterix.counter": "3", "asterix.048_250": dict_global(x_250, "048_250_MBDATA", '0x0011223344556677'), "asterix.048_250": dict_global(x_250, "048_250_BDS1", "15"), "asterix.048_250": dict_global(x_250, "048_250_BDS2", "15"), } ) ''' validator.add_dissection( [0x01, 0x10, 0x0f, 0xff], "asterix.048_161", { "asterix.048_161_TRN": "4095" } ) x_042 = { "asterix.048_042_X": "0", "asterix.048_042_Y": "0" } validator.add_dissection( [0x01, 0x08, 0x7f, 0xff, 0x00, 0x00], "asterix.048_042", dict_local(x_042, "048_042", "X", "255.9921875") ) validator.add_dissection( [0x01, 0x08, 0x80, 0x00, 0x00, 0x00], "asterix.048_042", dict_local(x_042, "048_042", "X", "-256") ) validator.add_dissection( [0x01, 0x08, 0x00, 0x0, 0x7f, 0xff], 
"asterix.048_042", dict_local(x_042, "048_042", "Y", "255.9921875") ) validator.add_dissection( [0x01, 0x08, 0x00, 0x0, 0x80, 0x00], "asterix.048_042", dict_local(x_042, "048_042", "Y", "-256") ) x_200 = { "asterix.048_200_GSP": "0", "asterix.048_200_HDG": "0" } validator.add_dissection( [0x01, 0x04, 0xff, 0xff, 0x00, 0x00], "asterix.048_200", dict_local(x_200, "048_200", "GSP", "3.99993896484375") ) validator.add_dissection( [0x01, 0x04, 0x00, 0x00, 0xff, 0xff], "asterix.048_200", dict_local(x_200, "048_200", "HDG", "359.994506835938") ) x_170 = { "asterix.048_170_CNF": "0", "asterix.048_170_RAD": "0", "asterix.048_170_DOU": "0", "asterix.048_170_MAH": "0", "asterix.048_170_CDM": "0", "asterix.FX": "0" } validator.add_dissection( [0x01, 0x02, 0x80], "asterix.048_170", dict_local(x_170, "048_170", "CNF", "1") ) validator.add_dissection( [0x01, 0x02, 0x60], "asterix.048_170", dict_local(x_170, "048_170", "RAD", "3") ) validator.add_dissection( [0x01, 0x02, 0x10], "asterix.048_170", dict_local(x_170, "048_170", "DOU", "1") ) validator.add_dissection( [0x01, 0x02, 0x08], "asterix.048_170", dict_local(x_170, "048_170", "MAH", "1") ) validator.add_dissection( [0x01, 0x02, 0x06], "asterix.048_170", dict_local(x_170, "048_170", "CDM", "3") ) x_170.update({ "asterix.048_170_TRE": "0", "asterix.048_170_GHO": "0", "asterix.048_170_SUP": "0", "asterix.048_170_TCC": "0" }) validator.add_dissection( [0x01, 0x02, 0x01, 0x80], "asterix.048_170", dict_local(x_170, "048_170", "TRE", "1") ) validator.add_dissection( [0x01, 0x02, 0x01, 0x40], "asterix.048_170", dict_local(x_170, "048_170", "GHO", "1") ) validator.add_dissection( [0x01, 0x02, 0x01, 0x20], "asterix.048_170", dict_local(x_170, "048_170", "SUP", "1") ) validator.add_dissection( [0x01, 0x02, 0x01, 0x10], "asterix.048_170", dict_local(x_170, "048_170", "TCC", "1") ) x_210 = { "asterix.048_210_SIGX": "0", "asterix.048_210_SIGY": "0", "asterix.048_210_SIGV": "0", "asterix.048_210_SIGH": "0" } validator.add_dissection( [0x01, 0x01, 0x80, 0xff, 0x00, 0x00, 0x00], "asterix.048_210", dict_local(x_210, "048_210", "SIGX", "1.9921875") ) validator.add_dissection( [0x01, 0x01, 0x80, 0x00, 0xff, 0x00, 0x00], "asterix.048_210", dict_local(x_210, "048_210", "SIGY", "1.9921875") ) validator.add_dissection( [0x01, 0x01, 0x80, 0x00, 0x00, 0xff, 0x00], "asterix.048_210", dict_local(x_210, "048_210", "SIGV", "0.01556396484375") ) validator.add_dissection( [0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0xff], "asterix.048_210", dict_local(x_210, "048_210", "SIGH", "22.412109375") ) validator.add_dissection( [0x01, 0x01, 0x40, 0x2e], "asterix.048_030", { "asterix.048_030_Subitem": "23", "asterix.FX": "0" } ) '''TODO: check this test, not according to the specs validator.add_dissection( [0x01, 0x01, 0x40, 0x2f, 0x03, 0x05, 0x06], "asterix.048_030", { "asterix.048_030_WE": "23", "asterix.048_030_1_WE": "1", "asterix.048_030_2_WE": "2", "asterix.048_030_3_WE": "3", "asterix.FX": "0" } ) ''' x_080 = { "asterix.048_080_QA4": "0", "asterix.048_080_QA2": "0", "asterix.048_080_QA1": "0", "asterix.048_080_QB4": "0", "asterix.048_080_QB2": "0", "asterix.048_080_QB1": "0", "asterix.048_080_QC4": "0", "asterix.048_080_QC2": "0", "asterix.048_080_QC1": "0", "asterix.048_080_QD4": "0", "asterix.048_080_QD2": "0", "asterix.048_080_QD1": "0" } validator.add_dissection( [0x01, 0x01, 0x20, 0x08, 0x00], "asterix.048_080", dict_local(x_080, "048_080", "QA4", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x04, 0x00], "asterix.048_080", dict_local(x_080, "048_080", "QA2", "1") ) 
validator.add_dissection( [0x01, 0x01, 0x20, 0x02, 0x00], "asterix.048_080", dict_local(x_080, "048_080", "QA1", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x01, 0x00], "asterix.048_080", dict_local(x_080, "048_080", "QB4", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x80], "asterix.048_080", dict_local(x_080, "048_080", "QB2", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x40], "asterix.048_080", dict_local(x_080, "048_080", "QB1", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x20], "asterix.048_080", dict_local(x_080, "048_080", "QC4", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x10], "asterix.048_080", dict_local(x_080, "048_080", "QC2", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x08], "asterix.048_080", dict_local(x_080, "048_080", "QC1", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x04], "asterix.048_080", dict_local(x_080, "048_080", "QD4", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x02], "asterix.048_080", dict_local(x_080, "048_080", "QD2", "1") ) validator.add_dissection( [0x01, 0x01, 0x20, 0x00, 0x01], "asterix.048_080", dict_local(x_080, "048_080", "QD1", "1") ) '''TODO: A,B,C,D values need to go to single subitem 'MODEC' x_100 = { "asterix.048_100_V": "0", "asterix.048_100_G": "0", "asterix.048_100_C1": "0", "asterix.048_100_A1": "0", "asterix.048_100_C2": "0", "asterix.048_100_A2": "0", "asterix.048_100_C4": "0", "asterix.048_100_A4": "0", "asterix.048_100_B1": "0", "asterix.048_100_D1": "0", "asterix.048_100_B2": "0", "asterix.048_100_D2": "0", "asterix.048_100_B4": "0", "asterix.048_100_D4": "0", "asterix.048_100_QC1": "0", "asterix.048_100_QA1": "0", "asterix.048_100_QC2": "0", "asterix.048_100_QA2": "0", "asterix.048_100_QC4": "0", "asterix.048_100_QA4": "0", "asterix.048_100_QB1": "0", "asterix.048_100_QD1": "0", "asterix.048_100_QB2": "0", "asterix.048_100_QD2": "0", "asterix.048_100_QB4": "0", "asterix.048_100_QD4": "0" } validator.add_dissection( [0x01, 0x01, 0x10, 0x80, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "V", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x40, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "G", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x08, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "C1", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x04, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "A1", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x02, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "C2", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "A2", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x80, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "C4", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x40, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "A4", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x20, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "B1", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x10, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "D1", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x08, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "B2", "1") ) validator.add_dissection( [0x01, 0x01, 0x10, 0x00, 0x04, 0x00, 0x00], "asterix.048_100", dict_local(x_100, "048_100", "D2", "1") ) 
        validator.add_dissection([0x01, 0x01, 0x08, 0x1f, 0xff], "asterix.048_110", {"asterix.048_110_3DH": "204775"})
        validator.add_dissection([0x01, 0x01, 0x08, 0x20, 0x00], "asterix.048_110", {"asterix.048_110_3DH": "-204800"})
        x_120_01 = {
            "asterix.048_120_CAL_D": "0",
            "asterix.048_120_CAL_CAL": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x04, 0x80, 0x80, 0x00], "asterix.048_120", dict_fspec_local(x_120_01, "048_120_CAL", "D", "1"))
        validator.add_dissection([0x01, 0x01, 0x04, 0x80, 0x01, 0xff], "asterix.048_120", dict_fspec_local(x_120_01, "048_120_CAL", "CAL", "511"))
        validator.add_dissection([0x01, 0x01, 0x04, 0x80, 0x02, 0x00], "asterix.048_120", dict_fspec_local(x_120_01, "048_120_CAL", "CAL", "-512"))
        x_120_RDS = {
            "asterix.048_120_RDS_DOP": "0",
            "asterix.048_120_RDS_AMB": "0",
            "asterix.048_120_RDS_FRQ": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x04, 0x40, 0x01, 0x7f, 0xff, 0x00, 0x00, 0x00, 0x00], "asterix.048_120", counter_local(x_120_RDS, "1", "048_120_RDS", "DOP", "32767"))
        validator.add_dissection([0x01, 0x01, 0x04, 0x40, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.048_120", counter_local(x_120_RDS, "1", "048_120_RDS", "DOP", "32768"))
        validator.add_dissection([0x01, 0x01, 0x04, 0x40, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00], "asterix.048_120", counter_local(x_120_RDS, "1", "048_120_RDS", "AMB", "65535"))
        validator.add_dissection([0x01, 0x01, 0x04, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff], "asterix.048_120", counter_local(x_120_RDS, "1", "048_120_RDS", "FRQ", "65535"))
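        # The RDS subfield of I048/120 is a repetitive data item: the octet
        # after the subfield FSPEC (0x01 above, 0x03 below) is the repetition
        # counter.  Note that the expected-value dict in the REP=3 case below
        # reuses the key "asterix.048_120_RDS" for every repetition; a plain
        # Python dict keeps only the last such entry, so the comparison
        # effectively checks the final repetition.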
0xff], "asterix.048_120", { "asterix.fspec": "", "asterix.048_120_RDS": { "asterix.counter": "3", "asterix.048_120_RDS": dict_local(x_120_RDS, "048_120_RDS", "DOP", "-32768"), "asterix.048_120_RDS": dict_local(x_120_RDS, "048_120_RDS", "AMB", "65535"), "asterix.048_120_RDS": dict_local(x_120_RDS, "048_120_RDS", "FRQ", "65535") } } ) x_230 = { "asterix.048_230_COM": "0", "asterix.048_230_STAT": "0", "asterix.048_230_SI": "0", "asterix.048_230_MSSC": "0", "asterix.048_230_ARC": "0", "asterix.048_230_AIC": "0", "asterix.048_230_B1A": "0", "asterix.048_230_B1B": "0" } validator.add_dissection( [0x01, 0x01, 0x02, 0xe0, 0x00], "asterix.048_230", dict_local(x_230, "048_230", "COM", "7") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x1c, 0x00], "asterix.048_230", dict_local(x_230, "048_230", "STAT", "7") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x02, 0x00], "asterix.048_230", dict_local(x_230, "048_230", "SI", "1") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x00, 0x80], "asterix.048_230", dict_local(x_230, "048_230", "MSSC", "1") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x00, 0x40], "asterix.048_230", dict_local(x_230, "048_230", "ARC", "1") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x00, 0x20], "asterix.048_230", dict_local(x_230, "048_230", "AIC", "1") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x00, 0x10], "asterix.048_230", dict_local(x_230, "048_230", "B1A", "1") ) validator.add_dissection( [0x01, 0x01, 0x02, 0x00, 0x0f], "asterix.048_230", dict_local(x_230, "048_230", "B1B", "15") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x80, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77], "asterix.048_260", { "asterix.048_260_VALUE": '0x0011223344556677' } ) x_055 = { "asterix.048_055_V": "0", "asterix.048_055_G": "0", "asterix.048_055_L": "0", "asterix.048_055_MODE1": "0" } validator.add_dissection( [0x01, 0x01, 0x01, 0x40, 0x80], "asterix.048_055", dict_local(x_055, "048_055", "V", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x40, 0x40], "asterix.048_055", dict_local(x_055, "048_055", "G", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x40, 0x20], "asterix.048_055", dict_local(x_055, "048_055", "L", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x40, 0x1f], "asterix.048_055", dict_local(x_055, "048_055", "MODE1", "31") ) x_050 = { "asterix.048_050_V": "0", "asterix.048_050_G": "0", "asterix.048_050_L": "0", "asterix.048_050_MODE2": "0" } validator.add_dissection( [0x01, 0x01, 0x01, 0x20, 0x80, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "V", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x20, 0x40, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "G", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x20, 0x20, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "L", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x20, 0x0f, 0xff], "asterix.048_050", dict_local(x_050, "048_050", "MODE2", "4095") ) x_065 = { "asterix.048_065_QA4": "0", "asterix.048_065_QA2": "0", "asterix.048_065_QA1": "0", "asterix.048_065_QB2": "0", "asterix.048_065_QB1": "0" } validator.add_dissection( [0x01, 0x01, 0x01, 0x10, 0x10], "asterix.048_065", dict_local(x_065, "048_065", "QA4", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x10, 0x08], "asterix.048_065", dict_local(x_065, "048_065", "QA2", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x10, 0x04], "asterix.048_065", dict_local(x_065, "048_065", "QA1", "1") ) validator.add_dissection( [0x01, 0x01, 0x01, 0x10, 0x02], "asterix.048_065", dict_local(x_065, "048_065", "QB2", "1") ) 
        x_055 = {
            "asterix.048_055_V": "0", "asterix.048_055_G": "0",
            "asterix.048_055_L": "0", "asterix.048_055_MODE1": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x01, 0x40, 0x80], "asterix.048_055", dict_local(x_055, "048_055", "V", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x40, 0x40], "asterix.048_055", dict_local(x_055, "048_055", "G", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x40, 0x20], "asterix.048_055", dict_local(x_055, "048_055", "L", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x40, 0x1f], "asterix.048_055", dict_local(x_055, "048_055", "MODE1", "31"))
        x_050 = {
            "asterix.048_050_V": "0", "asterix.048_050_G": "0",
            "asterix.048_050_L": "0", "asterix.048_050_MODE2": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x01, 0x20, 0x80, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "V", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x20, 0x40, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "G", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x20, 0x20, 0x00], "asterix.048_050", dict_local(x_050, "048_050", "L", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x20, 0x0f, 0xff], "asterix.048_050", dict_local(x_050, "048_050", "MODE2", "4095"))
        x_065 = {
            "asterix.048_065_QA4": "0", "asterix.048_065_QA2": "0",
            "asterix.048_065_QA1": "0", "asterix.048_065_QB2": "0",
            "asterix.048_065_QB1": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x01, 0x10, 0x10], "asterix.048_065", dict_local(x_065, "048_065", "QA4", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x10, 0x08], "asterix.048_065", dict_local(x_065, "048_065", "QA2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x10, 0x04], "asterix.048_065", dict_local(x_065, "048_065", "QA1", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x10, 0x02], "asterix.048_065", dict_local(x_065, "048_065", "QB2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x10, 0x01], "asterix.048_065", dict_local(x_065, "048_065", "QB1", "1"))
        x_060 = {
            "asterix.048_060_QA4": "0", "asterix.048_060_QA2": "0",
            "asterix.048_060_QA1": "0", "asterix.048_060_QB4": "0",
            "asterix.048_060_QB2": "0", "asterix.048_060_QB1": "0",
            "asterix.048_060_QC4": "0", "asterix.048_060_QC2": "0",
            "asterix.048_060_QC1": "0", "asterix.048_060_QD4": "0",
            "asterix.048_060_QD2": "0", "asterix.048_060_QD1": "0"
        }
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x08, 0x00], "asterix.048_060", dict_local(x_060, "048_060", "QA4", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x04, 0x00], "asterix.048_060", dict_local(x_060, "048_060", "QA2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x02, 0x00], "asterix.048_060", dict_local(x_060, "048_060", "QA1", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x01, 0x00], "asterix.048_060", dict_local(x_060, "048_060", "QB4", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x80], "asterix.048_060", dict_local(x_060, "048_060", "QB2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x40], "asterix.048_060", dict_local(x_060, "048_060", "QB1", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x20], "asterix.048_060", dict_local(x_060, "048_060", "QC4", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x10], "asterix.048_060", dict_local(x_060, "048_060", "QC2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x08], "asterix.048_060", dict_local(x_060, "048_060", "QC1", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x04], "asterix.048_060", dict_local(x_060, "048_060", "QD4", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x02], "asterix.048_060", dict_local(x_060, "048_060", "QD2", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x08, 0x00, 0x01], "asterix.048_060", dict_local(x_060, "048_060", "QD1", "1"))
        validator.add_dissection([0x01, 0x01, 0x01, 0x04, 0x01], "asterix.048_SP", "")
        validator.add_dissection([0x01, 0x01, 0x01, 0x04, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.048_SP", "")
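        # Special Purpose (SP) and Reserved Expansion (RE) fields begin with a
        # one-octet length indicator that counts itself, so 0x01 above is an
        # empty SP field and 0x10 announces a 15-octet payload.  The dissector
        # does not interpret SP contents, hence the empty expectations.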
        '''TODO: re-enable RE and SP tests when implemented
        x_re_md5 = {
            "asterix.048_RE_MD5_01_M5": "0", "asterix.048_RE_MD5_01_ID": "0",
            "asterix.048_RE_MD5_01_DA": "0", "asterix.048_RE_MD5_01_M1": "0",
            "asterix.048_RE_MD5_01_M2": "0", "asterix.048_RE_MD5_01_M3": "0",
            "asterix.048_RE_MD5_01_MC": "0"
        }
        validator.add_re_dissection([0x80, 0x80, 0x80], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "M5", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x40], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "ID", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x20], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "DA", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x10], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "M1", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x08], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "M2", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x04], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "M3", "1"))
        validator.add_re_dissection([0x80, 0x80, 0x02], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "MC", "1"))
        x_re_pmn = {
            "asterix.048_RE_MD5_02_PIN": "0", "asterix.048_RE_MD5_02_NAV": "0",
            "asterix.048_RE_MD5_02_NAT": "0", "asterix.048_RE_MD5_02_MIS": "0"
        }
        validator.add_re_dissection([0x80, 0x40, 0x3f, 0xff, 0x00, 0x00], "MD5", dict_fspec_local(x_re_pmn, "048_RE_MD5_02", "PIN", "16383"))
        validator.add_re_dissection([0x80, 0x40, 0x00, 0x00, 0x20, 0x00], "MD5", dict_fspec_local(x_re_pmn, "048_RE_MD5_02", "NAV", "1"))
        validator.add_re_dissection([0x80, 0x40, 0x00, 0x00, 0x1f, 0x00], "MD5", dict_fspec_local(x_re_pmn, "048_RE_MD5_02", "NAT", "31"))
        validator.add_re_dissection([0x80, 0x40, 0x00, 0x00, 0x00, 0x3f], "MD5", dict_fspec_local(x_re_pmn, "048_RE_MD5_02", "MIS", "63"))
        x_re_pos = {
            "asterix.048_RE_MD5_03_LAT": "0",
            "asterix.048_RE_MD5_03_LON": "0"
        }
        validator.add_re_dissection([0x80, 0x20, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00], "MD5", dict_fspec_local(x_re_pos, "048_RE_MD5_03", "LAT", "90"))
        validator.add_re_dissection([0x80, 0x20, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00], "MD5", dict_fspec_local(x_re_pos, "048_RE_MD5_03", "LAT", "-90"))
        validator.add_re_dissection([0x80, 0x20, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff], "MD5", dict_fspec_local(x_re_pos, "048_RE_MD5_03", "LON", "179.999978542328"))
        validator.add_re_dissection([0x80, 0x20, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00], "MD5", dict_fspec_local(x_re_pos, "048_RE_MD5_03", "LON", "-180"))
        x_re_ga = {
            "asterix.048_RE_MD5_04_RES": "0",
            "asterix.048_RE_MD5_04_GA": "0"
        }
        validator.add_re_dissection([0x80, 0x10, 0x40, 0x00], "MD5", dict_fspec_local(x_re_ga, "048_RE_MD5_04", "RES", "1"))
        validator.add_re_dissection([0x80, 0x10, 0x1f, 0xff], "MD5", dict_fspec_local(x_re_ga, "048_RE_MD5_04", "GA", "204775"))
        validator.add_re_dissection([0x80, 0x10, 0x20, 0x00], "MD5", dict_fspec_local(x_re_ga, "048_RE_MD5_04", "GA", "-204800"))
        x_re_em1 = {
            "asterix.048_RE_MD5_05_V": "0", "asterix.048_RE_MD5_05_G": "0",
            "asterix.048_RE_MD5_05_L": "0", "asterix.048_RE_MD5_05_MODE3A": "0"
        }
        validator.add_re_dissection([0x80, 0x08, 0x80, 0x00], "MD5", dict_fspec_local(x_re_em1, "048_RE_MD5_05", "V", "1"))
        validator.add_re_dissection([0x80, 0x08, 0x40, 0x00], "MD5", dict_fspec_local(x_re_em1, "048_RE_MD5_05", "G", "1"))
        validator.add_re_dissection([0x80, 0x08, 0x20, 0x00], "MD5", dict_fspec_local(x_re_em1, "048_RE_MD5_05", "L", "1"))
        validator.add_re_dissection([0x80, 0x08, 0x0f, 0xff], "MD5", dict_fspec_local(x_re_em1, "048_RE_MD5_05", "MODE3A", "4095"))
        validator.add_re_dissection([0x80, 0x04, 0x7f], "MD5", fspec_local("048_RE_MD5_06", "TOS", "0.9921875"))
        validator.add_re_dissection([0x80, 0x04, 0x80], "MD5", fspec_local("048_RE_MD5_06", "TOS", "-1"))
        x_re_xp = {
            "asterix.048_RE_MD5_07_XP": "0", "asterix.048_RE_MD5_07_X5": "0",
            "asterix.048_RE_MD5_07_XC": "0", "asterix.048_RE_MD5_07_X3": "0",
            "asterix.048_RE_MD5_07_X2": "0", "asterix.048_RE_MD5_07_X1": "0"
        }
        validator.add_re_dissection([0x80, 0x02, 0x20], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "XP", "1"))
        validator.add_re_dissection([0x80, 0x02, 0x10], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "X5", "1"))
        validator.add_re_dissection([0x80, 0x02, 0x08], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "XC", "1"))
        validator.add_re_dissection([0x80, 0x02, 0x04], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "X3", "1"))
        validator.add_re_dissection([0x80, 0x02, 0x02], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "X2", "1"))
        validator.add_re_dissection([0x80, 0x02, 0x01], "MD5", dict_fspec_local(x_re_xp, "048_RE_MD5_07", "X1", "1"))
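        # The M5N variant of the expansion field repeats the MD5 layout with
        # its own subfield names (PIN/NOV/NO instead of PIN/NAV/NAT/MIS in
        # subfield 02, plus an extra FOM subfield 08), so the tests below
        # mirror the MD5 block above.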
        x_re_md5 = {
            "asterix.048_RE_M5N_01_M5": "0", "asterix.048_RE_M5N_01_ID": "0",
            "asterix.048_RE_M5N_01_DA": "0", "asterix.048_RE_M5N_01_M1": "0",
            "asterix.048_RE_M5N_01_M2": "0", "asterix.048_RE_M5N_01_M3": "0",
            "asterix.048_RE_M5N_01_MC": "0"
        }
        validator.add_re_dissection([0x40, 0x80, 0x80], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "M5", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x40], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "ID", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x20], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "DA", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x10], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "M1", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x08], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "M2", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x04], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "M3", "1"))
        validator.add_re_dissection([0x40, 0x80, 0x02], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "MC", "1"))
        x_re_pmn = {
            "asterix.048_RE_M5N_02_PIN": "0", "asterix.048_RE_M5N_02_NOV": "0",
            "asterix.048_RE_M5N_02_NO": "0"
        }
        validator.add_re_dissection([0x40, 0x40, 0x3f, 0xff, 0x00, 0x00], "M5N", dict_fspec_local(x_re_pmn, "048_RE_M5N_02", "PIN", "16383"))
        validator.add_re_dissection([0x40, 0x40, 0x00, 0x00, 0x08, 0x00], "M5N", dict_fspec_local(x_re_pmn, "048_RE_M5N_02", "NOV", "1"))
        validator.add_re_dissection([0x40, 0x40, 0x00, 0x00, 0x07, 0xff], "M5N", dict_fspec_local(x_re_pmn, "048_RE_M5N_02", "NO", "2047"))
        x_re_pos = {
            "asterix.048_RE_M5N_03_LAT": "0",
            "asterix.048_RE_M5N_03_LON": "0"
        }
        validator.add_re_dissection([0x40, 0x20, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00], "M5N", dict_fspec_local(x_re_pos, "048_RE_M5N_03", "LAT", "90"))
        validator.add_re_dissection([0x40, 0x20, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00], "M5N", dict_fspec_local(x_re_pos, "048_RE_M5N_03", "LAT", "-90"))
        validator.add_re_dissection([0x40, 0x20, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff], "M5N", dict_fspec_local(x_re_pos, "048_RE_M5N_03", "LON", "179.999978542328"))
        validator.add_re_dissection([0x40, 0x20, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00], "M5N", dict_fspec_local(x_re_pos, "048_RE_M5N_03", "LON", "-180"))
        x_re_ga = {
            "asterix.048_RE_M5N_04_RES": "0",
            "asterix.048_RE_M5N_04_GA": "0"
        }
        validator.add_re_dissection([0x40, 0x10, 0x40, 0x00], "M5N", dict_fspec_local(x_re_ga, "048_RE_M5N_04", "RES", "1"))
        validator.add_re_dissection([0x40, 0x10, 0x1f, 0xff], "M5N", dict_fspec_local(x_re_ga, "048_RE_M5N_04", "GA", "204775"))
        validator.add_re_dissection([0x40, 0x10, 0x20, 0x00], "M5N", dict_fspec_local(x_re_ga, "048_RE_M5N_04", "GA", "-204800"))
        x_re_em1 = {
            "asterix.048_RE_M5N_05_V": "0", "asterix.048_RE_M5N_05_G": "0",
            "asterix.048_RE_M5N_05_L": "0", "asterix.048_RE_M5N_05_MODE3A": "0"
        }
        validator.add_re_dissection([0x40, 0x08, 0x80, 0x00], "M5N", dict_fspec_local(x_re_em1, "048_RE_M5N_05", "V", "1"))
        validator.add_re_dissection([0x40, 0x08, 0x40, 0x00], "M5N", dict_fspec_local(x_re_em1, "048_RE_M5N_05", "G", "1"))
        validator.add_re_dissection([0x40, 0x08, 0x20, 0x00], "M5N", dict_fspec_local(x_re_em1, "048_RE_M5N_05", "L", "1"))
        validator.add_re_dissection([0x40, 0x08, 0x0f, 0xff], "M5N", dict_fspec_local(x_re_em1, "048_RE_M5N_05", "MODE3A", "4095"))
        validator.add_re_dissection([0x40, 0x04, 0x7f], "M5N", fspec_local("048_RE_M5N_06", "TOS", "0.9921875"))
        validator.add_re_dissection([0x40, 0x04, 0x80], "M5N", fspec_local("048_RE_M5N_06", "TOS", "-1"))
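        # TOS is an 8-bit two's-complement time offset with an LSB of 1/128 s:
        # 0x7f -> 127/128 = 0.9921875 s and 0x80 -> -128/128 = -1 s.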
        x_re_xp = {
            "asterix.048_RE_M5N_07_XP": "0", "asterix.048_RE_M5N_07_X5": "0",
            "asterix.048_RE_M5N_07_XC": "0", "asterix.048_RE_M5N_07_X3": "0",
            "asterix.048_RE_M5N_07_X2": "0", "asterix.048_RE_M5N_07_X1": "0"
        }
        validator.add_re_dissection([0x40, 0x02, 0x20], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "XP", "1"))
        validator.add_re_dissection([0x40, 0x02, 0x10], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "X5", "1"))
        validator.add_re_dissection([0x40, 0x02, 0x08], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "XC", "1"))
        validator.add_re_dissection([0x40, 0x02, 0x04], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "X3", "1"))
        validator.add_re_dissection([0x40, 0x02, 0x02], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "X2", "1"))
        validator.add_re_dissection([0x40, 0x02, 0x01], "M5N", dict_fspec_local(x_re_xp, "048_RE_M5N_07", "X1", "1"))
        validator.add_re_dissection([0x40, 0x01, 0x80, 0x1f], "M5N", fspec_local("048_RE_M5N_08", "FOM", "31"))
        validator.add_re_dissection([0x20, 0x06], "M4E", {"asterix.048_RE_M4E_FOE_FRI": "3", "asterix.FX": "0"})
        validator.add_re_dissection([0x10, 0x80, 0xff], "RPC", fspec_local("048_RE_RPC_01", "SCO", "255"))
        validator.add_re_dissection([0x10, 0x40, 0xff, 0xff], "RPC", fspec_local("048_RE_RPC_02", "SCR", "6553.5"))
        validator.add_re_dissection([0x10, 0x20, 0xff, 0xff], "RPC", fspec_local("048_RE_RPC_03", "RW", "255.99609375"))
        validator.add_re_dissection([0x10, 0x10, 0xff, 0xff], "RPC", fspec_local("048_RE_RPC_04", "AR", "255.99609375"))
        validator.add_re_dissection([0x08, 0xff, 0xff, 0xff], "ERR", {"asterix.048_RE_ERR_RHO": "65535.99609375"})
        '''
        validator.check_dissections()

    def test_undefined_value_handling(self, asterix_re_validator):
        '''verifies that the dissector can dissect undefined field values by
        setting bits to their maximum value or by setting all undefined bits'''
        validator = asterix_re_validator(48, [0x01, 0x01, 0x01, 0x02])
        validator.add_dissection([0x08, 0x10, 0x00], "asterix.048_070", {
            "asterix.048_070_V": "0", "asterix.048_070_G": "0",
            "asterix.048_070_L": "0", "asterix.048_070_MODE3A": "0"
        })
        validator.add_dissection([0x01, 0x10, 0xf0, 0x00], "asterix.048_161", {"asterix.048_161_TRN": "0"})
        validator.add_dissection([0x01, 0x02, 0x01, 0x0e], "asterix.048_170", {
            "asterix.048_170_CNF": "0", "asterix.048_170_RAD": "0",
            "asterix.048_170_DOU": "0", "asterix.048_170_MAH": "0",
            "asterix.048_170_CDM": "0", "asterix.048_170_TRE": "0",
            "asterix.048_170_GHO": "0", "asterix.048_170_SUP": "0",
            "asterix.048_170_TCC": "0", "asterix.FX": "0"
        })
        validator.add_dissection([0x01, 0x01, 0x40, 0xfe], "asterix.048_030", {"asterix.048_030_Subitem": "127", "asterix.FX": "0"})
        validator.add_dissection([0x01, 0x01, 0x20, 0xf0, 0x00], "asterix.048_080", {
            "asterix.048_080_QA4": "0", "asterix.048_080_QA2": "0",
            "asterix.048_080_QA1": "0", "asterix.048_080_QB4": "0",
            "asterix.048_080_QB2": "0", "asterix.048_080_QB1": "0",
            "asterix.048_080_QC4": "0", "asterix.048_080_QC2": "0",
            "asterix.048_080_QC1": "0", "asterix.048_080_QD4": "0",
            "asterix.048_080_QD2": "0", "asterix.048_080_QD1": "0"
        })
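        # The tests above drive bits that the specification leaves undefined
        # (e.g. the top nibble 0xf0 of I048/080) and expect every defined
        # field to stay at zero, i.e. undefined bits must be ignored rather
        # than leak into neighbouring fields.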
"0", "asterix.048_100_QD1": "0", "asterix.048_100_QB2": "0", "asterix.048_100_QD2": "0", "asterix.048_100_QB4": "0", "asterix.048_100_QD4": "0" } ) validator.add_dissection( [0x01, 0x01, 0x04, 0x80, 0x7c, 0x00], "asterix.048_120", { "asterix.fspec": "", "asterix.048_120_01": { "asterix.048_120_01_D": "0", "asterix.048_120_01_CAL": "0" } } ) ''' validator.add_dissection( [0x01, 0x01, 0x04, 0x3e], "asterix.048_120", { "asterix.fspec": "" } ) validator.add_dissection( [0x01, 0x01, 0x02, 0x01, 0x00], "asterix.048_230", { "asterix.048_230_COM": "0", "asterix.048_230_STAT": "0", "asterix.048_230_SI": "0", "asterix.048_230_MSSC": "0", "asterix.048_230_ARC": "0", "asterix.048_230_AIC": "0", "asterix.048_230_B1A": "0", "asterix.048_230_B1B": "0" } ) validator.add_dissection( [0x01, 0x01, 0x01, 0x20, 0x10, 0x00], "asterix.048_050", { "asterix.048_050_V": "0", "asterix.048_050_G": "0", "asterix.048_050_L": "0", "asterix.048_050_MODE2": "0" } ) validator.add_dissection( [0x01, 0x01, 0x01, 0x10, 0xe0], "asterix.048_065", { "asterix.048_065_QA4": "0", "asterix.048_065_QA2": "0", "asterix.048_065_QA1": "0", "asterix.048_065_QB2": "0", "asterix.048_065_QB1": "0" } ) '''TODO: re-enable RE and SP tests when implemented x_re_md5 = { "asterix.048_RE_MD5_01_M5": "0", "asterix.048_RE_MD5_01_ID": "0", "asterix.048_RE_MD5_01_DA": "0", "asterix.048_RE_MD5_01_M1": "0", "asterix.048_RE_MD5_01_M2": "0", "asterix.048_RE_MD5_01_M3": "0", "asterix.048_RE_MD5_01_MC": "0" } validator.add_re_dissection( [0x80, 0x80, 0x01, 0x00], "MD5", dict_fspec_local(x_re_md5, "048_RE_MD5_01", "M5", "0") ) x_re_pmn = { "asterix.048_RE_MD5_02_PIN": "0", "asterix.048_RE_MD5_02_NAV": "0", "asterix.048_RE_MD5_02_NAT": "0", "asterix.048_RE_MD5_02_MIS": "0" } validator.add_re_dissection( [0x80, 0x40, 0xc0, 0x00, 0xc0, 0xc0], "MD5", dict_fspec_local(x_re_pmn, "048_RE_MD5_02", "PIN", "0") ) x_re_em1 = { "asterix.048_RE_MD5_05_V": "0", "asterix.048_RE_MD5_05_G": "0", "asterix.048_RE_MD5_05_L": "0", "asterix.048_RE_MD5_05_MODE3A": "0" } validator.add_re_dissection( [0x80, 0x08, 0x10, 0x00], "MD5", dict_fspec_local(x_re_em1, "048_RE_MD5_05", "V", "0") ) x_re_md5 = { "asterix.048_RE_M5N_01_M5": "0", "asterix.048_RE_M5N_01_ID": "0", "asterix.048_RE_M5N_01_DA": "0", "asterix.048_RE_M5N_01_M1": "0", "asterix.048_RE_M5N_01_M2": "0", "asterix.048_RE_M5N_01_M3": "0", "asterix.048_RE_M5N_01_MC": "0" } validator.add_re_dissection( [0x40, 0x80, 0x01, 0x00], "M5N", dict_fspec_local(x_re_md5, "048_RE_M5N_01", "M5", "0") ) x_re_pmn = { "asterix.048_RE_M5N_02_PIN": "0", "asterix.048_RE_M5N_02_NOV": "0", "asterix.048_RE_M5N_02_NO": "0" } validator.add_re_dissection( [0x40, 0x40, 0xc0, 0x00, 0xf0, 0x00], "M5N", dict_fspec_local(x_re_pmn, "048_RE_M5N_02", "PIN", "0") ) x_re_em1 = { "asterix.048_RE_M5N_05_V": "0", "asterix.048_RE_M5N_05_G": "0", "asterix.048_RE_M5N_05_L": "0", "asterix.048_RE_M5N_05_MODE3A": "0" } validator.add_re_dissection( [0x40, 0x08, 0x10, 0x00], "M5N", dict_fspec_local(x_re_em1, "048_RE_M5N_05", "V", "0") ) validator.add_re_dissection( [0x40, 0x01, 0x80, 0xe0], "M5N", fspec_local("048_RE_M5N_08", "FOM", "0") ) validator.add_re_dissection( [0x20, 0xf8], "M4E", { "asterix.048_RE_M4E_FOE_FRI": "0", "asterix.FX": "0" } ) validator.add_re_dissection( [0x20, 0x01, 0x00], "M4E", { "asterix.048_RE_M4E_FOE_FRI": "0", "asterix.FX": "1" } ) ''' validator.check_dissections() class TestCategory063: ''' Unittest case for ASTERIX Category 063 Online specification: https://www.eurocontrol.int/publications/cat063-sensor-status-messages-part-10 Part 10: 
class TestCategory063:
    '''
    Unittest case for ASTERIX Category 063

    Online specification:
    https://www.eurocontrol.int/publications/cat063-sensor-status-messages-part-10

    Part 10: Category 63 (1.4)
    Sensor Status Messages

    Standard User Application Profile

    FRN  Data Item  Information                      Length
     1   I063/010   Data Source Identifier              2
     2   I063/015   Service Identification              1
     3   I063/030   Time of Message                     3
     4   I063/050   Sensor Identifier                   2
     5   I063/060   Sensor Configuration and Status    1+1
     6   I063/070   Time Stamping Bias                  2
     7   I063/080   SSR/Mode S Range Gain and Bias      4
    FX   -          Field extension indicator           -
     8   I063/081   SSR/Mode S Azimuth Bias             2
     9   I063/090   PSR Range Gain and Bias             4
    10   I063/091   PSR Azimuth Bias                    2
    11   I063/092   PSR Elevation Bias                  2
    12   -          spare                               -
    13   RE         Reserved Expansion Field           1+1+
    14   SP         Special Purpose Field              1+1+
    FX   -          Field extension indicator           -
    '''

    def test_for_fields(self, asterix_validator):
        '''verifies existence of all fields and their maximum value'''
        validator = asterix_validator(63)
        validator.add_dissection([0x80, 0xff, 0x00], "asterix.063_010", {"asterix.063_010_SAC": "0xff", "asterix.063_010_SIC": "0x00"})
        validator.add_dissection([0x80, 0x00, 0xff], "asterix.063_010", {"asterix.063_010_SAC": "0x00", "asterix.063_010_SIC": "0xff"})
        validator.add_dissection([0x40, 0xff], "asterix.063_015", {"asterix.063_015_VALUE": "0xff"})
        validator.add_dissection([0x20, 0xa8, 0xbf, 0xff], "asterix.063_030", {"asterix.063_030_VALUE": "86399.9921875"})
        validator.add_dissection([0x10, 0xff, 0x00], "asterix.063_050", {"asterix.063_050_SAC": "0xff", "asterix.063_050_SIC": "0x00"})
        validator.add_dissection([0x10, 0x00, 0xff], "asterix.063_050", {"asterix.063_050_SAC": "0x00", "asterix.063_050_SIC": "0xff"})
        validator.add_dissection([0x08, 0xc0], "asterix.063_060", {
            "asterix.063_060_CON": "3", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x20], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "1",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x10], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "1", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x08], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "1",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x04], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "1", "asterix.063_060_MLT": "0",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x02], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "1",
            "asterix.FX": "0"
        })
        validator.add_dissection([0x08, 0x01, 0x80], "asterix.063_060", {
            "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0",
            "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0",
            "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0",
            "asterix.063_060_OPS": "1", "asterix.063_060_ODP": "0",
            "asterix.063_060_OXT": "0", "asterix.063_060_MSC": "0",
            "asterix.063_060_TSV": "0", "asterix.063_060_NPW": "0",
            "asterix.FX": "0"
        })
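        # I063/060 is a variable-length item: the FX bit (trailing 0x01 in the
        # first octet above and below) extends the field with a second part
        # that adds the OPS..NPW status bits to the CON..MLT bits.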
"asterix.063_060_CON": "0", "asterix.063_060_PSR": "0", "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0", "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0", "asterix.063_060_OPS": "0", "asterix.063_060_ODP": "1", "asterix.063_060_OXT": "0", "asterix.063_060_MSC": "0", "asterix.063_060_TSV": "0", "asterix.063_060_NPW": "0", "asterix.FX": "0" } ) validator.add_dissection( [0x08, 0x01, 0x20], "asterix.063_060", { "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0", "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0", "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0", "asterix.063_060_OPS": "0", "asterix.063_060_ODP": "0", "asterix.063_060_OXT": "1", "asterix.063_060_MSC": "0", "asterix.063_060_TSV": "0", "asterix.063_060_NPW": "0", "asterix.FX": "0" } ) validator.add_dissection( [0x08, 0x01, 0x10], "asterix.063_060", { "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0", "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0", "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0", "asterix.063_060_OPS": "0", "asterix.063_060_ODP": "0", "asterix.063_060_OXT": "0", "asterix.063_060_MSC": "1", "asterix.063_060_TSV": "0", "asterix.063_060_NPW": "0", "asterix.FX": "0" } ) validator.add_dissection( [0x08, 0x01, 0x08], "asterix.063_060", { "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0", "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0", "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0", "asterix.063_060_OPS": "0", "asterix.063_060_ODP": "0", "asterix.063_060_OXT": "0", "asterix.063_060_MSC": "0", "asterix.063_060_TSV": "1", "asterix.063_060_NPW": "0", "asterix.FX": "0" } ) validator.add_dissection( [0x08, 0x01, 0x04], "asterix.063_060", { "asterix.063_060_CON": "0", "asterix.063_060_PSR": "0", "asterix.063_060_SSR": "0", "asterix.063_060_MDS": "0", "asterix.063_060_ADS": "0", "asterix.063_060_MLT": "0", "asterix.063_060_OPS": "0", "asterix.063_060_ODP": "0", "asterix.063_060_OXT": "0", "asterix.063_060_MSC": "0", "asterix.063_060_TSV": "0", "asterix.063_060_NPW": "1", "asterix.FX": "0" } ) validator.add_dissection( [0x04, 0xff, 0xff], "asterix.063_070", { "asterix.063_070_VALUE": "-1" } ) validator.add_dissection( [0x02, 0x7f, 0xff, 0x00, 0x00], "asterix.063_080", { "asterix.063_080_SRG": "0.32767", "asterix.063_080_SRB": "0" } ) validator.add_dissection( [0x02, 0x80, 0x00, 0x00, 0x00], "asterix.063_080", { "asterix.063_080_SRG": "-0.32768", "asterix.063_080_SRB": "0" } ) validator.add_dissection( [0x02, 0x00, 0x00, 0x7f, 0xff], "asterix.063_080", { "asterix.063_080_SRG": "0", "asterix.063_080_SRB": "255.9921875" } ) validator.add_dissection( [0x02, 0x00, 0x00, 0x80, 0x00], "asterix.063_080", { "asterix.063_080_SRG": "0", "asterix.063_080_SRB": "-256" } ) validator.add_dissection( [0x01, 0x80, 0x7f, 0xff], "asterix.063_081", { "asterix.063_081_VALUE": "179.994506835938" } ) validator.add_dissection( [0x01, 0x80, 0x80, 0x00], "asterix.063_081", { "asterix.063_081_VALUE": "-180" } ) validator.add_dissection( [0x01, 0x40, 0x7f, 0xff, 0x00, 0x00], "asterix.063_090", { "asterix.063_090_PRG": "0.32767", "asterix.063_090_PRB": "0" } ) validator.add_dissection( [0x01, 0x40, 0x80, 0x00, 0x00, 0x00], "asterix.063_090", { "asterix.063_090_PRG": "-0.32768", "asterix.063_090_PRB": "0" } ) validator.add_dissection( [0x01, 0x40, 0x00, 0x00, 0x7f, 0xff], "asterix.063_090", { "asterix.063_090_PRG": "0", "asterix.063_090_PRB": "255.9921875" } ) validator.add_dissection( [0x01, 0x40, 0x00, 0x00, 0x80, 0x00], "asterix.063_090", { "asterix.063_090_PRG": "0", 
"asterix.063_090_PRB": "-256" } ) validator.add_dissection( [0x01, 0x20, 0x7f, 0xff], "asterix.063_091", { "asterix.063_091_VALUE": "179.994506835938" } ) validator.add_dissection( [0x01, 0x20, 0x80, 0x00], "asterix.063_091", { "asterix.063_091_VALUE": "-180" } ) validator.add_dissection( [0x01, 0x10, 0x7f, 0xff], "asterix.063_092", { "asterix.063_092_VALUE": "179.994506835938" } ) validator.add_dissection( [0x01, 0x10, 0x80, 0x00], "asterix.063_092", { "asterix.063_092_VALUE": "-180" } ) validator.check_dissections() def test_undefined_value_handling(self, asterix_validator): '''verifies that the dissector can dissect undefined field values by setting the maximum value of bits or by setting all undefined bits''' validator = asterix_validator(63) validator.add_dissection( [0x01, 0x08], "asterix.spare", "" ) '''TODO: re-enable RE and SP tests when implemented validator.add_dissection( [0x01, 0x04, 0x02, 0x00], "asterix.063_RE", { "asterix.re_field_len": "2", "asterix.fspec": "" } ) validator.add_dissection( [0x01, 0x04, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.063_RE", { "asterix.fspec": "", "asterix.re_field_len": "16" } ) validator.add_dissection( [0x01, 0x02, 0x01], "asterix.063_SP", "" ) validator.add_dissection( [0x01, 0x02, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.063_SP", "" ) ''' validator.check_dissections() class TestCategory065: ''' Unittest case for ASTERIX Category 065 Online specification: https://www.eurocontrol.int/publications/cat065-surveillance-data-processing-system-sdps-service-status-messages-part-15 https://www.eurocontrol.int/publications/cat065-coding-rules-reserved-expansion-field-part-15-appendix Part 15 Category 65 (1.4) SDPS Service Status Reports Standard User Application Profile FRN Data Item Information Length 1 I065/010 Data Source Identifier 2 2 I065/000 Message Type 1 3 I065/015 Service Identification 1 4 I065/030 Time of Message 3 5 I065/020 Batch Number 1 6 I065/040 SDPS Configuration and Status 1 7 I065/050 Service Status Report 1 FX - Field extension indicator - 8 - Spare - 9 - Spare - 10 - Spare - 11 - Spare - 12 - Spare - 13 RE Reserved Expansion Field 1+1+ 14 SP Special Purpose Field 1+1+ FX - Field extension indicator - ''' def test_for_fields(self, asterix_validator): '''verifies existence of all fields and their maximum value''' validator = asterix_validator(65) validator.add_dissection( [0x80, 0xff, 0x00], "asterix.065_010", { "asterix.065_010_SAC": "0xff", "asterix.065_010_SIC": "0x00" } ) validator.add_dissection( [0x80, 0x00, 0xff], "asterix.065_010", { "asterix.065_010_SAC": "0x00", "asterix.065_010_SIC": "0xff" } ) validator.add_dissection( [0x40, 0x03], "asterix.065_000", { "asterix.065_000_VALUE": "3" } ) validator.add_dissection( [0x20, 0xff], "asterix.065_015", { "asterix.065_015_VALUE": "0xff" } ) validator.add_dissection( [0x10, 0xa8, 0xbf, 0xff], "asterix.065_030", { "asterix.065_030_VALUE": "86399.9921875" } ) validator.add_dissection( [0x08, 0xff], "asterix.065_020", { "asterix.065_020_VALUE": "255" } ) validator.add_dissection( [0x04, 0xc0], "asterix.065_040", { "asterix.065_040_NOGO": "3", "asterix.065_040_OVL": "0", "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0", "asterix.065_040_STTN": "0" } ) validator.add_dissection( [0x04, 0x20], "asterix.065_040", { "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "1", "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0", "asterix.065_040_STTN": "0" 
class TestCategory065:
    '''
    Unittest case for ASTERIX Category 065

    Online specification:
    https://www.eurocontrol.int/publications/cat065-surveillance-data-processing-system-sdps-service-status-messages-part-15
    https://www.eurocontrol.int/publications/cat065-coding-rules-reserved-expansion-field-part-15-appendix

    Part 15 Category 65 (1.4)
    SDPS Service Status Reports

    Standard User Application Profile

    FRN  Data Item  Information                      Length
     1   I065/010   Data Source Identifier              2
     2   I065/000   Message Type                        1
     3   I065/015   Service Identification              1
     4   I065/030   Time of Message                     3
     5   I065/020   Batch Number                        1
     6   I065/040   SDPS Configuration and Status       1
     7   I065/050   Service Status Report               1
    FX   -          Field extension indicator           -
     8   -          Spare                               -
     9   -          Spare                               -
    10   -          Spare                               -
    11   -          Spare                               -
    12   -          Spare                               -
    13   RE         Reserved Expansion Field           1+1+
    14   SP         Special Purpose Field              1+1+
    FX   -          Field extension indicator           -
    '''

    def test_for_fields(self, asterix_validator):
        '''verifies existence of all fields and their maximum value'''
        validator = asterix_validator(65)
        validator.add_dissection([0x80, 0xff, 0x00], "asterix.065_010", {"asterix.065_010_SAC": "0xff", "asterix.065_010_SIC": "0x00"})
        validator.add_dissection([0x80, 0x00, 0xff], "asterix.065_010", {"asterix.065_010_SAC": "0x00", "asterix.065_010_SIC": "0xff"})
        validator.add_dissection([0x40, 0x03], "asterix.065_000", {"asterix.065_000_VALUE": "3"})
        validator.add_dissection([0x20, 0xff], "asterix.065_015", {"asterix.065_015_VALUE": "0xff"})
        validator.add_dissection([0x10, 0xa8, 0xbf, 0xff], "asterix.065_030", {"asterix.065_030_VALUE": "86399.9921875"})
        validator.add_dissection([0x08, 0xff], "asterix.065_020", {"asterix.065_020_VALUE": "255"})
        validator.add_dissection([0x04, 0xc0], "asterix.065_040", {
            "asterix.065_040_NOGO": "3", "asterix.065_040_OVL": "0",
            "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0",
            "asterix.065_040_STTN": "0"
        })
        validator.add_dissection([0x04, 0x20], "asterix.065_040", {
            "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "1",
            "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0",
            "asterix.065_040_STTN": "0"
        })
        validator.add_dissection([0x04, 0x10], "asterix.065_040", {
            "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "0",
            "asterix.065_040_TSV": "1", "asterix.065_040_PSS": "0",
            "asterix.065_040_STTN": "0"
        })
        validator.add_dissection([0x04, 0x0c], "asterix.065_040", {
            "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "0",
            "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "3",
            "asterix.065_040_STTN": "0"
        })
        validator.add_dissection([0x04, 0x02], "asterix.065_040", {
            "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "0",
            "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0",
            "asterix.065_040_STTN": "1"
        })
        validator.add_dissection([0x02, 0xff], "asterix.065_050", {"asterix.065_050_VALUE": "255"})
        '''TODO: re-enable RE and SP tests when implemented
        validator.add_dissection([0x01, 0x04, 0x02, 0x00], "asterix.065_RE", {"asterix.re_field_len": "2", "asterix.fspec": ""})
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "90", "asterix.065_RE_SRP_Longitude": "0"}
        })
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "-90", "asterix.065_RE_SRP_Longitude": "0"}
        })
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "0", "asterix.065_RE_SRP_Longitude": "180"}
        })
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "0", "asterix.065_RE_SRP_Longitude": "-180"}
        })
        validator.add_dissection([0x01, 0x04, 0x04, 0x40, 0xff, 0xfc], "asterix.065_RE", {
            "asterix.re_field_len": "4", "asterix.fspec": "",
            "asterix.065_RE_ARL": {"asterix.065_RE_ARL_ARL": "65532"}
        })
        '''
        validator.check_dissections()

    def test_undefined_value_handling(self, asterix_validator):
        '''verifies that the dissector can dissect undefined field values by
        setting bits to their maximum value or by setting all undefined bits'''
        validator = asterix_validator(65)
        validator.add_dissection([0x40, 0xff], "asterix.065_000", {"asterix.065_000_VALUE": "255"})
        validator.add_dissection([0x10, 0xff, 0xff, 0xff], "asterix.065_030", {"asterix.065_030_VALUE": "131071.9921875"})
        validator.add_dissection([0x04, 0x01], "asterix.065_040", {
            "asterix.065_040_NOGO": "0", "asterix.065_040_OVL": "0",
            "asterix.065_040_TSV": "0", "asterix.065_040_PSS": "0",
            "asterix.065_040_STTN": "0"
        })
        validator.add_dissection([0x01, 0x80], "asterix.spare", "")
        validator.add_dissection([0x01, 0x40], "asterix.spare", "")
        validator.add_dissection([0x01, 0x20], "asterix.spare", "")
        validator.add_dissection([0x01, 0x10], "asterix.spare", "")
        validator.add_dissection([0x01, 0x08], "asterix.spare", "")
        '''TODO: re-enable RE and SP tests when implemented
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x7f, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "359.999999832362", "asterix.065_RE_SRP_Longitude": "0"}
        })
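        # The RE subfield SRP encodes latitude/longitude as 32-bit
        # two's-complement values with an LSB of 180/2**30 degrees, so
        # 0x20000000 -> 90 deg and 0x7fffffff -> 359.999999832362 deg (these
        # undefined-value tests deliberately exceed the +/-90/180 range).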
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "-360", "asterix.065_RE_SRP_Longitude": "0"}
        })
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "0", "asterix.065_RE_SRP_Longitude": "359.999999832362"}
        })
        validator.add_dissection([0x01, 0x04, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00], "asterix.065_RE", {
            "asterix.re_field_len": "10", "asterix.fspec": "",
            "asterix.065_RE_SRP": {"asterix.065_RE_SRP_Latitude": "0", "asterix.065_RE_SRP_Longitude": "-360"}
        })
        validator.add_dissection([0x01, 0x04, 0x04, 0x40, 0xff, 0xff], "asterix.065_RE", {
            "asterix.re_field_len": "4", "asterix.fspec": "",
            "asterix.065_RE_ARL": {"asterix.065_RE_ARL_ARL": "65535"}
        })
        validator.add_dissection([0x01, 0x02, 0x01], "asterix.065_SP", "")
        validator.add_dissection([0x01, 0x02, 0x10, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff], "asterix.065_SP", "")
        '''
        validator.check_dissections()
Python
wireshark/test/suite_dissectors/group_netperfmeter.py
#
# Wireshark tests
#
# Copyright 2021 by Thomas Dreibholz <dreibh [AT] simula.no>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#

'''NetPerfMeter tests'''

import subprocess
import pytest


class TestNetperfmeter:

    def test_netperfmeter_test_control(self, cmd_tshark, capture_file, test_env):
        '''Checks whether the NetPerfMeter dissector correctly handles NetPerfMeter Control via SCTP.'''

        # Test: Identify and decode NetPerfMeter Control via SCTP
        stdout = subprocess.check_output((cmd_tshark,
                                          '-r', capture_file('netperfmeter.pcapng.gz'),
                                          '-Y', 'sctp && netperfmeter && ((netperfmeter.message_type != 5) && (netperfmeter.message_type != 4))'
                                          ), encoding='utf-8', env=test_env)
        result = ''.join([x.strip()+"\n" for x in stdout.splitlines()])
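        # The display filter keeps only the SCTP control traffic: message
        # types 4 and 5 (presumably the Identify Flow and Data messages, which
        # the UDP test below exercises) are excluded, leaving Add/Remove Flow,
        # Acknowledge, Start/Stop Measurement and Results.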
        assert """\
8 0.019316433 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 260 NetPerfMeter Add Flow
10 0.038537718 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
14 0.326752277 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
18 0.333703948 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=1, Arwnd=106496) NetPerfMeter Add Flow
19 0.340092259 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=1, Arwnd=106496) NetPerfMeter Acknowledge
23 0.547510935 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
24 0.548336846 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=3, Arwnd=106496) NetPerfMeter Add Flow
25 0.556582544 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=2, Arwnd=106496) NetPerfMeter Acknowledge
28 0.768799828 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
29 0.769562835 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=5, Arwnd=106496) NetPerfMeter Add Flow
30 0.777872331 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=3, Arwnd=106496) NetPerfMeter Acknowledge
33 0.986925179 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
37 0.992962317 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=7, Arwnd=106496) NetPerfMeter Add Flow
38 1.000163511 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=4, Arwnd=106496) NetPerfMeter Acknowledge
41 1.245101828 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
45 1.248598897 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=9, Arwnd=106496) NetPerfMeter Add Flow
46 1.257101874 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=5, Arwnd=106496) NetPerfMeter Acknowledge
49 1.502117462 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
53 1.509411259 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=11, Arwnd=106496) NetPerfMeter Add Flow
54 1.518356124 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=6, Arwnd=106496) NetPerfMeter Acknowledge
57 1.762124577 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
61 1.768546288 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=13, Arwnd=106496) NetPerfMeter Add Flow
62 1.776275446 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=7, Arwnd=106496) NetPerfMeter Acknowledge
65 1.996204594 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
69 2.003084950 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=15, Arwnd=106496) NetPerfMeter Add Flow
70 2.012723649 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=8, Arwnd=106496) NetPerfMeter Acknowledge
73 2.253277911 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
77 2.259089003 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=17, Arwnd=106496) NetPerfMeter Add Flow
78 2.267758027 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=9, Arwnd=106496) NetPerfMeter Acknowledge
81 2.513148441 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
85 2.519444777 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=19, Arwnd=106496) NetPerfMeter Add Flow
86 2.526479512 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=10, Arwnd=106496) NetPerfMeter Acknowledge
89 2.772395957 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
93 2.781575331 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=21, Arwnd=106496) NetPerfMeter Add Flow
94 2.789065601 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=11, Arwnd=106496) NetPerfMeter Acknowledge
97 2.998736571 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
101 3.005046187 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=23, Arwnd=106496) NetPerfMeter Add Flow
102 3.011025634 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=12, Arwnd=106496) NetPerfMeter Acknowledge
105 3.255120658 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
109 3.262979723 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=25, Arwnd=106496) NetPerfMeter Add Flow
110 3.270638348 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=13, Arwnd=106496) NetPerfMeter Acknowledge
113 3.518145868 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
118 3.536880998 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=27, Arwnd=106496) NetPerfMeter Add Flow
119 3.541489068 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=14, Arwnd=106496) NetPerfMeter Acknowledge
123 3.776536632 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
124 3.777268092 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=29, Arwnd=106496) NetPerfMeter Add Flow
125 3.784200653 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=15, Arwnd=106496) NetPerfMeter Acknowledge
128 3.995220129 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
129 3.995907203 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=31, Arwnd=106496) NetPerfMeter Add Flow
131 4.006264635 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=16, Arwnd=106496) NetPerfMeter Acknowledge
135 4.215292054 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
136 4.216018889 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=33, Arwnd=106496) NetPerfMeter Add Flow
137 4.222906817 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=17, Arwnd=106496) NetPerfMeter Acknowledge
141 4.430858169 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
142 4.431619137 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=35, Arwnd=106496) NetPerfMeter Add Flow
143 4.439186831 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=18, Arwnd=106496) NetPerfMeter Acknowledge
147 4.647960736 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
148 4.648753903 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=37, Arwnd=106496) NetPerfMeter Add Flow
149 4.654062259 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=19, Arwnd=106496) NetPerfMeter Acknowledge
153 4.861696359 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
158 4.881874024 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 276 SACK (Ack=39, Arwnd=106496) NetPerfMeter Add Flow
159 4.886932549 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=20, Arwnd=106496) NetPerfMeter Acknowledge
163 5.095411239 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
184 5.101147570 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 96 SACK (Ack=41, Arwnd=106496) NetPerfMeter Start Measurement
227 5.315482367 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
2084 15.615367349 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 80 NetPerfMeter Stop Measurement
2086 16.091680420 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 88 NetPerfMeter Acknowledge
2087 16.092542043 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2088 16.092542469 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2089 16.092542579 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2090 16.092542691 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2093 16.098744445 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2095 16.099492702 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2096 16.099493075 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2097 16.099493204 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2098 16.099493337 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2101 16.108240278 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2102 16.109665125 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2103 16.109665219 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2104 16.109665258 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2105 16.109665298 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2106 16.109665335 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2107 16.109665374 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2108 16.109665413 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2109 16.109665451 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2114 16.115534573 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2116 16.117085522 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2117 16.117085740 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2118 16.117085774 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2119 16.117085808 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2120 16.117085841 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2121 16.117085874 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2122 16.117085906 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2123 16.117085940 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2125 16.117208639 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2129 16.117847682 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2131 16.120936939 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2132 16.121564917 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2134 16.124001266 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2135 16.126359615 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2136 16.126359784 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2137 16.126359829 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2138 16.126359875 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2139 16.126359923 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2140 16.126359972 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2141 16.126360016 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2142 16.126360065 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2144 16.126516782 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2145 16.126516838 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2150 16.126568776 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2151 16.126568857 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2152 16.126568903 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2153 16.126568947 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1088 NetPerfMeter Results
2154 16.126568990 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2155 16.126569037 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2156 16.126569084 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2162 16.128296076 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2163 16.128991998 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2164 16.128992266 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2166 16.132186659 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2168 16.133696852 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2169 16.133697204 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2170 16.133697304 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2171 16.133697400 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2172 16.133697505 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2175 16.136109923 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2177 16.138000289 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2178 16.138000795 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2179 16.138000952 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2180 16.138001087 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2181 16.138001222 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2182 16.138001355 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2183 16.138001497 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2184 16.138001654 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2189 16.138407582 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2190 16.138407852 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2191 16.138407948 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1360 NetPerfMeter Results
2193 16.138949169 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=113, Arwnd=106496) NetPerfMeter Remove Flow
2194 16.147965640 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=23, Arwnd=106496) NetPerfMeter Acknowledge
2195 16.149160472 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2197 16.149694877 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2199 16.359112863 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 400 NetPerfMeter Results
2200 16.360439472 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=117, Arwnd=106496) NetPerfMeter Remove Flow
2201 16.367838301 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=24, Arwnd=106496) NetPerfMeter Acknowledge
2202 16.369999711 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2204 16.370249698 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 252 NetPerfMeter Results
2205 16.371333521 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=120, Arwnd=106496) NetPerfMeter Remove Flow
2206 16.377931209 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=25, Arwnd=106496) NetPerfMeter Acknowledge
2207 16.379416052 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2209 16.379921676 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2211 16.586758032 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 460 NetPerfMeter Results
2212 16.588004878 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=124, Arwnd=106496) NetPerfMeter Remove Flow
2213 16.596287178 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=26, Arwnd=106496) NetPerfMeter Acknowledge
2214 16.600862615 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2216 16.601572074 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 256 NetPerfMeter Results
2217 16.602770488 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=127, Arwnd=106496) NetPerfMeter Remove Flow
2218 16.608528578 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=27, Arwnd=106496) NetPerfMeter Acknowledge
2219 16.610851595 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2221 16.611228721 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2224 16.820428495 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 368 NetPerfMeter Results
2226 16.821725312 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=131, Arwnd=106496) NetPerfMeter Remove Flow
2227 16.829665670 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=28, Arwnd=106496) NetPerfMeter Acknowledge
2228 16.831477557 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2230 16.831711400 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 276 NetPerfMeter Results
2233 16.832859448 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=134, Arwnd=106496) NetPerfMeter Remove Flow
2235 16.838963861 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=29, Arwnd=106496) NetPerfMeter Acknowledge
2236 16.839917250 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results
2238 16.841055807 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 236 NetPerfMeter Results
2241 16.842312060 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=137, Arwnd=106496) NetPerfMeter Remove Flow
2243 16.847748197 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=30, Arwnd=106496) NetPerfMeter Acknowledge
104 SACK (Ack=30, Arwnd=106496) NetPerfMeter Acknowledge 2244 16.848933463 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2246 16.849525492 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 236 NetPerfMeter Results 2249 16.850661714 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=140, Arwnd=106496) NetPerfMeter Remove Flow 2251 16.857615760 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=31, Arwnd=106496) NetPerfMeter Acknowledge 2252 16.859140443 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2254 16.859653107 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 976 NetPerfMeter Results 2257 16.860923512 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=143, Arwnd=106496) NetPerfMeter Remove Flow 2259 16.866293943 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=32, Arwnd=106496) NetPerfMeter Acknowledge 2260 16.867822941 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2262 16.868668201 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2265 17.079265007 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 368 NetPerfMeter Results 2267 17.080555093 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=147, Arwnd=106496) NetPerfMeter Remove Flow 2268 17.089928582 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=33, Arwnd=106496) NetPerfMeter Acknowledge 2269 17.091479195 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2271 17.092073003 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 272 NetPerfMeter Results 2274 17.093044526 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=150, Arwnd=106496) NetPerfMeter Remove Flow 2276 17.099098185 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=34, Arwnd=106496) NetPerfMeter Acknowledge 2277 17.100201203 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2279 17.100852674 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 244 NetPerfMeter Results 2282 17.101916382 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=153, Arwnd=106496) NetPerfMeter Remove Flow 2284 17.109026614 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=35, Arwnd=106496) NetPerfMeter Acknowledge 2285 17.112907819 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2287 17.115302865 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 244 NetPerfMeter Results 2290 17.116443045 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=156, Arwnd=106496) NetPerfMeter Remove Flow 2292 17.122058351 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=36, Arwnd=106496) NetPerfMeter Acknowledge 2293 17.125840461 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2295 17.126459769 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 976 NetPerfMeter Results 2297 17.126760188 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=159, Arwnd=106496) NetPerfMeter Remove Flow 2300 17.132579296 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=37, Arwnd=106496) NetPerfMeter Acknowledge 2301 17.133301477 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2302 17.133302153 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 284 NetPerfMeter Results 2304 17.133706810 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=162, Arwnd=106496) NetPerfMeter Remove Flow 2305 17.138731552 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=38, Arwnd=106496) NetPerfMeter 
Acknowledge 2306 17.139818471 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2308 17.140335127 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 288 NetPerfMeter Results 2309 17.140830809 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=165, Arwnd=106496) NetPerfMeter Remove Flow 2310 17.145622016 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=39, Arwnd=106496) NetPerfMeter Acknowledge 2311 17.147059541 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2313 17.148571671 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2314 17.149475099 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2316 17.150223037 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2318 17.359940788 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 740 NetPerfMeter Results 2319 17.361102522 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=171, Arwnd=106496) NetPerfMeter Remove Flow 2320 17.368203507 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=40, Arwnd=106496) NetPerfMeter Acknowledge 2321 17.370823736 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2323 17.371236232 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 280 NetPerfMeter Results 2324 17.372205596 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=174, Arwnd=106496) NetPerfMeter Remove Flow 2325 17.378113171 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=41, Arwnd=106496) NetPerfMeter Acknowledge 2326 17.379408121 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2328 17.379940226 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 284 NetPerfMeter Results 2329 17.380772832 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=177, Arwnd=106496) NetPerfMeter Remove Flow 2330 17.389000119 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=42, Arwnd=106496) NetPerfMeter Acknowledge 2331 17.389893116 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2332 17.389893325 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 380 NetPerfMeter Results 2334 17.390667295 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 100 SACK (Ack=180, Arwnd=106496) NetPerfMeter Remove Flow 2335 17.395701306 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 104 SACK (Ack=43, Arwnd=106496) NetPerfMeter Acknowledge 2336 17.397791412 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1468 NetPerfMeter Results 2338 17.398332887 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 284 NetPerfMeter Results """.replace("\r\n", "\n") in result def test_netperfmeter_test_udp(self, cmd_tshark, capture_file, test_env): '''Checks whether the NetPerfMeter dissector correctly handles NetPerfMeter Data via UDP.''' # Test: Identify and decode NetPerfMeter Data via UDP stdout = subprocess.check_output((cmd_tshark, '-r', capture_file('netperfmeter.pcapng.gz'), '-Y', 'frame.number >= 1 && frame.number <= 512 && udp && netperfmeter' ), encoding='utf-8', env=test_env) result = ''.join([x.strip()+"\n" for x in stdout.splitlines()]) assert """\ 26 0.556893098 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 70 NetPerfMeter Identify Flow 31 0.778199411 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 70 NetPerfMeter Identify Flow 166 5.097058561 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 167 5.097156368 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 556 NetPerfMeter Data 203 5.188581678 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 
NetPerfMeter Data 204 5.198869201 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data 229 5.347412858 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 248 5.521667162 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 NetPerfMeter Data 249 5.529727434 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data 251 5.597939044 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 252 5.597979296 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 556 NetPerfMeter Data 315 5.848599107 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 326 5.869626418 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 NetPerfMeter Data 327 5.870477253 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data 336 6.099006262 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 337 6.099035694 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 556 NetPerfMeter Data 374 6.239221234 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 NetPerfMeter Data 375 6.240243736 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data 406 6.349592731 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 429 6.538916191 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 NetPerfMeter Data 430 6.540208385 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data 438 6.600112279 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 556 NetPerfMeter Data 439 6.600127896 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 499 6.850796522 192.168.0.20 → 192.168.0.27 UDP, NetPerfMeter 1068 NetPerfMeter Data 509 6.874579699 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 1068 NetPerfMeter Data 510 6.875289205 192.168.0.27 → 192.168.0.20 UDP, NetPerfMeter 556 NetPerfMeter Data """.replace("\r\n", "\n") in result def test_netperfmeter_test_dccp(self, cmd_tshark, capture_file, test_env): '''Checks whether the NetPerfMeter dissector correctly handles NetPerfMeter Data via DCCP.''' # Test: Identify and decode NetPerfMeter Data via DCCP stdout = subprocess.check_output((cmd_tshark, '-r', capture_file('netperfmeter.pcapng.gz'), '-Y', 'frame.number >= 1 && frame.number <= 256 && dccp && netperfmeter' ), encoding='utf-8', env=test_env) result = ''.join([x.strip()+"\n" for x in stdout.splitlines()]) assert """\ 39 1.000448305 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 47 1.257376250 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 55 1.518626642 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 63 1.776552210 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 71 2.013038051 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 79 2.268029558 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 87 2.526765502 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 95 2.789401573 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 103 3.011188128 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 111 3.270945041 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 106 NetPerfMeter Identify Flow 168 5.097388740 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1112 NetPerfMeter Data 169 5.097563303 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1112 NetPerfMeter Data 170 5.097680252 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 600 NetPerfMeter Data 171 5.097804675 192.168.0.20 → 
192.168.0.27 DCCP, NetPerfMeter 600 NetPerfMeter Data 172 5.097860862 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 336 NetPerfMeter Data 173 5.097960425 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1104 NetPerfMeter Data 174 5.098168605 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1104 NetPerfMeter Data 175 5.098268064 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 592 NetPerfMeter Data 176 5.098379939 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 592 NetPerfMeter Data 177 5.098474409 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 336 NetPerfMeter Data 205 5.203489906 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1088 NetPerfMeter Data 206 5.208120579 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1088 NetPerfMeter Data 207 5.211621270 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 576 NetPerfMeter Data 208 5.216629302 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 576 NetPerfMeter Data 209 5.218637208 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 320 NetPerfMeter Data 210 5.220923234 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1088 NetPerfMeter Data 211 5.224470647 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1088 NetPerfMeter Data 212 5.228633904 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 576 NetPerfMeter Data 213 5.235096316 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 576 NetPerfMeter Data 214 5.235387030 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 320 NetPerfMeter Data 230 5.347723929 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1088 NetPerfMeter Data 231 5.348299245 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1088 NetPerfMeter Data 236 5.432621676 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 320 NetPerfMeter Data 237 5.433090508 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 320 NetPerfMeter Data 238 5.458215001 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1104 NetPerfMeter Data 240 5.472252869 192.168.0.27 → 192.168.0.20 DCCP, NetPerfMeter 1104 NetPerfMeter Data 250 5.597889485 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1096 NetPerfMeter Data 255 5.598126766 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 1088 NetPerfMeter Data 256 5.598378615 192.168.0.20 → 192.168.0.27 DCCP, NetPerfMeter 576 NetPerfMeter Data """.replace("\r\n", "\n") in result def test_netperfmeter_test_tcp(self, cmd_tshark, capture_file, test_env): '''Checks whether the NetPerfMeter dissector correctly handles NetPerfMeter Data via TCP.''' # Test: Identify and decode NetPerfMeter Data via TCP stdout = subprocess.check_output((cmd_tshark, '-r', capture_file('netperfmeter.pcapng.gz'), '-Y', 'frame.number >= 1 && frame.number <= 512 && tcp && netperfmeter' ), encoding='utf-8', env=test_env) result = ''.join([x.strip()+"\n" for x in stdout.splitlines()]) assert """\ 12 0.038833197 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 94 NetPerfMeter Identify Flow 20 0.340423798 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 94 NetPerfMeter Identify Flow 164 5.096822593 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 165 5.096933125 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 580 NetPerfMeter Data 199 5.180197902 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 201 5.183618768 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data 228 5.347212980 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 243 5.510843364 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 246 5.518285725 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data 253 5.598004664 192.168.0.20 → 192.168.0.27 TCP, 
NetPerfMeter 580 NetPerfMeter Data 254 5.598037007 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 313 5.843608886 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 316 5.848649435 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 320 5.852294838 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data 335 6.098962324 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 580 NetPerfMeter Data 342 6.099194942 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 370 6.178557080 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 372 6.186668259 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data 408 6.349677977 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 425 6.512522597 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 427 6.521373219 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data 436 6.600056667 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 580 NetPerfMeter Data 441 6.600170332 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 497 6.846781911 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 1092 NetPerfMeter Data 502 6.850917051 192.168.0.20 → 192.168.0.27 TCP, NetPerfMeter 1092 NetPerfMeter Data 507 6.857231771 192.168.0.27 → 192.168.0.20 TCP, NetPerfMeter 580 NetPerfMeter Data """.replace("\r\n", "\n") in result def test_netperfmeter_test_sctp(self, cmd_tshark, capture_file, test_env): '''Checks whether the NetPerfMeter dissector correctly handles NetPerfMeter Data via SCTP.''' # Test: Identify and decode NetPerfMeter Data via SCTP stdout = subprocess.check_output((cmd_tshark, '-r', capture_file('netperfmeter.pcapng.gz'), '-Y', 'frame.number >= 1 && frame.number <= 256 && sctp && netperfmeter && ((netperfmeter.message_type == 5) || (netperfmeter.message_type == 4))' ), encoding='utf-8', env=test_env) result = ''.join([x.strip()+"\n" for x in stdout.splitlines()]) assert """\ 120 3.541753666 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 126 3.784578040 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 132 4.006622016 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 138 4.223204664 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 144 4.439513544 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 150 4.654398275 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 160 4.887196553 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 92 NetPerfMeter Identify Flow 178 5.098706269 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 1088 NetPerfMeter Data 180 5.098939899 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 660 NetPerfMeter Data 181 5.099244178 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter, NetPerfMeter 1232 NetPerfMeter Data NetPerfMeter Data 182 5.099428646 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 1088 NetPerfMeter Data 183 5.099642887 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 1088 NetPerfMeter Data 215 5.242589734 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1104 SACK (Ack=11, Arwnd=106496) NetPerfMeter Data 216 5.242748399 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter, NetPerfMeter 1248 SACK (Ack=0, Arwnd=211968) NetPerfMeter Data NetPerfMeter Data 218 5.247412901 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 680 NetPerfMeter Data 220 5.252114400 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 208 SACK (Ack=13, Arwnd=105344) 
NetPerfMeter Data 221 5.266387026 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1088 NetPerfMeter Data 223 5.266637245 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1088 NetPerfMeter Data 224 5.273527654 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1104 SACK (Ack=1, Arwnd=106496) NetPerfMeter Data 232 5.349726358 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 208 SACK (Ack=5, Arwnd=212992) NetPerfMeter Data 235 5.355361743 192.168.0.27 → 192.168.0.20 SCTP, NetPerfMeter 1104 SACK (Ack=14, Arwnd=106368) NetPerfMeter Data 242 5.475302128 192.168.0.20 → 192.168.0.27 SCTP, NetPerfMeter 208 SACK (Ack=6, Arwnd=212992) NetPerfMeter Data """.replace("\r\n", "\n") in result
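The four checks above share one pattern: run tshark over the bundled netperfmeter.pcapng.gz with a display filter, strip each output line, and assert that a known block of packet summaries appears verbatim. For replaying a check outside the pytest fixtures, here is a minimal sketch; the assumption that tshark is on PATH and that the capture sits in the current directory is mine, not the suite's:

import subprocess

# Re-run the UDP check by hand with the same display filter as above.
out = subprocess.check_output(
    ["tshark", "-r", "netperfmeter.pcapng.gz",
     "-Y", "frame.number >= 1 && frame.number <= 512 && udp && netperfmeter"],
    encoding="utf-8")
# The suite strips trailing whitespace from every line before comparing.
result = "".join(line.strip() + "\n" for line in out.splitlines())
print(result[:300])  # spot-check the first packet summaries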
Shell Script
wireshark/tools/alpine-setup.sh
#!/bin/bash
# Setup development environment on alpine systems
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#

set -e -u -o pipefail

function print_usage() {
	printf "\\nUtility to setup an Alpine system for Wireshark development.\\n"
	printf "The basic usage installs the needed software\\n\\n"
	printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
	printf "\\t--install-optional: install optional software as well\\n"
	printf "\\t--install-all: install everything\\n"
	printf "\\t[other]: other options are passed as-is to apk\\n"
}

ADDITIONAL=0
OPTIONS=
for arg; do
	case $arg in
		--help)
			print_usage
			exit 0
			;;
		--install-optional)
			ADDITIONAL=1
			;;
		--install-all)
			ADDITIONAL=1
			;;
		*)
			OPTIONS="$OPTIONS $arg"
			;;
	esac
done

# Check if the user is root
if [ "$(id -u)" -ne 0 ]
then
	echo "You must be root."
	exit 1
fi

BASIC_LIST="
	cmake
	ninja
	gcc
	g++
	glib-dev
	libgcrypt-dev
	flex
	tiff-dev
	c-ares-dev
	pcre2-dev
	qt5-qtbase-dev
	qt5-qttools-dev
	qt5-qtmultimedia-dev
	qt5-qtsvg-dev
	speexdsp-dev
	python3
	"

ADDITIONAL_LIST="
	git
	asciidoctor
	libssh-dev
	spandsp-dev
	libcap-dev
	libpcap-dev
	libxml2-dev
	libmaxminddb-dev
	krb5-dev
	lz4-dev
	gnutls-dev
	snappy-dev
	nghttp2-dev
	lua5.2-dev
	libnl3-dev
	sbc-dev
	minizip-dev
	brotli-dev
	perl
	py3-pytest
	py3-pytest-xdist
	"

# Uncomment to add PNG compression utilities used by compress-pngs:
# ADDITIONAL_LIST="$ADDITIONAL_LIST \
#	advancecomp \
#	optipng \
#	oxipng \
#	pngcrush"

# Adds package $2 to list variable $1 if the package is found.
# If $3 is given, then this version requirement must be satisfied.
add_package() {
	local list="$1" pkgname="$2"

	# fail if the package is not known
	apk list $pkgname &> /dev/null || return 1

	# package is found, append it to list
	eval "${list}=\"\${${list}} \${pkgname}\""
}

ACTUAL_LIST=$BASIC_LIST

# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
	ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

apk update || exit 2
apk add $ACTUAL_LIST $OPTIONS || exit 2

if [ $ADDITIONAL -eq 0 ]
then
	printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi
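The eval-based indirect assignment inside add_package() is the one non-obvious construct in this script: it appends $pkgname to whatever list variable is named by $1, but only after a successful apk probe. Purely as an illustration, the same probe-then-append flow transliterated to Python (the package name is a placeholder, and apk's exit-status behaviour is assumed to match what the shell test relies on):

import subprocess

def add_package(pkg_list, pkgname):
    # Mirrors: apk list $pkgname &> /dev/null || return 1
    probe = subprocess.run(["apk", "list", pkgname],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
    if probe.returncode != 0:
        return False          # package not known: leave the list alone
    pkg_list.append(pkgname)  # package found: append it
    return True

basic = ["cmake", "ninja"]
add_package(basic, "glib-dev")  # placeholder package name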
Shell Script
wireshark/tools/arch-setup.sh
#!/bin/bash
# Setup development environment on Arch Linux
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#

set -e -u -o pipefail

function print_usage() {
	printf "\\nUtility to setup a pacman-based system for Wireshark development.\\n"
	printf "The basic usage installs the needed software\\n\\n"
	printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
	printf "\\t--install-optional: install optional software as well\\n"
	printf "\\t--install-test-deps: install packages required to run all tests\\n"
	printf "\\t--install-all: install everything\\n"
	printf "\\t[other]: other options are passed as-is to pacman\\n"
	printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
}

ADDITIONAL=0
TESTDEPS=0
AUR=0
OPTIONS=
for arg; do
	case $arg in
		--help)
			print_usage
			exit 0
			;;
		--install-optional)
			ADDITIONAL=1
			;;
		--install-test-deps)
			TESTDEPS=1
			;;
		--install-all)
			ADDITIONAL=1
			TESTDEPS=1
			AUR=1
			;;
		*)
			OPTIONS="$OPTIONS $arg"
			;;
	esac
done

# Check if the user is root
if [ "$(id -u)" -ne 0 ]
then
	echo "You must be root."
	exit 1
fi

BASIC_LIST="base-devel \
	bcg729 \
	brotli \
	c-ares \
	cmake \
	git \
	glib2 \
	gnutls \
	krb5 \
	libcap \
	libgcrypt \
	libilbc \
	libmaxminddb \
	libnghttp2 \
	libnl \
	libpcap \
	libssh \
	libxml2 \
	lua52 \
	lz4 \
	minizip \
	ninja \
	pcre2 \
	python \
	qt6-base \
	qt6-multimedia \
	qt6-tools \
	qt6-5compat \
	sbc \
	snappy \
	spandsp \
	speexdsp \
	zlib \
	zstd"

ADDITIONAL_LIST="asciidoctor \
	ccache \
	docbook-xml \
	docbook-xsl \
	doxygen \
	libxslt \
	perl"

TESTDEPS_LIST="python-pytest \
	python-pytest-xdist"

ACTUAL_LIST=$BASIC_LIST

if [ $ADDITIONAL -ne 0 ]
then
	ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

if [ $TESTDEPS -ne 0 ]
then
	ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
fi

# Partial upgrades are unsupported.
pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2

if [ $ADDITIONAL -eq 0 ]
then
	printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi

if [ $TESTDEPS -eq 0 ]
then
	printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
fi

if [ $AUR -ne 0 ]
then
	printf "\n*** These and other packages may also be found in the AUR: libsmi.\n"
fi
wireshark/tools/asn2deb
#!/usr/bin/env python3
# asn2deb - quick hack by W. Borgert <[email protected]> to create
# Debian GNU/Linux packages from ASN.1 files for Wireshark.
# Copyright 2004, W. Borgert
# ASN.1 module for Wireshark, use of snacc type table:
# Copyright 2003, Matthijs Melchior <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
# SPDX-License-Identifier: GPL-2.0-or-later

import getopt, os, sys, time

scriptinfo = """asn2deb version 2004-02-17
Copyright 2004, W. Borgert
Free software, released under the terms of the GPL."""

options = {'asn': None,
           'dbopts': "",
           'email': "[email protected]",
           'help': 0,
           'name': "No Name",
           'preserve': 0,
           'version': 0}

def create_file(filename, content, mode = None):
    """Create a file with given content."""
    global options
    if options['preserve'] and os.path.isfile(filename):
        return
    f = open(filename, 'w')
    f.write(content)
    f.close()
    if mode:
        os.chmod(filename, mode)

def create_files(version, deb, email, asn, name, iso, rfc):
    """Create all files for the .deb build process."""
    base = asn.lower()[:-5]
    if not os.path.isdir("packaging/debian"):
        os.mkdir("packaging/debian")

    create_file("packaging/debian/rules", """#!/usr/bin/make -f

include /usr/share/cdbs/1/rules/debhelper.mk
include /usr/share/cdbs/1/class/autotools.mk

PREFIX=`pwd`/packaging/debian/wireshark-asn1-%s

binary-post-install/wireshark-asn1-%s::
	rm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a
""" % (base, base, version), 0o755)

    create_file("packaging/debian/control", """Source: wireshark-asn1-%s
Section: net
Priority: optional
Maintainer: %s <%s>
Standards-Version: 3.6.1.0
Build-Depends: snacc, autotools-dev, debhelper, cdbs

Package: wireshark-asn1-%s
Architecture: all
Depends: wireshark (= %s)
Description: ASN.1/BER dissector for %s
 This package provides a type table for decoding BER (Basic
 Encoding Rules) data over TCP or UDP, described by an ASN.1
 (Abstract Syntax Notation 1) file '%s.asn1'.
""" % (base, name, email, base, deb, base, base))

    create_file("packaging/debian/changelog",
"""wireshark-asn1-%s (0.0.1-1) unstable; urgency=low

  * Automatically created package.

 -- %s <%s>  %s
""" % (base, name, email, rfc + "\n (" + iso + ")"))

    create_file("packaging/debian/copyright",
"""This package has been created automatically by asn2deb on
%s for Debian GNU/Linux.

Wireshark: https://www.wireshark.org/

Copyright:

GPL, as evidenced by existence of GPL license file \"COPYING\".
(the GNU GPL may be viewed on Debian systems in
/usr/share/common-licenses/GPL)
""" % (iso))

def get_wrs_version():
    """Detect version of wireshark-dev package."""
    deb = os.popen(
        "dpkg-query -W --showformat='${Version}' wireshark-dev").read()
    # str.find() replaces the Python 2 string.find() used originally;
    # -1 means the version string carries no Debian revision.
    debv = deb.find("-")
    if debv == -1:
        debv = len(deb)
    version = deb[deb.find(":")+1:debv]
    return version, deb

def get_time():
    """Detect current time and return ISO and RFC time string."""
    currenttime = time.gmtime()
    return time.strftime("%Y-%m-%d %H:%M:%S +0000", currenttime), \
           time.strftime("%a, %d %b %Y %H:%M:%S +0000", currenttime)

def main():
    global options
    process_opts(sys.argv)
    iso, rfc = get_time()
    version, deb = get_wrs_version()
    create_files(version, deb, options['email'], options['asn'],
                 options['name'], iso, rfc)
    os.system("dpkg-buildpackage " + options['dbopts'])

def process_opts(argv):
    """Process command line options."""
    global options
    try:
        opts, args = getopt.getopt(argv[1:], "a:d:e:hn:pv",
                                   ["asn=", "dbopts=", "email=", "help",
                                    "name=", "preserve", "version"])
    except getopt.GetoptError:
        usage(argv[0])
        sys.exit(1)
    for o, a in opts:
        if o in ("-a", "--asn"):
            options['asn'] = a
        if o in ("-d", "--dbopts"):
            options['dbopts'] = a
        if o in ("-e", "--email"):
            options['email'] = a
        if o in ("-h", "--help"):
            options['help'] = 1
        if o in ("-n", "--name"):
            options['name'] = a
        if o in ("-p", "--preserve"):
            options['preserve'] = 1
        if o in ("-v", "--version"):
            options['version'] = 1
    if options['help']:
        usage(argv[0])
        sys.exit(0)
    if options['version']:
        print(scriptinfo)
        sys.exit(0)
    if not options['asn']:
        print("mandatory ASN.1 file parameter missing")
        sys.exit(1)
    if not os.access(options['asn'], os.R_OK):
        print("ASN.1 file not accessible")
        sys.exit(1)

def usage(name):
    """Print usage help."""
    print("Usage: " + name + " <parameters>\n" + \
          "Parameters are\n" + \
          " --asn -a asn1file, ASN.1 file to use (mandatory)\n" + \
          " --dbopts -d opts, options for dpkg-buildpackage\n" + \
          " --email -e address, use e-mail address\n" + \
          " --help -h, print help and exit\n" + \
          " --name -n name, use user name\n" + \
          " --preserve -p, do not overwrite files\n" + \
          " --version -v, print version and exit\n" + \
          "Example:\n" + \
          name + " -e [email protected] -a bar.asn1 -n \"My Name\" " + \
          "-d \"-rfakeroot -uc -us\"")

if __name__ == '__main__':
    main()
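get_wrs_version() isolates the upstream version by slicing between the epoch colon and the Debian revision dash. A worked example with a made-up version string (not read from any real system) shows what the two find() calls extract:

# Hypothetical dpkg version string: epoch ":" upstream "-" revision
deb = "1:4.0.6-1"
debv = deb.find("-")
if debv == -1:       # native packages carry no revision
    debv = len(deb)
version = deb[deb.find(":") + 1:debv]
assert version == "4.0.6"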
Python
wireshark/tools/asn2wrs.py
#!/usr/bin/env python3 # # asn2wrs.py # ASN.1 to Wireshark dissector compiler # Copyright 2004 Tomas Kukosa # # SPDX-License-Identifier: MIT # """ASN.1 to Wireshark dissector compiler""" # # Compiler from ASN.1 specification to the Wireshark dissector # # Based on ASN.1 to Python compiler from Aaron S. Lav's PyZ3950 package licensed under the X Consortium license # https://www.pobox.com/~asl2/software/PyZ3950/ # (ASN.1 to Python compiler functionality is broken but not removed, it could be revived if necessary) # # It requires Dave Beazley's PLY parsing package licensed under the LGPL (tested with version 2.3) # https://www.dabeaz.com/ply/ # # # ITU-T Recommendation X.680 (07/2002), # Information technology - Abstract Syntax Notation One (ASN.1): Specification of basic notation # # ITU-T Recommendation X.681 (07/2002), # Information technology - Abstract Syntax Notation One (ASN.1): Information object specification # # ITU-T Recommendation X.682 (07/2002), # Information technology - Abstract Syntax Notation One (ASN.1): Constraint specification # # ITU-T Recommendation X.683 (07/2002), # Information technology - Abstract Syntax Notation One (ASN.1): Parameterization of ASN.1 specifications # # ITU-T Recommendation X.880 (07/1994), # Information technology - Remote Operations: Concepts, model and notation # import warnings import re import sys import os import os.path import time import getopt import traceback try: from ply import lex from ply import yacc except ImportError: # Fallback: use lex.py and yacc from the tools directory within the # Wireshark source tree if python-ply is not installed. import lex import yacc if sys.version_info[0] < 3: from string import maketrans # OID name -> number conversion table oid_names = { '/itu-t' : 0, '/itu' : 0, '/ccitt' : 0, '/itu-r' : 0, '0/recommendation' : 0, '0.0/a' : 1, '0.0/b' : 2, '0.0/c' : 3, '0.0/d' : 4, '0.0/e' : 5, '0.0/f' : 6, '0.0/g' : 7, '0.0/h' : 8, '0.0/i' : 9, '0.0/j' : 10, '0.0/k' : 11, '0.0/l' : 12, '0.0/m' : 13, '0.0/n' : 14, '0.0/o' : 15, '0.0/p' : 16, '0.0/q' : 17, '0.0/r' : 18, '0.0/s' : 19, '0.0/t' : 20, '0.0/tseries' : 20, '0.0/u' : 21, '0.0/v' : 22, '0.0/w' : 23, '0.0/x' : 24, '0.0/y' : 25, '0.0/z' : 26, '0/question' : 1, '0/administration' : 2, '0/network-operator' : 3, '0/identified-organization' : 4, '0/r-recommendation' : 5, '0/data' : 9, '/iso' : 1, '1/standard' : 0, '1/registration-authority' : 1, '1/member-body' : 2, '1/identified-organization' : 3, '/joint-iso-itu-t' : 2, '/joint-iso-ccitt' : 2, '2/presentation' : 0, '2/asn1' : 1, '2/association-control' : 2, '2/reliable-transfer' : 3, '2/remote-operations' : 4, '2/ds' : 5, '2/directory' : 5, '2/mhs' : 6, '2/mhs-motis' : 6, '2/ccr' : 7, '2/oda' : 8, '2/ms' : 9, '2/osi-management' : 9, '2/transaction-processing' : 10, '2/dor' : 11, '2/distinguished-object-reference' : 11, '2/reference-data-transfe' : 12, '2/network-layer' : 13, '2/network-layer-management' : 13, '2/transport-layer' : 14, '2/transport-layer-management' : 14, '2/datalink-layer' : 15, '2/datalink-layer-managemen' : 15, '2/datalink-layer-management-information' : 15, '2/country' : 16, '2/registration-procedures' : 17, '2/registration-procedure' : 17, '2/physical-layer' : 18, '2/physical-layer-management' : 18, '2/mheg' : 19, '2/genericULS' : 20, '2/generic-upper-layers-security' : 20, '2/guls' : 20, '2/transport-layer-security-protocol' : 21, '2/network-layer-security-protocol' : 22, '2/international-organizations' : 23, '2/internationalRA' : 23, '2/sios' : 24, '2/uuid' : 25, '2/odp' : 26, '2/upu' : 40, } 
ITEM_FIELD_NAME = '_item' UNTAG_TYPE_NAME = '_untag' def asn2c(id): return id.replace('-', '_').replace('.', '_').replace('&', '_') input_file = None g_conform = None lexer = None in_oid = False class LexError(Exception): def __init__(self, tok, filename=None): self.tok = tok self.filename = filename self.msg = "Unexpected character %r" % (self.tok.value[0]) Exception.__init__(self, self.msg) def __repr__(self): return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg) __str__ = __repr__ class ParseError(Exception): def __init__(self, tok, filename=None): self.tok = tok self.filename = filename self.msg = "Unexpected token %s(%r)" % (self.tok.type, self.tok.value) Exception.__init__(self, self.msg) def __repr__(self): return "%s:%d: %s" % (self.filename, self.tok.lineno, self.msg) __str__ = __repr__ class DuplicateError(Exception): def __init__(self, type, ident): self.type = type self.ident = ident self.msg = "Duplicate %s for %s" % (self.type, self.ident) Exception.__init__(self, self.msg) def __repr__(self): return self.msg __str__ = __repr__ class CompError(Exception): def __init__(self, msg): self.msg = msg Exception.__init__(self, self.msg) def __repr__(self): return self.msg __str__ = __repr__ states = ( ('braceignore','exclusive'), ) precedence = ( ('left', 'UNION', 'BAR'), ('left', 'INTERSECTION', 'CIRCUMFLEX'), ) # 11 ASN.1 lexical items static_tokens = { r'::=' : 'ASSIGNMENT', # 11.16 Assignment lexical item r'\.\.' : 'RANGE', # 11.17 Range separator r'\.\.\.' : 'ELLIPSIS', # 11.18 Ellipsis r'\[\[' : 'LVERBRACK', # 11.19 Left version brackets r'\]\]' : 'RVERBRACK', # 11.20 Right version brackets # 11.26 Single character lexical items r'\{' : 'LBRACE', r'\}' : 'RBRACE', r'<' : 'LT', #r'>' : 'GT', r',' : 'COMMA', r'\.' : 'DOT', r'\(' : 'LPAREN', r'\)' : 'RPAREN', r'\[' : 'LBRACK', r'\]' : 'RBRACK', r'-' : 'MINUS', r':' : 'COLON', #r'=' : 'EQ', #r'"' : 'QUOTATION', #r"'" : 'APOSTROPHE', r';' : 'SEMICOLON', r'@' : 'AT', r'\!' 
: 'EXCLAMATION', r'\^' : 'CIRCUMFLEX', r'\&' : 'AMPERSAND', r'\|' : 'BAR' } # 11.27 Reserved words # all keys in reserved_words must start w/ upper case reserved_words = { 'ABSENT' : 'ABSENT', 'ABSTRACT-SYNTAX' : 'ABSTRACT_SYNTAX', 'ALL' : 'ALL', 'APPLICATION' : 'APPLICATION', 'AUTOMATIC' : 'AUTOMATIC', 'BEGIN' : 'BEGIN', 'BIT' : 'BIT', 'BOOLEAN' : 'BOOLEAN', 'BY' : 'BY', 'CHARACTER' : 'CHARACTER', 'CHOICE' : 'CHOICE', 'CLASS' : 'CLASS', 'COMPONENT' : 'COMPONENT', 'COMPONENTS' : 'COMPONENTS', 'CONSTRAINED' : 'CONSTRAINED', 'CONTAINING' : 'CONTAINING', 'DEFAULT' : 'DEFAULT', 'DEFINITIONS' : 'DEFINITIONS', 'EMBEDDED' : 'EMBEDDED', # 'ENCODED' : 'ENCODED', 'END' : 'END', 'ENUMERATED' : 'ENUMERATED', # 'EXCEPT' : 'EXCEPT', 'EXPLICIT' : 'EXPLICIT', 'EXPORTS' : 'EXPORTS', # 'EXTENSIBILITY' : 'EXTENSIBILITY', 'EXTERNAL' : 'EXTERNAL', 'FALSE' : 'FALSE', 'FROM' : 'FROM', 'GeneralizedTime' : 'GeneralizedTime', 'IDENTIFIER' : 'IDENTIFIER', 'IMPLICIT' : 'IMPLICIT', # 'IMPLIED' : 'IMPLIED', 'IMPORTS' : 'IMPORTS', 'INCLUDES' : 'INCLUDES', 'INSTANCE' : 'INSTANCE', 'INTEGER' : 'INTEGER', 'INTERSECTION' : 'INTERSECTION', 'MAX' : 'MAX', 'MIN' : 'MIN', 'MINUS-INFINITY' : 'MINUS_INFINITY', 'NULL' : 'NULL', 'OBJECT' : 'OBJECT', 'ObjectDescriptor' : 'ObjectDescriptor', 'OCTET' : 'OCTET', 'OF' : 'OF', 'OPTIONAL' : 'OPTIONAL', 'PATTERN' : 'PATTERN', 'PDV' : 'PDV', 'PLUS-INFINITY' : 'PLUS_INFINITY', 'PRESENT' : 'PRESENT', 'PRIVATE' : 'PRIVATE', 'REAL' : 'REAL', 'RELATIVE-OID' : 'RELATIVE_OID', 'SEQUENCE' : 'SEQUENCE', 'SET' : 'SET', 'SIZE' : 'SIZE', 'STRING' : 'STRING', 'SYNTAX' : 'SYNTAX', 'TAGS' : 'TAGS', 'TRUE' : 'TRUE', 'TYPE-IDENTIFIER' : 'TYPE_IDENTIFIER', 'UNION' : 'UNION', 'UNIQUE' : 'UNIQUE', 'UNIVERSAL' : 'UNIVERSAL', 'UTCTime' : 'UTCTime', 'WITH' : 'WITH', # X.208 obsolete but still used 'ANY' : 'ANY', 'DEFINED' : 'DEFINED', } for k in list(static_tokens.keys()): if static_tokens [k] is None: static_tokens [k] = k StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8', 'Teletex', 'T61', 'Videotex', 'Graphic', 'ISO646', 'Visible', 'General'] # Effective permitted-alphabet constraints are PER-visible only # for the known-multiplier character string types (X.691 27.1) # # XXX: This should include BMPString (UCS2) and UniversalString (UCS4), # but asn2wrs only supports the RestrictedCharacterStringValue # notation of "cstring", but not that of "CharacterStringList", # "Quadruple", or "Tuple" (See X.680 41.8), and packet-per.c does # not support members of the permitted-alphabet being outside the # ASCII range. We don't currently have any ASN.1 modules that need it, # anyway. KnownMultiplierStringTypes = ('NumericString', 'PrintableString', 'IA5String', 'ISO646String', 'VisibleString') for s in StringTypes: reserved_words[s + 'String'] = s + 'String' tokens = list(static_tokens.values()) \ + list(reserved_words.values()) \ + ['BSTRING', 'HSTRING', 'QSTRING', 'UCASE_IDENT', 'LCASE_IDENT', 'LCASE_IDENT_ASSIGNED', 'CLASS_IDENT', 'REAL_NUMBER', 'NUMBER', 'PYQUOTE'] cur_mod = __import__ (__name__) # XXX blech! 
for (k, v) in list(static_tokens.items ()): cur_mod.__dict__['t_' + v] = k # 11.10 Binary strings def t_BSTRING (t): r"'[01]*'B" return t # 11.12 Hexadecimal strings def t_HSTRING (t): r"'[0-9A-Fa-f]*'H" return t def t_QSTRING (t): r'"([^"]|"")*"' return t def t_UCASE_IDENT (t): r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-' if (is_class_ident(t.value)): t.type = 'CLASS_IDENT' if (is_class_syntax(t.value)): t.type = t.value t.type = reserved_words.get(t.value, t.type) return t lcase_ident_assigned = {} def t_LCASE_IDENT (t): r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-' if (not in_oid and (t.value in lcase_ident_assigned)): t.type = 'LCASE_IDENT_ASSIGNED' return t # 11.9 Real numbers def t_REAL_NUMBER (t): r"[0-9]+\.[0-9]*(?!\.)" return t # 11.8 Numbers def t_NUMBER (t): r"0|([1-9][0-9]*)" return t # 11.6 Comments pyquote_str = 'PYQUOTE' def t_COMMENT(t): r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)" if (t.value.find("\n") >= 0) : t.lexer.lineno += 1 if t.value[2:2+len (pyquote_str)] == pyquote_str: t.value = t.value[2+len(pyquote_str):] t.value = t.value.lstrip () t.type = pyquote_str return t return None t_ignore = " \t\r" def t_NEWLINE(t): r'\n+' t.lexer.lineno += t.value.count("\n") def t_error(t): global input_file raise LexError(t, input_file) # state 'braceignore' def t_braceignore_lbrace(t): r'\{' t.lexer.level +=1 def t_braceignore_rbrace(t): r'\}' t.lexer.level -=1 # If closing brace, return token if t.lexer.level == 0: t.type = 'RBRACE' return t def t_braceignore_QSTRING (t): r'"([^"]|"")*"' t.lexer.lineno += t.value.count("\n") def t_braceignore_COMMENT(t): r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n|$|-$)" if (t.value.find("\n") >= 0) : t.lexer.lineno += 1 def t_braceignore_nonspace(t): r'[^\s\{\}\"-]+|-(?!-)' t_braceignore_ignore = " \t\r" def t_braceignore_NEWLINE(t): r'\n+' t.lexer.lineno += t.value.count("\n") def t_braceignore_error(t): t.lexer.skip(1) class Ctx: def __init__ (self, defined_dict, indent = 0): self.tags_def = 'EXPLICIT' # default = explicit self.indent_lev = 0 self.assignments = {} self.dependencies = {} self.pyquotes = [] self.defined_dict = defined_dict self.name_ctr = 0 def spaces (self): return " " * (4 * self.indent_lev) def indent (self): self.indent_lev += 1 def outdent (self): self.indent_lev -= 1 assert (self.indent_lev >= 0) def register_assignment (self, ident, val, dependencies): if ident in self.assignments: raise DuplicateError("assignment", ident) if ident in self.defined_dict: raise Exception("cross-module duplicates for %s" % ident) self.defined_dict [ident] = 1 self.assignments[ident] = val self.dependencies [ident] = dependencies return "" # return "#%s depends on %s" % (ident, str (dependencies)) def register_pyquote (self, val): self.pyquotes.append (val) return "" def output_assignments (self): already_output = {} text_list = [] assign_keys = list(self.assignments.keys()) to_output_count = len (assign_keys) while True: any_output = 0 for (ident, val) in list(self.assignments.items ()): if ident in already_output: continue ok = 1 for d in self.dependencies [ident]: if ((d not in already_output) and (d in assign_keys)): ok = 0 if ok: text_list.append ("%s=%s" % (ident, self.assignments [ident])) already_output [ident] = 1 any_output = 1 to_output_count -= 1 assert (to_output_count >= 0) if not any_output: if to_output_count == 0: break # OK, we detected a cycle cycle_list = [] for ident in list(self.assignments.keys ()): if ident not in already_output: depend_list = [d for d in self.dependencies[ident] if d in assign_keys] 
cycle_list.append ("%s(%s)" % (ident, ",".join (depend_list))) text_list.append ("# Cycle XXX " + ",".join (cycle_list)) for (ident, val) in list(self.assignments.items ()): if ident not in already_output: text_list.append ("%s=%s" % (ident, self.assignments [ident])) break return "\n".join (text_list) def output_pyquotes (self): return "\n".join (self.pyquotes) def make_new_name (self): self.name_ctr += 1 return "_compiler_generated_name_%d" % (self.name_ctr,) #--- Flags for EXPORT, USER_DEFINED, NO_EMIT, MAKE_ENUM ------------------------------- EF_TYPE = 0x0001 EF_VALS = 0x0002 EF_ENUM = 0x0004 EF_WS_DLL = 0x0010 # exported from shared library EF_EXTERN = 0x0020 EF_NO_PROT = 0x0040 EF_NO_TYPE = 0x0080 EF_UCASE = 0x0100 EF_TABLE = 0x0400 EF_DEFINE = 0x0800 EF_MODULE = 0x1000 #--- common dependency computation --- # Input : list of items # dictionary with lists of dependency # # # Output : list of two outputs: # [0] list of items in dependency # [1] list of cycle dependency cycles def dependency_compute(items, dependency, map_fn = lambda t: t, ignore_fn = lambda t: False): item_ord = [] item_cyc = [] x = {} # already emitted #print '# Dependency computation' for t in items: if map_fn(t) in x: #print 'Continue: %s : %s' % (t, (map_fn(t)) continue stack = [t] stackx = {t : dependency.get(t, [])[:]} #print 'Push: %s : %s' % (t, str(stackx[t])) while stack: if stackx[stack[-1]]: # has dependencies d = stackx[stack[-1]].pop(0) if map_fn(d) in x or ignore_fn(d): continue if d in stackx: # cyclic dependency c = stack[:] c.reverse() c = [d] + c[0:c.index(d)+1] c.reverse() item_cyc.append(c) #print 'Cyclic: %s ' % (' -> '.join(c)) continue stack.append(d) stackx[d] = dependency.get(d, [])[:] #print 'Push: %s : %s' % (d, str(stackx[d])) else: #print 'Pop: %s' % (stack[-1]) del stackx[stack[-1]] e = map_fn(stack.pop()) if e in x: continue #print 'Add: %s' % (e) item_ord.append(e) x[e] = True return (item_ord, item_cyc) # Given a filename, return a relative path from the current directory def relpath(filename): return os.path.relpath(filename) # Given a filename, return a relative path from epan/dissectors def rel_dissector_path(filename): path_parts = os.path.abspath(filename).split(os.sep) while (len(path_parts) > 3 and path_parts[0] != 'asn1'): path_parts.pop(0) path_parts.insert(0, '.') return '/'.join(path_parts) #--- EthCtx ------------------------------------------------------------------- class EthCtx: def __init__(self, conform, output, indent = 0): self.conform = conform self.output = output self.conform.ectx = self self.output.ectx = self self.encoding = 'per' self.aligned = False self.default_oid_variant = '' self.default_opentype_variant = '' self.default_containing_variant = '_pdu_new' self.default_embedded_pdv_cb = None self.default_external_type_cb = None self.remove_prefix = None self.srcdir = None self.emitted_pdu = {} self.module = {} self.module_ord = [] self.all_type_attr = {} self.all_tags = {} self.all_vals = {} def encp(self): # encoding protocol encp = self.encoding return encp # Encoding def Per(self): return self.encoding == 'per' def Ber(self): return self.encoding == 'ber' def Oer(self): return self.encoding == 'oer' def Aligned(self): return self.aligned def Unaligned(self): return not self.aligned def NeedTags(self): return self.tag_opt or self.Ber() def NAPI(self): return False # disable planned features def Module(self): # current module name return self.modules[-1][0] def groups(self): return self.group_by_prot or (self.conform.last_group > 0) def dbg(self, d): if 
(self.dbgopt.find(d) >= 0): return True else: return False def value_max(self, a, b): if (a == 'MAX') or (b == 'MAX'): return 'MAX'; if a == 'MIN': return b; if b == 'MIN': return a; try: if (int(a) > int(b)): return a else: return b except (ValueError, TypeError): pass return "MAX((%s),(%s))" % (a, b) def value_min(self, a, b): if (a == 'MIN') or (b == 'MIN'): return 'MIN'; if a == 'MAX': return b; if b == 'MAX': return a; try: if (int(a) < int(b)): return a else: return b except (ValueError, TypeError): pass return "MIN((%s),(%s))" % (a, b) def value_get_eth(self, val): if isinstance(val, Value): return val.to_str(self) ethname = val if val in self.value: ethname = self.value[val]['ethname'] return ethname def value_get_val(self, nm): val = asn2c(nm) if nm in self.value: if self.value[nm]['import']: v = self.get_val_from_all(nm, self.value[nm]['import']) if v is None: msg = 'Need value of imported value identifier %s from %s (%s)' % (nm, self.value[nm]['import'], self.value[nm]['proto']) warnings.warn_explicit(msg, UserWarning, '', 0) else: val = v else: val = self.value[nm]['value'] if isinstance (val, Value): val = val.to_str(self) else: msg = 'Need value of unknown value identifier %s' % (nm) warnings.warn_explicit(msg, UserWarning, '', 0) return val def eth_get_type_attr(self, type): #print "eth_get_type_attr(%s)" % (type) types = [type] while (not self.type[type]['import']): val = self.type[type]['val'] #print val ttype = type while (val.type == 'TaggedType'): val = val.val ttype += '/' + UNTAG_TYPE_NAME if (val.type != 'Type_Ref'): if (type != ttype): types.append(ttype) break type = val.val types.append(type) attr = {} #print " ", types while len(types): t = types.pop() if (self.type[t]['import']): attr.update(self.type[t]['attr']) attr.update(self.eth_get_type_attr_from_all(t, self.type[t]['import'])) elif (self.type[t]['val'].type == 'SelectionType'): val = self.type[t]['val'] (ftype, display) = val.eth_ftype(self) attr.update({ 'TYPE' : ftype, 'DISPLAY' : display, 'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }); else: attr.update(self.type[t]['attr']) attr.update(self.eth_type[self.type[t]['ethname']]['attr']) if attr['STRINGS'].startswith('VALS64(') and '|BASE_VAL64_STRING' not in attr['DISPLAY']: attr['DISPLAY'] += '|BASE_VAL64_STRING' #print " ", attr return attr def eth_get_type_attr_from_all(self, type, module): attr = {} if module in self.all_type_attr and type in self.all_type_attr[module]: attr = self.all_type_attr[module][type] return attr def get_ttag_from_all(self, type, module): ttag = None if module in self.all_tags and type in self.all_tags[module]: ttag = self.all_tags[module][type] return ttag def get_val_from_all(self, nm, module): val = None if module in self.all_vals and nm in self.all_vals[module]: val = self.all_vals[module][nm] return val def get_obj_repr(self, ident, flds=[], not_flds=[]): def set_type_fn(cls, field, fnfield): obj[fnfield + '_fn'] = 'NULL' obj[fnfield + '_pdu'] = 'NULL' if field in val and isinstance(val[field], Type_Ref): p = val[field].eth_type_default_pars(self, '') obj[fnfield + '_fn'] = p['TYPE_REF_FN'] obj[fnfield + '_fn'] = obj[fnfield + '_fn'] % p # one iteration if (self.conform.check_item('PDU', cls + '.' 
+ field)): obj[fnfield + '_pdu'] = 'dissect_' + self.field[val[field].val]['ethname'] return # end of get_type_fn() obj = { '_name' : ident, '_ident' : asn2c(ident)} obj['_class'] = self.oassign[ident].cls obj['_module'] = self.oassign[ident].module val = self.oassign[ident].val for f in flds: if f not in val: return None for f in not_flds: if f in val: return None for f in list(val.keys()): if isinstance(val[f], Node): obj[f] = val[f].fld_obj_repr(self) else: obj[f] = str(val[f]) if (obj['_class'] == 'TYPE-IDENTIFIER') or (obj['_class'] == 'ABSTRACT-SYNTAX'): set_type_fn(obj['_class'], '&Type', '_type') if (obj['_class'] == 'OPERATION'): set_type_fn(obj['_class'], '&ArgumentType', '_argument') set_type_fn(obj['_class'], '&ResultType', '_result') if (obj['_class'] == 'ERROR'): set_type_fn(obj['_class'], '&ParameterType', '_parameter') return obj #--- eth_reg_module ----------------------------------------------------------- def eth_reg_module(self, module): #print "eth_reg_module(module='%s')" % (module) name = module.get_name() self.modules.append([name, module.get_proto(self)]) if name in self.module: raise DuplicateError("module", name) self.module[name] = [] self.module_ord.append(name) #--- eth_module_dep_add ------------------------------------------------------------ def eth_module_dep_add(self, module, dep): self.module[module].append(dep) #--- eth_exports ------------------------------------------------------------ def eth_exports(self, exports): self.exports_all = False if ((len(exports) == 1) and (exports[0] == 'ALL')): self.exports_all = True return for e in (exports): if isinstance(e, Type_Ref): self.exports.append(e.val) elif isinstance(e, Class_Ref): self.cexports.append(e.val) else: self.vexports.append(e) #--- eth_reg_assign --------------------------------------------------------- def eth_reg_assign(self, ident, val, virt=False): #print("eth_reg_assign(ident='%s')" % (ident), 'module=', self.Module()) if ident in self.assign: raise DuplicateError("assignment", ident) self.assign[ident] = { 'val' : val , 'virt' : virt } self.assign_ord.append(ident) if (self.exports_all): self.exports.append(ident) #--- eth_reg_vassign -------------------------------------------------------- def eth_reg_vassign(self, vassign): ident = vassign.ident #print "eth_reg_vassign(ident='%s')" % (ident) if ident in self.vassign: raise DuplicateError("value assignment", ident) self.vassign[ident] = vassign self.vassign_ord.append(ident) if (self.exports_all): self.vexports.append(ident) #--- eth_reg_oassign -------------------------------------------------------- def eth_reg_oassign(self, oassign): ident = oassign.ident #print "eth_reg_oassign(ident='%s')" % (ident) if ident in self.oassign: if self.oassign[ident] == oassign: return # OK - already defined else: raise DuplicateError("information object assignment", ident) self.oassign[ident] = oassign self.oassign_ord.append(ident) self.oassign_cls.setdefault(oassign.cls, []).append(ident) #--- eth_import_type -------------------------------------------------------- def eth_import_type(self, ident, mod, proto): #print "eth_import_type(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto) if ident in self.type: #print "already defined '%s' import=%s, module=%s" % (ident, str(self.type[ident]['import']), self.type[ident].get('module', '-')) if not self.type[ident]['import'] and (self.type[ident]['module'] == mod) : return # OK - already defined elif self.type[ident]['import'] and (self.type[ident]['import'] == mod) : return # OK - already imported 
else: raise DuplicateError("type", ident) self.type[ident] = {'import' : mod, 'proto' : proto, 'ethname' : '' } self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE', 'STRINGS' : 'NULL', 'BITMASK' : '0' } mident = "$%s$%s" % (mod, ident) if (self.conform.check_item('TYPE_ATTR', mident)): self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', mident)) else: self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident)) if (self.conform.check_item('IMPORT_TAG', mident)): self.conform.copy_item('IMPORT_TAG', ident, mident) self.type_imp.append(ident) #--- dummy_import_type -------------------------------------------------------- def dummy_import_type(self, ident): # dummy imported if ident in self.type: raise Exception("Try to dummy import for existing type :%s" % ident) ethtype = asn2c(ident) self.type[ident] = {'import' : 'xxx', 'proto' : 'xxx', 'ethname' : ethtype } self.type[ident]['attr'] = { 'TYPE' : 'FT_NONE', 'DISPLAY' : 'BASE_NONE', 'STRINGS' : 'NULL', 'BITMASK' : '0' } self.eth_type[ethtype] = { 'import' : 'xxx', 'proto' : 'xxx' , 'attr' : {}, 'ref' : []} print("Dummy imported: %s (%s)" % (ident, ethtype)) return ethtype #--- eth_import_class -------------------------------------------------------- def eth_import_class(self, ident, mod, proto): #print "eth_import_class(ident='%s', mod='%s', prot='%s')" % (ident, mod, proto) if ident in self.objectclass: #print "already defined import=%s, module=%s" % (str(self.objectclass[ident]['import']), self.objectclass[ident]['module']) if not self.objectclass[ident]['import'] and (self.objectclass[ident]['module'] == mod) : return # OK - already defined elif self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == mod) : return # OK - already imported else: raise DuplicateError("object class", ident) self.objectclass[ident] = {'import' : mod, 'proto' : proto, 'ethname' : '' } self.objectclass_imp.append(ident) #--- eth_import_value ------------------------------------------------------- def eth_import_value(self, ident, mod, proto): #print "eth_import_value(ident='%s', mod='%s', prot='%s')" % (ident, mod, prot) if ident in self.value: #print "already defined import=%s, module=%s" % (str(self.value[ident]['import']), self.value[ident]['module']) if not self.value[ident]['import'] and (self.value[ident]['module'] == mod) : return # OK - already defined elif self.value[ident]['import'] and (self.value[ident]['import'] == mod) : return # OK - already imported else: raise DuplicateError("value", ident) self.value[ident] = {'import' : mod, 'proto' : proto, 'ethname' : ''} self.value_imp.append(ident) #--- eth_sel_req ------------------------------------------------------------ def eth_sel_req(self, typ, sel): key = typ + '.' 
+ sel
        if key not in self.sel_req:
            self.sel_req[key] = { 'typ' : typ , 'sel' : sel}
            self.sel_req_ord.append(key)
        return key

    #--- eth_comp_req ------------------------------------------------------------
    def eth_comp_req(self, type):
        self.comp_req_ord.append(type)

    #--- eth_dep_add ------------------------------------------------------------
    def eth_dep_add(self, type, dep):
        if type not in self.type_dep:
            self.type_dep[type] = []
        self.type_dep[type].append(dep)

    #--- eth_reg_type -----------------------------------------------------------
    def eth_reg_type(self, ident, val, mod=None):
        #print("eth_reg_type(ident='%s', type='%s')" % (ident, val.type))
        if ident in self.type:
            if self.type[ident]['import'] and (self.type[ident]['import'] == self.Module()) :
                # replace imported type
                del self.type[ident]
                self.type_imp.remove(ident)
            else:
                #print('DuplicateError: import=', self.type[ident]['import'], 'module=', self.Module())
                raise DuplicateError("type", ident)
        val.ident = ident
        self.type[ident] = { 'val' : val, 'import' : None }
        self.type[ident]['module'] = self.Module()
        self.type[ident]['proto'] = self.proto
        if len(ident.split('/')) > 1:
            self.type[ident]['tname'] = val.eth_tname()
        else:
            self.type[ident]['tname'] = asn2c(ident)
        if mod :
            mident = "$%s$%s" % (mod, ident)
        else:
            mident = None
        self.type[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.type[ident]['enum'] = self.conform.use_item('MAKE_ENUM', ident)
        self.type[ident]['vals_ext'] = self.conform.use_item('USE_VALS_EXT', ident)
        self.type[ident]['user_def'] = self.conform.use_item('USER_DEFINED', ident)
        if mident and self.conform.check_item('NO_EMIT', mident) :
            self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', mident)
        else:
            self.type[ident]['no_emit'] = self.conform.use_item('NO_EMIT', ident)
        self.type[ident]['tname'] = self.conform.use_item('TYPE_RENAME', ident, val_dflt=self.type[ident]['tname'])
        self.type[ident]['ethname'] = ''
        if (val.type == 'Type_Ref') or (val.type == 'TaggedType') or (val.type == 'SelectionType') :
            self.type[ident]['attr'] = {}
        else:
            (ftype, display) = val.eth_ftype(self)
            self.type[ident]['attr'] = { 'TYPE' : ftype, 'DISPLAY' : display,
                                         'STRINGS' : val.eth_strings(), 'BITMASK' : '0' }
        self.type[ident]['attr'].update(self.conform.use_item('TYPE_ATTR', ident))
        self.type_ord.append(ident)
        # PDU
        if (self.conform.check_item('PDU', ident)):
            self.eth_reg_field(ident, ident, impl=val.HasImplicitTag(self),
                               pdu=self.conform.use_item('PDU', ident))

    #--- eth_reg_objectclass ----------------------------------------------------------
    def eth_reg_objectclass(self, ident, val):
        #print "eth_reg_objectclass(ident='%s')" % (ident)
        if ident in self.objectclass:
            if self.objectclass[ident]['import'] and (self.objectclass[ident]['import'] == self.Module()) :
                # replace imported object class
                del self.objectclass[ident]
                self.objectclass_imp.remove(ident)
            elif isinstance(self.objectclass[ident]['val'], Class_Ref) and \
                 isinstance(val, Class_Ref) and \
                 (self.objectclass[ident]['val'].val == val.val):
                pass  # ignore duplicated CLASS1 ::= CLASS2
            else:
                raise DuplicateError("object class", ident)
        self.objectclass[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto }
        self.objectclass[ident]['val'] = val
        self.objectclass[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.objectclass_ord.append(ident)

    #--- eth_reg_value ----------------------------------------------------------
    def eth_reg_value(self, ident, type, value, ethname=None):
        #print "eth_reg_value(ident='%s')" % (ident)
        if ident in self.value:
            if self.value[ident]['import'] and (self.value[ident]['import'] == self.Module()) :
                # replace imported value
                del self.value[ident]
                self.value_imp.remove(ident)
            elif ethname:
                self.value[ident]['ethname'] = ethname
                return
            else:
                raise DuplicateError("value", ident)
        self.value[ident] = { 'import' : None, 'module' : self.Module(), 'proto' : self.proto,
                              'type' : type, 'value' : value, 'no_emit' : False }
        self.value[ident]['export'] = self.conform.use_item('EXPORTS', ident)
        self.value[ident]['ethname'] = ''
        if (ethname): self.value[ident]['ethname'] = ethname
        self.value_ord.append(ident)

    #--- eth_reg_field ----------------------------------------------------------
    def eth_reg_field(self, ident, type, idx='', parent=None, impl=False, pdu=None):
        #print "eth_reg_field(ident='%s', type='%s')" % (ident, type)
        if ident in self.field:
            if pdu and (type == self.field[ident]['type']):
                pass  # OK, already created PDU
            else:
                raise DuplicateError("field", ident)
        self.field[ident] = {'type' : type, 'idx' : idx, 'impl' : impl, 'pdu' : pdu,
                             'modified' : '', 'attr' : {} }
        name = ident.split('/')[-1]
        if self.remove_prefix and name.startswith(self.remove_prefix):
            name = name[len(self.remove_prefix):]
        if len(ident.split('/')) > 1 and name == ITEM_FIELD_NAME:  # Sequence/Set of type
            if len(self.field[ident]['type'].split('/')) > 1:
                self.field[ident]['attr']['NAME'] = '"%s item"' % ident.split('/')[-2]
                self.field[ident]['attr']['ABBREV'] = asn2c(ident.split('/')[-2] + name)
            else:
                self.field[ident]['attr']['NAME'] = '"%s"' % self.field[ident]['type']
                self.field[ident]['attr']['ABBREV'] = asn2c(self.field[ident]['type'])
        else:
            self.field[ident]['attr']['NAME'] = '"%s"' % name
            self.field[ident]['attr']['ABBREV'] = asn2c(name)
        if self.conform.check_item('FIELD_ATTR', ident):
            self.field[ident]['modified'] = '#' + str(id(self))
            self.field[ident]['attr'].update(self.conform.use_item('FIELD_ATTR', ident))
        if (pdu):
            self.field[ident]['pdu']['export'] = (self.conform.use_item('EXPORTS', ident + '_PDU') != 0)
            self.pdu_ord.append(ident)
        else:
            self.field_ord.append(ident)
        if parent:
            self.eth_dep_add(parent, type)

    def eth_dummy_eag_field_required(self):
        if (not self.dummy_eag_field):
            self.dummy_eag_field = 'eag_field'

    #--- eth_clean --------------------------------------------------------------
    def eth_clean(self):
        self.proto = self.proto_opt;
        #--- ASN.1 tables ----------------
        self.assign = {}
        self.assign_ord = []
        self.field = {}
        self.pdu_ord = []
        self.field_ord = []
        self.type = {}
        self.type_ord = []
        self.type_imp = []
        self.type_dep = {}
        self.sel_req = {}
        self.sel_req_ord = []
        self.comp_req_ord = []
        self.vassign = {}
        self.vassign_ord = []
        self.value = {}
        self.value_ord = []
        self.value_imp = []
        self.objectclass = {}
        self.objectclass_ord = []
        self.objectclass_imp = []
        self.oassign = {}
        self.oassign_ord = []
        self.oassign_cls = {}
        #--- Modules ------------
        self.modules = []
        self.exports_all = False
        self.exports = []
        self.cexports = []
        self.vexports = []
        #--- types -------------------
        self.eth_type = {}
        self.eth_type_ord = []
        self.eth_export_ord = []
        self.eth_type_dupl = {}
        self.named_bit = []
        #--- value dependencies -------------------
        self.value_dep = {}
        #--- values -------------------
        self.eth_value = {}
        self.eth_value_ord = []
        #--- fields -------------------------
        self.eth_hf = {}
        self.eth_hf_ord = []
        self.eth_hfpdu_ord = []
        self.eth_hf_dupl = {}
        self.dummy_eag_field = None
        #--- type dependencies -------------------
        self.eth_type_ord1 = []
        self.eth_dep_cycle = []
        self.dep_cycle_eth_type = {}
        #--- value dependencies and export -------------------
        self.eth_value_ord1 = []
        self.eth_vexport_ord = []
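    # Note (summary of the phases below, inferred from the code itself):
    # eth_prepare() walks the registered ASN.1 tables and derives the
    # Wireshark-side tables from them -- it creates dummy PDU types where the
    # conformance file asks for them, folds value assignments into named
    # values of integer types, resolves selection types, deduplicates type
    # and field names (eth_type_dupl / eth_hf_dupl), and finally computes
    # type/value dependency ordering via dependency_compute().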
    #--- eth_prepare ------------------------------------------------------------
    def eth_prepare(self):
        self.eproto = asn2c(self.proto)

        #--- dummy types/fields for PDU registration ---
        nm = 'NULL'
        if (self.conform.check_item('PDU', nm)):
            self.eth_reg_type('_dummy/'+nm, NullType())
            self.eth_reg_field(nm, '_dummy/'+nm, pdu=self.conform.use_item('PDU', nm))

        #--- required PDUs ----------------------------
        for t in self.type_ord:
            pdu = self.type[t]['val'].eth_need_pdu(self)
            if not pdu: continue
            f = pdu['type']
            pdu['reg'] = None
            pdu['hidden'] = False
            pdu['need_decl'] = True
            if f not in self.field:
                self.eth_reg_field(f, f, pdu=pdu)

        #--- values -> named values -------------------
        t_for_update = {}
        for v in self.value_ord:
            if (self.value[v]['type'].type == 'Type_Ref') or self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
                if self.conform.check_item('ASSIGN_VALUE_TO_TYPE', v):
                    tnm = self.conform.use_item('ASSIGN_VALUE_TO_TYPE', v)
                else:
                    tnm = self.value[v]['type'].val
                if tnm in self.type \
                   and not self.type[tnm]['import'] \
                   and (self.type[tnm]['val'].type == 'IntegerType'):
                    self.type[tnm]['val'].add_named_value(v, self.value[v]['value'])
                    self.value[v]['no_emit'] = True
                    t_for_update[tnm] = True
        for t in list(t_for_update.keys()):
            self.type[t]['attr']['STRINGS'] = self.type[t]['val'].eth_strings()
            self.type[t]['attr'].update(self.conform.use_item('TYPE_ATTR', t))

        #--- required components of ---------------------------
        #print "self.comp_req_ord = ", self.comp_req_ord
        for t in self.comp_req_ord:
            self.type[t]['val'].eth_reg_sub(t, self, components_available=True)

        #--- required selection types ---------------------------
        #print "self.sel_req_ord = ", self.sel_req_ord
        for t in self.sel_req_ord:
            tt = self.sel_req[t]['typ']
            if tt not in self.type:
                self.dummy_import_type(t)
            elif self.type[tt]['import']:
                self.eth_import_type(t, self.type[tt]['import'], self.type[tt]['proto'])
            else:
                self.type[tt]['val'].sel_req(t, self.sel_req[t]['sel'], self)

        #--- types -------------------
        for t in self.type_imp:  # imported types
            nm = asn2c(t)
            self.eth_type[nm] = { 'import' : self.type[t]['import'],
                                  'proto' : asn2c(self.type[t]['proto']),
                                  'attr' : {}, 'ref' : []}
            self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
            self.type[t]['ethname'] = nm
        for t in self.type_ord:  # dummy import for missing type reference
            tp = self.type[t]['val']
            #print "X : %s %s " % (t, tp.type)
            if isinstance(tp, TaggedType):
                #print "%s : %s " % (tp.type, t)
                tp = tp.val
            if isinstance(tp, Type_Ref):
                #print "%s : %s ::= %s " % (tp.type, t, tp.val)
                if tp.val not in self.type:
                    self.dummy_import_type(tp.val)
        for t in self.type_ord:
            nm = self.type[t]['tname']
            if ((nm.find('#') >= 0) or
                ((len(t.split('/'))>1) and
                 (self.conform.get_fn_presence(t) or self.conform.check_item('FN_PARS', t) or
                  self.conform.get_fn_presence('/'.join((t,ITEM_FIELD_NAME))) or
                  self.conform.check_item('FN_PARS', '/'.join((t,ITEM_FIELD_NAME)))) and
                 not self.conform.check_item('TYPE_RENAME', t))):
                if len(t.split('/')) == 2 and t.split('/')[1] == ITEM_FIELD_NAME:  # Sequence of type at the 1st level
                    nm = t.split('/')[0] + t.split('/')[1]
                elif t.split('/')[-1] == ITEM_FIELD_NAME:  # Sequence/Set of type at next levels
                    nm = 'T_' + self.conform.use_item('FIELD_RENAME', '/'.join(t.split('/')[0:-1]), val_dflt=t.split('/')[-2]) + t.split('/')[-1]
                elif t.split('/')[-1] == UNTAG_TYPE_NAME:  # Untagged type
                    nm = self.type['/'.join(t.split('/')[0:-1])]['ethname'] + '_U'
                else:
                    nm = 'T_' + self.conform.use_item('FIELD_RENAME', t, val_dflt=t.split('/')[-1])
                nm = asn2c(nm)
                if nm in self.eth_type:
                    if nm in self.eth_type_dupl:
                        self.eth_type_dupl[nm].append(t)
                    else:
                        self.eth_type_dupl[nm] = [self.eth_type[nm]['ref'][0], t]
                    nm += '_%02d' % (len(self.eth_type_dupl[nm])-1)
            if nm in self.eth_type:
                self.eth_type[nm]['ref'].append(t)
            else:
                self.eth_type_ord.append(nm)
                self.eth_type[nm] = { 'import' : None, 'proto' : self.eproto, 'export' : 0, 'enum' : 0, 'vals_ext' : 0,
                                      'user_def' : EF_TYPE|EF_VALS, 'no_emit' : EF_TYPE|EF_VALS,
                                      'val' : self.type[t]['val'],
                                      'attr' : {}, 'ref' : [t]}
            self.type[t]['ethname'] = nm
            if (not self.eth_type[nm]['export'] and self.type[t]['export']):  # new export
                self.eth_export_ord.append(nm)
            self.eth_type[nm]['export'] |= self.type[t]['export']
            self.eth_type[nm]['enum'] |= self.type[t]['enum']
            self.eth_type[nm]['vals_ext'] |= self.type[t]['vals_ext']
            self.eth_type[nm]['user_def'] &= self.type[t]['user_def']
            self.eth_type[nm]['no_emit'] &= self.type[t]['no_emit']
            if self.type[t]['attr'].get('STRINGS') == '$$':
                use_ext = self.type[t]['vals_ext']
                if (use_ext):
                    self.eth_type[nm]['attr']['STRINGS'] = '&%s_ext' % (self.eth_vals_nm(nm))
                else:
                    if self.eth_type[nm]['val'].type == 'IntegerType' \
                       and self.eth_type[nm]['val'].HasConstraint() \
                       and self.eth_type[nm]['val'].constr.Needs64b(self):
                        self.eth_type[nm]['attr']['STRINGS'] = 'VALS64(%s)' % (self.eth_vals_nm(nm))
                    else:
                        self.eth_type[nm]['attr']['STRINGS'] = 'VALS(%s)' % (self.eth_vals_nm(nm))
            self.eth_type[nm]['attr'].update(self.conform.use_item('ETYPE_ATTR', nm))
        for t in self.eth_type_ord:
            bits = self.eth_type[t]['val'].eth_named_bits()
            if (bits):
                old_val = 0
                for (val, id) in bits:
                    self.named_bit.append({'name' : id, 'val' : val,
                                           'ethname' : 'hf_%s_%s_%s' % (self.eproto, t, asn2c(id)),
                                           'ftype' : 'FT_BOOLEAN', 'display' : '8',
                                           'strings' : 'NULL',
                                           'bitmask' : '0x'+('80','40','20','10','08','04','02','01')[val%8]})
                    old_val = val + 1
            if self.eth_type[t]['val'].eth_need_tree():
                self.eth_type[t]['tree'] = "ett_%s_%s" % (self.eth_type[t]['proto'], t)
            else:
                self.eth_type[t]['tree'] = None

        #--- register values from enums ------------
        for t in self.eth_type_ord:
            if (self.eth_type[t]['val'].eth_has_enum(t, self)):
                self.eth_type[t]['val'].reg_enum_vals(t, self)

        #--- value dependencies -------------------
        for v in self.value_ord:
            if isinstance (self.value[v]['value'], Value):
                dep = self.value[v]['value'].get_dep()
            else:
                dep = self.value[v]['value']
            if dep and dep in self.value:
                self.value_dep.setdefault(v, []).append(dep)

        #--- exports all necessary values
        for v in self.value_ord:
            if not self.value[v]['export']: continue
            deparr = self.value_dep.get(v, [])
            while deparr:
                d = deparr.pop()
                if not self.value[d]['import']:
                    if not self.value[d]['export']:
                        self.value[d]['export'] = EF_TYPE
                        deparr.extend(self.value_dep.get(d, []))

        #--- values -------------------
        for v in self.value_imp:
            nm = asn2c(v)
            self.eth_value[nm] = { 'import' : self.value[v]['import'],
                                   'proto' : asn2c(self.value[v]['proto']),
                                   'ref' : []}
            self.value[v]['ethname'] = nm
        for v in self.value_ord:
            if (self.value[v]['ethname']): continue
            if (self.value[v]['no_emit']): continue
            nm = asn2c(v)
            self.eth_value[nm] = { 'import' : None,
                                   'proto' : asn2c(self.value[v]['proto']),
                                   'export' : self.value[v]['export'], 'ref' : [v] }
            self.eth_value[nm]['value'] = self.value[v]['value']
            self.eth_value_ord.append(nm)
            self.value[v]['ethname'] = nm

        #--- fields -------------------------
        for f in (self.pdu_ord + self.field_ord):
            if len(f.split('/')) > 1 and f.split('/')[-1] == ITEM_FIELD_NAME:  # Sequence/Set of type
                nm = self.conform.use_item('FIELD_RENAME', '/'.join(f.split('/')[0:-1]), val_dflt=f.split('/')[-2]) + f.split('/')[-1]
            else:
                nm = f.split('/')[-1]
            nm = self.conform.use_item('FIELD_RENAME', f, val_dflt=nm)
            nm = asn2c(nm)
            if (self.field[f]['pdu']):
                nm += '_PDU'
                if (not self.merge_modules or self.field[f]['pdu']['export']):
                    nm = self.eproto + '_' + nm
            t = self.field[f]['type']
            if t in self.type:
                ethtype = self.type[t]['ethname']
            else:  # undefined type
                ethtype = self.dummy_import_type(t)
            ethtypemod = ethtype + self.field[f]['modified']
            if nm in self.eth_hf:
                if nm in self.eth_hf_dupl:
                    if ethtypemod in self.eth_hf_dupl[nm]:
                        nm = self.eth_hf_dupl[nm][ethtypemod]
                        self.eth_hf[nm]['ref'].append(f)
                        self.field[f]['ethname'] = nm
                        continue
                    else:
                        nmx = nm + ('_%02d' % (len(self.eth_hf_dupl[nm])))
                        self.eth_hf_dupl[nm][ethtype] = nmx
                        nm = nmx
                else:
                    if (self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified']) == ethtypemod:
                        self.eth_hf[nm]['ref'].append(f)
                        self.field[f]['ethname'] = nm
                        continue
                    else:
                        nmx = nm + '_01'
                        self.eth_hf_dupl[nm] = {self.eth_hf[nm]['ethtype']+self.eth_hf[nm]['modified'] : nm, \
                                                ethtypemod : nmx}
                        nm = nmx
            if (self.field[f]['pdu']):
                self.eth_hfpdu_ord.append(nm)
            else:
                self.eth_hf_ord.append(nm)
            fullname = 'hf_%s_%s' % (self.eproto, nm)
            attr = self.eth_get_type_attr(self.field[f]['type']).copy()
            attr.update(self.field[f]['attr'])
            if (self.NAPI() and 'NAME' in attr):
                attr['NAME'] += self.field[f]['idx']
            attr.update(self.conform.use_item('EFIELD_ATTR', nm))
            use_vals_ext = self.eth_type[ethtype].get('vals_ext')
            if (use_vals_ext):
                attr['DISPLAY'] += '|BASE_EXT_STRING'
            self.eth_hf[nm] = {'fullname' : fullname, 'pdu' : self.field[f]['pdu'],
                               'ethtype' : ethtype, 'modified' : self.field[f]['modified'],
                               'attr' : attr.copy(), 'ref' : [f]}
            self.field[f]['ethname'] = nm
        if (self.dummy_eag_field):
            # Prepending "dummy_" avoids matching checkhf.pl.
            self.dummy_eag_field = 'dummy_hf_%s_%s' % (self.eproto, self.dummy_eag_field)

        #--- type dependencies -------------------
        (self.eth_type_ord1, self.eth_dep_cycle) = dependency_compute(self.type_ord, self.type_dep,
                                                                      map_fn = lambda t: self.type[t]['ethname'],
                                                                      ignore_fn = lambda t: self.type[t]['import'])
        i = 0
        while i < len(self.eth_dep_cycle):
            t = self.type[self.eth_dep_cycle[i][0]]['ethname']
            self.dep_cycle_eth_type.setdefault(t, []).append(i)
            i += 1

        #--- value dependencies and export -------------------
        for v in self.eth_value_ord:
            if self.eth_value[v]['export']:
                self.eth_vexport_ord.append(v)
            else:
                self.eth_value_ord1.append(v)

        #--- export tags, values, ... ---
        for t in self.exports:
            if t not in self.type:
                continue
            if self.type[t]['import']:
                continue
            m = self.type[t]['module']
            if not self.Per() and not self.Oer():
                if m not in self.all_tags:
                    self.all_tags[m] = {}
                self.all_tags[m][t] = self.type[t]['val'].GetTTag(self)
            if m not in self.all_type_attr:
                self.all_type_attr[m] = {}
            self.all_type_attr[m][t] = self.eth_get_type_attr(t).copy()
        for v in self.vexports:
            if v not in self.value:
                continue
            if self.value[v]['import']:
                continue
            m = self.value[v]['module']
            if m not in self.all_vals:
                self.all_vals[m] = {}
            vv = self.value[v]['value']
            if isinstance (vv, Value):
                vv = vv.to_str(self)
            self.all_vals[m][v] = vv
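    # Illustrative example (hypothetical names, not produced by the source as
    # given): for a protocol "foo" and a type "Bar" with named values
    # (0, "alpha") and (1, "beta"), eth_vals() below emits roughly:
    #
    #   const value_string foo_Bar_vals[] = {
    #     {   0, "alpha" },
    #     {   1, "beta" },
    #     { 0, NULL }
    #   };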
    #--- eth_vals_nm ------------------------------------------------------------
    def eth_vals_nm(self, tname):
        out = ""
        if (not self.eth_type[tname]['export'] & EF_NO_PROT):
            out += "%s_" % (self.eproto)
        out += "%s_vals" % (tname)
        return out

    #--- eth_vals ---------------------------------------------------------------
    def eth_vals(self, tname, vals):
        out = ""
        has_enum = self.eth_type[tname]['enum'] & EF_ENUM
        use_ext = self.eth_type[tname]['vals_ext']
        if (use_ext):
            vals.sort(key=lambda vals_entry: int(vals_entry[0]))
        if (not self.eth_type[tname]['export'] & EF_VALS):
            out += 'static '
        if (self.eth_type[tname]['export'] & EF_VALS) and (self.eth_type[tname]['export'] & EF_TABLE):
            out += 'static '
        if self.eth_type[tname]['val'].HasConstraint() and self.eth_type[tname]['val'].constr.Needs64b(self) \
           and self.eth_type[tname]['val'].type == 'IntegerType':
            out += "const val64_string %s[] = {\n" % (self.eth_vals_nm(tname))
        else:
            out += "const value_string %s[] = {\n" % (self.eth_vals_nm(tname))
        for (val, id) in vals:
            if (has_enum):
                vval = self.eth_enum_item(tname, id)
            else:
                vval = val
            out += '  { %3s, "%s" },\n' % (vval, id)
        out += "  { 0, NULL }\n};\n"
        if (use_ext):
            out += "\nstatic value_string_ext %s_ext = VALUE_STRING_EXT_INIT(%s);\n" % (self.eth_vals_nm(tname), self.eth_vals_nm(tname))
        return out

    #--- eth_enum_prefix ------------------------------------------------------------
    def eth_enum_prefix(self, tname, type=False):
        out = ""
        if (self.eth_type[tname]['export'] & EF_ENUM):
            no_prot = self.eth_type[tname]['export'] & EF_NO_PROT
        else:
            no_prot = self.eth_type[tname]['enum'] & EF_NO_PROT
        if (not no_prot):
            out += self.eproto
        if ((not self.eth_type[tname]['enum'] & EF_NO_TYPE) or type):
            if (out): out += '_'
            out += tname
        if (self.eth_type[tname]['enum'] & EF_UCASE):
            out = out.upper()
        if (out): out += '_'
        return out

    #--- eth_enum_nm ------------------------------------------------------------
    def eth_enum_nm(self, tname):
        out = self.eth_enum_prefix(tname, type=True)
        out += "enum"
        return out

    #--- eth_enum_item ---------------------------------------------------------------
    def eth_enum_item(self, tname, ident):
        out = self.eth_enum_prefix(tname)
        out += asn2c(ident)
        if (self.eth_type[tname]['enum'] & EF_UCASE):
            out = out.upper()
        return out

    #--- eth_enum ---------------------------------------------------------------
    def eth_enum(self, tname, vals):
        out = ""
        if (self.eth_type[tname]['enum'] & EF_DEFINE):
            out += "/* enumerated values for %s */\n" % (tname)
            for (val, id) in vals:
                out += '#define %-12s %3s\n' % (self.eth_enum_item(tname, id), val)
        else:
            out += "typedef enum _%s {\n" % (self.eth_enum_nm(tname))
            first_line = 1
            for (val, id) in vals:
                if (first_line == 1):
                    first_line = 0
                else:
                    out += ",\n"
                out += '  %-12s = %3s' % (self.eth_enum_item(tname, id), val)
            out += "\n} %s;\n" % (self.eth_enum_nm(tname))
        return out

    #--- eth_bits ---------------------------------------------------------------
    def eth_bits(self, tname, bits):
        out = ""
        out += "static int * const "
        out += "%(TABLE)s[] = {\n"
        for (val, id) in bits:
            out += '  &hf_%s_%s_%s,\n' % (self.eproto, tname, asn2c(id))
        out += "  NULL\n};\n"
        return out

    #--- eth_type_fn_h ----------------------------------------------------------
    def eth_type_fn_h(self, tname):
        out = ""
        if (not self.eth_type[tname]['export'] & EF_TYPE):
            out += 'static '
        out += "int "
        if (self.Ber()):
            out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
        elif (self.Per() or self.Oer()):
            out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)" % (self.eth_type[tname]['proto'], tname)
        out += ";\n"
        return out

    #--- eth_fn_call ------------------------------------------------------------
    def eth_fn_call(self, fname, ret=None, indent=2, par=None):
        out = indent * ' '
        if (ret):
            if (ret == 'return'):
                out += 'return '
            else:
                out += ret + ' = '
        out += fname + '('
        ind = len(out)
        for i in range(len(par)):
            if (i>0): out += ind * ' '
            out += ', '.join(par[i])
            if (i<(len(par)-1)): out += ',\n'
        out += ');\n'
        return out

    def output_proto_root(self):
        out = ''
        if self.conform.proto_root_name:
            out += '  proto_item *prot_ti = proto_tree_add_item(tree, ' + self.conform.proto_root_name + ', tvb, 0, -1, ENC_NA);\n'
            out += '  proto_item_set_hidden(prot_ti);\n'
        return out

    #--- eth_type_fn_hdr --------------------------------------------------------
    def eth_type_fn_hdr(self, tname):
        out = '\n'
        if (not self.eth_type[tname]['export'] & EF_TYPE):
            out += 'static '
        out += "int\n"
        if (self.Ber()):
            out += "dissect_%s_%s(bool implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
        elif (self.Per() or self.Oer()):
            out += "dissect_%s_%s(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {\n" % (self.eth_type[tname]['proto'], tname)
        #if self.conform.get_fn_presence(tname):
        #    out += self.conform.get_fn_text(tname, 'FN_HDR')
        #el
        if self.conform.check_item('PDU', tname):
            out += self.output_proto_root()
        if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
            out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_HDR')
        return out

    #--- eth_type_fn_ftr --------------------------------------------------------
    def eth_type_fn_ftr(self, tname):
        out = '\n'
        #if self.conform.get_fn_presence(tname):
        #    out += self.conform.get_fn_text(tname, 'FN_FTR')
        #el
        if self.conform.get_fn_presence(self.eth_type[tname]['ref'][0]):
            out += self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_FTR')
        out += "  return offset;\n"
        out += "}\n"
        return out

    #--- eth_type_fn_body -------------------------------------------------------
    def eth_type_fn_body(self, tname, body, pars=None):
        out = body
        #if self.conform.get_fn_body_presence(tname):
        #    out = self.conform.get_fn_text(tname, 'FN_BODY')
        #el
        if self.conform.get_fn_body_presence(self.eth_type[tname]['ref'][0]):
            out = self.conform.get_fn_text(self.eth_type[tname]['ref'][0], 'FN_BODY')
        if pars:
            try:
                out = out % pars
            except (TypeError):
                pass
        return out

    #--- eth_out_pdu_decl ----------------------------------------------------------
    def eth_out_pdu_decl(self, f):
        t = self.eth_hf[f]['ethtype']
        out = ''
        if (not self.eth_hf[f]['pdu']['export']):
            out += 'static '
        out += 'int '
        out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_);\n'
        return out
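    # The two output methods below emit the header-field support code:
    # eth_output_hf() writes the "static int hf_..." declarations and
    # eth_output_hf_arr() the matching hf_register_info entries of the form
    # { &hf, { NAME, ABBREV, TYPE, DISPLAY, STRINGS, BITMASK, BLURB, HFILL }}.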
    #--- eth_output_hf ----------------------------------------------------------
    def eth_output_hf (self):
        if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
        fx = self.output.file_open('hf')
        for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
            fx.write("%-50s/* %s */\n" % ("static int %s = -1; " % (self.eth_hf[f]['fullname']), self.eth_hf[f]['ethtype']))
        if (self.named_bit):
            fx.write('/* named bits */\n')
        for nb in self.named_bit:
            fx.write("static int %s = -1;\n" % (nb['ethname']))
        if (self.dummy_eag_field):
            fx.write("static int %s = -1; /* never registered */\n" % (self.dummy_eag_field))
        self.output.file_close(fx)

    #--- eth_output_hf_arr ------------------------------------------------------
    def eth_output_hf_arr (self):
        if not len(self.eth_hf_ord) and not len(self.eth_hfpdu_ord) and not len(self.named_bit): return
        fx = self.output.file_open('hfarr')
        for f in (self.eth_hfpdu_ord + self.eth_hf_ord):
            t = self.eth_hf[f]['ethtype']
            if self.remove_prefix and t.startswith(self.remove_prefix):
                t = t[len(self.remove_prefix):]
            name = self.eth_hf[f]['attr']['NAME']
            try:  # Python < 3
                trantab = maketrans("- ", "__")
            except Exception:
                trantab = str.maketrans("- ", "__")
            name = name.translate(trantab)
            namelower = name.lower()
            tquoted_lower = '"' + t.lower() + '"'
            # Try to avoid giving blurbs that give no more info than the name
            if tquoted_lower == namelower or \
               t == "NULL" or \
               tquoted_lower.replace("t_", "") == namelower:
                blurb = 'NULL'
            else:
                blurb = '"%s"' % (t)
            attr = self.eth_hf[f]['attr'].copy()
            if attr['TYPE'] == 'FT_NONE':
                attr['ABBREV'] = '"%s.%s_element"' % (self.proto, attr['ABBREV'])
            else:
                attr['ABBREV'] = '"%s.%s"' % (self.proto, attr['ABBREV'])
            if 'BLURB' not in attr:
                attr['BLURB'] = blurb
            fx.write('    { &%s,\n' % (self.eth_hf[f]['fullname']))
            fx.write('      { %(NAME)s, %(ABBREV)s,\n' % attr)
            fx.write('        %(TYPE)s, %(DISPLAY)s, %(STRINGS)s, %(BITMASK)s,\n' % attr)
            fx.write('        %(BLURB)s, HFILL }},\n' % attr)
        for nb in self.named_bit:
            flt_str = nb['ethname']
            # cut out hf_
            flt_str = flt_str[3:]
            flt_str = flt_str.replace('_' , '.')
            #print("filter string=%s" % (flt_str))
            fx.write('    { &%s,\n' % (nb['ethname']))
            fx.write('      { "%s", "%s",\n' % (nb['name'], flt_str))
            fx.write('        %s, %s, %s, %s,\n' % (nb['ftype'], nb['display'], nb['strings'], nb['bitmask']))
            fx.write('        NULL, HFILL }},\n')
        self.output.file_close(fx)

    #--- eth_output_ett ---------------------------------------------------------
    def eth_output_ett (self):
        fx = self.output.file_open('ett')
        fempty = True
        #fx.write("static gint ett_%s = -1;\n" % (self.eproto))
        for t in self.eth_type_ord:
            if self.eth_type[t]['tree']:
                fx.write("static gint %s = -1;\n" % (self.eth_type[t]['tree']))
                fempty = False
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_ett_arr -----------------------------------------------------
    def eth_output_ett_arr(self):
        fx = self.output.file_open('ettarr')
        fempty = True
        #fx.write("    &ett_%s,\n" % (self.eproto))
        for t in self.eth_type_ord:
            if self.eth_type[t]['tree']:
                fx.write("    &%s,\n" % (self.eth_type[t]['tree']))
                fempty = False
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_export ------------------------------------------------------
    def eth_output_export(self):
        fx = self.output.file_open('exp', ext='h')
        for t in self.eth_export_ord:  # vals
            if (self.eth_type[t]['export'] & EF_ENUM) and self.eth_type[t]['val'].eth_has_enum(t, self):
                fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
            if (self.eth_type[t]['export'] & EF_VALS) and self.eth_type[t]['val'].eth_has_vals():
                if not self.eth_type[t]['export'] & EF_TABLE:
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                    if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \
                       and self.eth_type[t]['val'].type == 'IntegerType':
                        fx.write("const val64_string %s[];\n" % (self.eth_vals_nm(t)))
                    else:
                        fx.write("const value_string %s[];\n" % (self.eth_vals_nm(t)))
                else:
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
        for t in self.eth_export_ord:  # functions
            if (self.eth_type[t]['export'] & EF_TYPE):
                if self.eth_type[t]['export'] & EF_EXTERN:
                    if self.eth_type[t]['export'] & EF_WS_DLL:
                        fx.write("WS_DLL_PUBLIC ")
                    else:
                        fx.write("extern ")
                fx.write(self.eth_type_fn_h(t))
        for f in self.eth_hfpdu_ord:  # PDUs
            if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['export']):
                fx.write(self.eth_out_pdu_decl(f))
        self.output.file_close(fx)

    #--- eth_output_expcnf ------------------------------------------------------
    def eth_output_expcnf(self):
        fx = self.output.file_open('exp', ext='cnf')
        fx.write('#.MODULE\n')
        maxw = 0
        for (m, p) in self.modules:
            if (len(m) > maxw): maxw = len(m)
        for (m, p) in self.modules:
            fx.write("%-*s %s\n" % (maxw, m, p))
        fx.write('#.END\n\n')
        for cls in self.objectclass_ord:
            if self.objectclass[cls]['export']:
                cnm = cls
                if self.objectclass[cls]['export'] & EF_MODULE:
                    cnm = "$%s$%s" % (self.objectclass[cls]['module'], cnm)
                fx.write('#.CLASS %s\n' % (cnm))
                maxw = 2
                for fld in self.objectclass[cls]['val'].fields:
                    w = len(fld.fld_repr()[0])
                    if (w > maxw): maxw = w
                for fld in self.objectclass[cls]['val'].fields:
                    repr = fld.fld_repr()
                    fx.write('%-*s %s\n' % (maxw, repr[0], ' '.join(repr[1:])))
                fx.write('#.END\n\n')
        if self.Ber():
            fx.write('#.IMPORT_TAG\n')
            for t in self.eth_export_ord:  # tags
                if (self.eth_type[t]['export'] & EF_TYPE):
                    fx.write('%-24s ' % self.eth_type[t]['ref'][0])
                    fx.write('%s %s\n' % self.eth_type[t]['val'].GetTag(self))
            fx.write('#.END\n\n')
        fx.write('#.TYPE_ATTR\n')
        for t in self.eth_export_ord:  # attributes
            if (self.eth_type[t]['export'] & EF_TYPE):
                tnm = self.eth_type[t]['ref'][0]
                if self.eth_type[t]['export'] & EF_MODULE:
                    tnm = "$%s$%s" % (self.type[tnm]['module'], tnm)
                fx.write('%-24s ' % tnm)
                attr = self.eth_get_type_attr(self.eth_type[t]['ref'][0]).copy()
                fx.write('TYPE = %(TYPE)-9s DISPLAY = %(DISPLAY)-9s STRINGS = %(STRINGS)s BITMASK = %(BITMASK)s\n' % attr)
        fx.write('#.END\n\n')
        self.output.file_close(fx, keep_anyway=True)

    #--- eth_output_val ------------------------------------------------------
    def eth_output_val(self):
        fx = self.output.file_open('val', ext='h')
        for v in self.eth_value_ord1:
            vv = self.eth_value[v]['value']
            if isinstance (vv, Value):
                vv = vv.to_str(self)
            fx.write("#define %-30s %s\n" % (v, vv))
        for t in self.eth_type_ord1:
            if self.eth_type[t]['import']:
                continue
            if self.eth_type[t]['val'].eth_has_enum(t, self) and not (self.eth_type[t]['export'] & EF_ENUM):
                fx.write(self.eth_type[t]['val'].eth_type_enum(t, self))
        self.output.file_close(fx)

    #--- eth_output_valexp ------------------------------------------------------
    def eth_output_valexp(self):
        if (not len(self.eth_vexport_ord)): return
        fx = self.output.file_open('valexp', ext='h')
        for v in self.eth_vexport_ord:
            vv = self.eth_value[v]['value']
            if isinstance (vv, Value):
                vv = vv.to_str(self)
            fx.write("#define %-30s %s\n" % (v, vv))
        self.output.file_close(fx)
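    # Illustrative example (hypothetical names; assuming a BER-encoded
    # protocol "foo" with a registered PDU "Bar"): out_pdu() inside
    # eth_output_types() below generates a wrapper of roughly this shape:
    #
    #   static int dissect_Bar_PDU(tvbuff_t *tvb _U_, packet_info *pinfo _U_,
    #                              proto_tree *tree _U_, void *data _U_) {
    #     int offset = 0;
    #     asn1_ctx_t asn1_ctx;
    #     asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo);
    #     offset = dissect_foo_Bar(FALSE, tvb, offset, &asn1_ctx, tree, hf_foo_Bar_PDU);
    #     return offset;
    #   }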
    #--- eth_output_types -------------------------------------------------------
    def eth_output_types(self):
        def out_pdu(f):
            t = self.eth_hf[f]['ethtype']
            impl = 'FALSE'
            out = ''
            if (not self.eth_hf[f]['pdu']['export']):
                out += 'static '
            out += 'int '
            out += 'dissect_'+f+'(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {\n'
            out += self.output_proto_root()
            out += '  int offset = 0;\n'
            off_par = 'offset'
            ret_par = 'offset'
            if (self.Per()):
                if (self.Aligned()):
                    aligned = 'TRUE'
                else:
                    aligned = 'FALSE'
                out += "  asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_PER', aligned, 'pinfo'),))
            if (self.Ber()):
                out += "  asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_BER', 'TRUE', 'pinfo'),))
                par=((impl, 'tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            elif (self.Per()):
                par=(('tvb', off_par, '&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            elif (self.Oer()):
                out += "  asn1_ctx_t asn1_ctx;\n"
                out += self.eth_fn_call('asn1_ctx_init', par=(('&asn1_ctx', 'ASN1_ENC_OER', 'TRUE', 'pinfo'),))
                par=(('tvb', off_par,'&asn1_ctx', 'tree', self.eth_hf[f]['fullname']),)
            else:
                par=((),)
            out += self.eth_fn_call('dissect_%s_%s' % (self.eth_type[t]['proto'], t), ret=ret_par, par=par)
            if (self.Per()):
                out += '  offset += 7; offset >>= 3;\n'
            out += '  return offset;\n'
            out += '}\n'
            return out
        #end out_pdu()
        fx = self.output.file_open('fn')
        pos = fx.tell()
        if (len(self.eth_hfpdu_ord)):
            first_decl = True
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu'] and self.eth_hf[f]['pdu']['need_decl']):
                    if first_decl:
                        fx.write('/*--- PDUs declarations ---*/\n')
                        first_decl = False
                    fx.write(self.eth_out_pdu_decl(f))
            if not first_decl:
                fx.write('\n')
        if self.eth_dep_cycle:
            fx.write('/*--- Cyclic dependencies ---*/\n\n')
            i = 0
            while i < len(self.eth_dep_cycle):
                t = self.type[self.eth_dep_cycle[i][0]]['ethname']
                if self.dep_cycle_eth_type[t][0] != i: i += 1; continue
                fx.write(''.join(['/* %s */\n' % ' -> '.join(self.eth_dep_cycle[i]) for i in self.dep_cycle_eth_type[t]]))
                if not self.eth_type[t]['export'] & EF_TYPE:
                    fx.write(self.eth_type_fn_h(t))
                else:
                    fx.write('/*' + self.eth_type_fn_h(t).strip() + '*/\n')
                fx.write('\n')
                i += 1
            fx.write('\n')
        for t in self.eth_type_ord1:
            if self.eth_type[t]['import']:
                continue
            if self.eth_type[t]['val'].eth_has_vals():
                if self.eth_type[t]['no_emit'] & EF_VALS:
                    pass
                elif self.eth_type[t]['user_def'] & EF_VALS:
                    if self.eth_type[t]['val'].HasConstraint() and self.eth_type[t]['val'].constr.Needs64b(self) \
                       and self.eth_type[t]['val'].type == 'IntegerType':
                        fx.write("extern const val64_string %s[];\n" % (self.eth_vals_nm(t)))
                    else:
                        fx.write("extern const value_string %s[];\n" % (self.eth_vals_nm(t)))
                elif (self.eth_type[t]['export'] & EF_VALS) and (self.eth_type[t]['export'] & EF_TABLE):
                    pass
                else:
                    fx.write(self.eth_type[t]['val'].eth_type_vals(t, self))
            if self.eth_type[t]['no_emit'] & EF_TYPE:
                pass
            elif self.eth_type[t]['user_def'] & EF_TYPE:
                fx.write(self.eth_type_fn_h(t))
            else:
                fx.write(self.eth_type[t]['val'].eth_type_fn(self.eth_type[t]['proto'], t, self))
            fx.write('\n')
        if (len(self.eth_hfpdu_ord)):
            fx.write('/*--- PDUs ---*/\n\n')
            for f in self.eth_hfpdu_ord:
                if (self.eth_hf[f]['pdu']):
                    if (f in self.emitted_pdu):
                        fx.write("  /* %s already emitted */\n" % (f))
                    else:
                        fx.write(out_pdu(f))
                        self.emitted_pdu[f] = True
            fx.write('\n')
        fempty = pos == fx.tell()
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_dis_hnd -----------------------------------------------------
    def eth_output_dis_hnd(self):
        fx = self.output.file_open('dis-hnd')
        fempty = True
        for f in self.eth_hfpdu_ord:
            pdu = self.eth_hf[f]['pdu']
            if (pdu and pdu['reg'] and not pdu['hidden']):
                dis = self.proto
                if (pdu['reg'] != '.'):
                    dis += '.' + pdu['reg']
                fx.write('static dissector_handle_t %s_handle;\n' % (asn2c(dis)))
                fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_dis_reg -----------------------------------------------------
    def eth_output_dis_reg(self):
        fx = self.output.file_open('dis-reg')
        fempty = True
        for f in self.eth_hfpdu_ord:
            pdu = self.eth_hf[f]['pdu']
            if (pdu and pdu['reg']):
                new_prefix = ''
                if (pdu['new']): new_prefix = 'new_'
                dis = self.proto
                if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
                fx.write('  %sregister_dissector("%s", dissect_%s, proto_%s);\n' % (new_prefix, dis, f, self.eproto))
                if (not pdu['hidden']):
                    fx.write('  %s_handle = find_dissector("%s");\n' % (asn2c(dis), dis))
                fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_dis_tab -----------------------------------------------------
    def eth_output_dis_tab(self):
        fx = self.output.file_open('dis-tab')
        fempty = True
        for k in self.conform.get_order('REGISTER'):
            reg = self.conform.use_item('REGISTER', k)
            if reg['pdu'] not in self.field: continue
            f = self.field[reg['pdu']]['ethname']
            pdu = self.eth_hf[f]['pdu']
            new_prefix = ''
            if (pdu['new']): new_prefix = 'new_'
            if (reg['rtype'] in ('NUM', 'STR')):
                rstr = ''
                if (reg['rtype'] == 'STR'):
                    rstr = 'string'
                else:
                    rstr = 'uint'
                if (pdu['reg']):
                    dis = self.proto
                    if (pdu['reg'] != '.'): dis += '.' + pdu['reg']
                    if (not pdu['hidden']):
                        hnd = '%s_handle' % (asn2c(dis))
                    else:
                        hnd = 'find_dissector("%s")' % (dis)
                else:
                    hnd = '%screate_dissector_handle(dissect_%s, proto_%s)' % (new_prefix, f, self.eproto)
                rport = self.value_get_eth(reg['rport'])
                fx.write('  dissector_add_%s("%s", %s, %s);\n' % (rstr, reg['rtable'], rport, hnd))
            elif (reg['rtype'] in ('BER', 'PER', 'OER')):
                roid = self.value_get_eth(reg['roid'])
                fx.write('  %sregister_%s_oid_dissector(%s, dissect_%s, proto_%s, %s);\n' % (new_prefix, reg['rtype'].lower(), roid, f, self.eproto, reg['roidname']))
            fempty = False
        fx.write('\n')
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_syn_reg -----------------------------------------------------
    def eth_output_syn_reg(self):
        fx = self.output.file_open('syn-reg')
        fempty = True
        first_decl = True
        for k in self.conform.get_order('SYNTAX'):
            reg = self.conform.use_item('SYNTAX', k)
            if reg['pdu'] not in self.field: continue
            f = self.field[reg['pdu']]['ethname']
            pdu = self.eth_hf[f]['pdu']
            new_prefix = ''
            if (pdu['new']): new_prefix = 'new_'
            if first_decl:
                fx.write('  /*--- Syntax registrations ---*/\n')
                first_decl = False
            fx.write('  %sregister_ber_syntax_dissector(%s, proto_%s, dissect_%s_PDU);\n' % (new_prefix, k, self.eproto, reg['pdu']));
            fempty=False
        self.output.file_close(fx, discard=fempty)

    #--- eth_output_tables -----------------------------------------------------
    def eth_output_tables(self):
        for num in list(self.conform.report.keys()):
            fx = self.output.file_open('table' + num)
            for rep in self.conform.report[num]:
                self.eth_output_table(fx, rep)
            self.output.file_close(fx)

    #--- eth_output_table -----------------------------------------------------
    def eth_output_table(self, fx, rep):
        if rep['type'] == 'HDR':
            fx.write('\n')
        if rep['var']:
            var = rep['var']
            var_list = var.split('.', 1)
            cls = var_list[0]
            del var_list[0]
            flds = []
            not_flds = []
            sort_flds = []
            for f in var_list:
                if f[0] == '!':
                    not_flds.append(f[1:])
                    continue
                if f[0] == '#':
                    flds.append(f[1:])
                    sort_flds.append(f)
                    continue
                if f[0] == '@':
                    flds.append(f[1:])
                    sort_flds.append(f[1:])
                    continue
                flds.append(f)
            objs = {}
            objs_ord = []
            if (cls in self.oassign_cls):
                for ident in self.oassign_cls[cls]:
                    obj = self.get_obj_repr(ident, flds, not_flds)
                    if not obj:
                        continue
                    obj['_LOOP'] = var
                    obj['_DICT'] = str(obj)
                    objs[ident] = obj
                    objs_ord.append(ident)
                if (sort_flds):
                    # Sort identifiers according to the matching object in objs.
                    # The order is determined by sort_flds, keys prefixed by a
                    # '#' are compared numerically.
                    def obj_key_fn(name):
                        obj = objs[name]
                        return list(
                            int(obj[f[1:]]) if f[0] == '#' else obj[f]
                            for f in sort_flds
                        )
                    objs_ord.sort(key=obj_key_fn)
                for ident in objs_ord:
                    obj = objs[ident]
                    try:
                        text = rep['text'] % obj
                    except (KeyError):
                        raise sys.exc_info()[0]("%s:%s invalid key %s for information object %s of %s" % (rep['fn'], rep['lineno'], sys.exc_info()[1], ident, var))
                    fx.write(text)
            else:
                fx.write("/* Unknown or empty loop list %s */\n" % (var))
        else:
            fx.write(rep['text'])
        if rep['type'] == 'FTR':
            fx.write('\n')

    #--- dupl_report -----------------------------------------------------
    def dupl_report(self):
        # types
        tmplist = sorted(self.eth_type_dupl.keys())
        for t in tmplist:
            msg = "The same type names for different types. Explicit type renaming is recommended.\n"
            msg += t + "\n"
            for tt in self.eth_type_dupl[t]:
                msg += " %-20s %s\n" % (self.type[tt]['ethname'], tt)
            warnings.warn_explicit(msg, UserWarning, '', 0)
        # fields
        tmplist = list(self.eth_hf_dupl.keys())
        tmplist.sort()
        for f in tmplist:
            msg = "The same field names for different types. Explicit field renaming is recommended.\n"
            msg += f + "\n"
            for tt in list(self.eth_hf_dupl[f].keys()):
                msg += " %-20s %-20s " % (self.eth_hf_dupl[f][tt], tt)
                msg += ", ".join(self.eth_hf[self.eth_hf_dupl[f][tt]]['ref'])
                msg += "\n"
            warnings.warn_explicit(msg, UserWarning, '', 0)
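    # eth_do_output() below is the top-level emitter: with the 'a'/'t' debug
    # flags it first dumps the assignment/type/value tables, then writes every
    # generated fragment (hf, ett, types, exports, registrations, tables)
    # through the EthOut file helpers.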
("ASN.1 name", "Module", "Protocol")) print("-" * 100) for t in self.objectclass_ord: print("%-40s " % (t)) print("\n# ASN.1 Types") print("%-49s %-24s %-24s" % ("ASN.1 unique name", "'tname'", "Wireshark type")) print("-" * 100) for t in self.type_ord: print("%-49s %-24s %-24s" % (t, self.type[t]['tname'], self.type[t]['ethname'])) print("\n# Wireshark Types") print("Wireshark type References (ASN.1 types)") print("-" * 100) for t in self.eth_type_ord: sys.stdout.write("%-31s %d" % (t, len(self.eth_type[t]['ref']))) print(', '.join(self.eth_type[t]['ref'])) print("\n# ASN.1 Values") print("%-40s %-18s %-20s %s" % ("ASN.1 unique name", "Type", "Value", "Wireshark value")) print("-" * 100) for v in self.value_ord: vv = self.value[v]['value'] if isinstance (vv, Value): vv = vv.to_str(self) print("%-40s %-18s %-20s %s" % (v, self.value[v]['type'].eth_tname(), vv, self.value[v]['ethname'])) #print "\n# Wireshark Values" #print "%-40s %s" % ("Wireshark name", "Value") #print "-" * 100 #for v in self.eth_value_ord: # vv = self.eth_value[v]['value'] # if isinstance (vv, Value): # vv = vv.to_str(self) # print "%-40s %s" % (v, vv) print("\n# ASN.1 Fields") print("ASN.1 unique name Wireshark name ASN.1 type") print("-" * 100) for f in (self.pdu_ord + self.field_ord): print("%-40s %-20s %s" % (f, self.field[f]['ethname'], self.field[f]['type'])) print("\n# Wireshark Fields") print("Wireshark name Wireshark type References (ASN.1 fields)") print("-" * 100) for f in (self.eth_hfpdu_ord + self.eth_hf_ord): sys.stdout.write("%-30s %-20s %s" % (f, self.eth_hf[f]['ethtype'], len(self.eth_hf[f]['ref']))) print(', '.join(self.eth_hf[f]['ref'])) #print "\n# Order after dependencies" #print '\n'.join(self.eth_type_ord1) print("\n# Cyclic dependencies") for c in self.eth_dep_cycle: print(' -> '.join(c)) self.dupl_report() self.output.outnm = self.outnm_opt if (not self.output.outnm): self.output.outnm = self.proto self.output.outnm = self.output.outnm.replace('.', '-') if not self.justexpcnf: self.eth_output_hf() self.eth_output_ett() self.eth_output_types() self.eth_output_hf_arr() self.eth_output_ett_arr() self.eth_output_export() self.eth_output_val() self.eth_output_valexp() self.eth_output_dis_hnd() self.eth_output_dis_reg() self.eth_output_dis_tab() self.eth_output_syn_reg() self.eth_output_tables() if self.expcnf: self.eth_output_expcnf() def dbg_modules(self): def print_mod(m): sys.stdout.write("%-30s " % (m)) dep = self.module[m][:] for i in range(len(dep)): if dep[i] not in self.module: dep[i] = '*' + dep[i] print(', '.join(dep)) # end of print_mod() (mod_ord, mod_cyc) = dependency_compute(self.module_ord, self.module, ignore_fn = lambda t: t not in self.module) print("\n# ASN.1 Moudules") print("Module name Dependency") print("-" * 100) new_ord = False for m in (self.module_ord): print_mod(m) new_ord = new_ord or (self.module_ord.index(m) != mod_ord.index(m)) if new_ord: print("\n# ASN.1 Moudules - in dependency order") print("Module name Dependency") print("-" * 100) for m in (mod_ord): print_mod(m) if mod_cyc: print("\nCyclic dependencies:") for i in (list(range(len(mod_cyc)))): print("%02d: %s" % (i + 1, str(mod_cyc[i]))) #--- EthCnf ------------------------------------------------------------------- class EthCnf: def __init__(self): self.ectx = None self.tblcfg = {} self.table = {} self.order = {} self.fn = {} self.report = {} self.suppress_line = False self.include_path = [] self.proto_root_name = None # Value name Default value Duplicity check Usage check self.tblcfg['EXPORTS'] = { 'val_nm' : 
'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['MAKE_ENUM'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['USE_VALS_EXT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['PDU'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['SYNTAX'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['REGISTER'] = { 'val_nm' : 'attr', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['USER_DEFINED'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['NO_EMIT'] = { 'val_nm' : 'flag', 'val_dflt' : 0, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['MODULE'] = { 'val_nm' : 'proto', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : False } self.tblcfg['OMIT_ASSIGNMENT'] = { 'val_nm' : 'omit', 'val_dflt' : False, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['NO_OMIT_ASSGN'] = { 'val_nm' : 'omit', 'val_dflt' : True, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['VIRTUAL_ASSGN'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['SET_TYPE'] = { 'val_nm' : 'type', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['TYPE_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['FIELD_RENAME'] = { 'val_nm' : 'eth_name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['IMPORT_TAG'] = { 'val_nm' : 'ttag', 'val_dflt' : (), 'chk_dup' : True, 'chk_use' : False } self.tblcfg['FN_PARS'] = { 'val_nm' : 'pars', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['TYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False } self.tblcfg['ETYPE_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : False } self.tblcfg['FIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['EFIELD_ATTR'] = { 'val_nm' : 'attr', 'val_dflt' : {}, 'chk_dup' : True, 'chk_use' : True } self.tblcfg['ASSIGNED_ID'] = { 'val_nm' : 'ids', 'val_dflt' : {}, 'chk_dup' : False,'chk_use' : False } self.tblcfg['ASSIGN_VALUE_TO_TYPE'] = { 'val_nm' : 'name', 'val_dflt' : None, 'chk_dup' : True, 'chk_use' : True } for k in list(self.tblcfg.keys()) : self.table[k] = {} self.order[k] = [] def add_item(self, table, key, fn, lineno, **kw): if self.tblcfg[table]['chk_dup'] and key in self.table[table]: warnings.warn_explicit("Duplicated %s for %s. 
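    # The methods below maintain the conformance tables configured above:
    # add_item()/update_item() record directives together with their source
    # location (used for duplicate and "unused" diagnostics), while
    # check_item() and use_item() are the lookup side used by EthCtx during
    # code generation.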
    def add_item(self, table, key, fn, lineno, **kw):
        if self.tblcfg[table]['chk_dup'] and key in self.table[table]:
            warnings.warn_explicit("Duplicated %s for %s. Previous one is at %s:%d" %
                                   (table, key, self.table[table][key]['fn'], self.table[table][key]['lineno']),
                                   UserWarning, fn, lineno)
            return
        self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
        self.table[table][key].update(kw)
        self.order[table].append(key)

    def update_item(self, table, key, fn, lineno, **kw):
        if key not in self.table[table]:
            self.table[table][key] = {'fn' : fn, 'lineno' : lineno, 'used' : False}
            self.order[table].append(key)
            self.table[table][key][self.tblcfg[table]['val_nm']] = {}
        self.table[table][key][self.tblcfg[table]['val_nm']].update(kw[self.tblcfg[table]['val_nm']])

    def get_order(self, table):
        return self.order[table]

    def check_item(self, table, key):
        return key in self.table[table]

    def copy_item(self, table, dst_key, src_key):
        if (src_key in self.table[table]):
            self.table[table][dst_key] = self.table[table][src_key]

    def check_item_value(self, table, key, **kw):
        return key in self.table[table] and kw.get('val_nm', self.tblcfg[table]['val_nm']) in self.table[table][key]

    def use_item(self, table, key, **kw):
        vdflt = kw.get('val_dflt', self.tblcfg[table]['val_dflt'])
        if key not in self.table[table]: return vdflt
        vname = kw.get('val_nm', self.tblcfg[table]['val_nm'])
        #print "use_item() - set used for %s %s" % (table, key)
        self.table[table][key]['used'] = True
        return self.table[table][key].get(vname, vdflt)

    def omit_assignment(self, type, ident, module):
        if self.ectx.conform.use_item('OMIT_ASSIGNMENT', ident):
            return True
        if self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*') or \
           self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type) or \
           self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*/'+module) or \
           self.ectx.conform.use_item('OMIT_ASSIGNMENT', '*'+type+'/'+module):
            return self.ectx.conform.use_item('NO_OMIT_ASSGN', ident)
        return False

    def add_fn_line(self, name, ctx, line, fn, lineno):
        if name not in self.fn:
            self.fn[name] = {'FN_HDR' : None, 'FN_FTR' : None, 'FN_BODY' : None}
        if (self.fn[name][ctx]):
            self.fn[name][ctx]['text'] += line
        else:
            self.fn[name][ctx] = {'text' : line, 'used' : False,
                                  'fn' : fn, 'lineno' : lineno}

    def get_fn_presence(self, name):
        #print "get_fn_presence('%s'):%s" % (name, str(self.fn.has_key(name)))
        #if self.fn.has_key(name): print self.fn[name]
        return name in self.fn

    def get_fn_body_presence(self, name):
        return name in self.fn and self.fn[name]['FN_BODY']

    def get_fn_text(self, name, ctx):
        if (name not in self.fn):
            return '';
        if (not self.fn[name][ctx]):
            return '';
        self.fn[name][ctx]['used'] = True
        out = self.fn[name][ctx]['text']
        if (not self.suppress_line):
            out = '#line %u "%s"\n%s\n' % (self.fn[name][ctx]['lineno'], rel_dissector_path(self.fn[name][ctx]['fn']), out);
        return out

    def add_pdu(self, par, fn, lineno):
        #print "add_pdu(par=%s, %s, %d)" % (str(par), fn, lineno)
        (reg, hidden) = (None, False)
        if (len(par) > 1): reg = par[1]
        if (reg and reg[0]=='@'): (reg, hidden) = (reg[1:], True)
        attr = {'new' : False, 'reg' : reg, 'hidden' : hidden, 'need_decl' : False, 'export' : False}
        self.add_item('PDU', par[0], attr=attr, fn=fn, lineno=lineno)
        return

    def add_syntax(self, par, fn, lineno):
        #print "add_syntax(par=%s, %s, %d)" % (str(par), fn, lineno)
        if (len(par) >= 2):
            name = par[1]
        else:
            name = '"'+par[0]+'"'
        attr = { 'pdu' : par[0] }
        self.add_item('SYNTAX', name, attr=attr, fn=fn, lineno=lineno)
        return

    def add_register(self, pdu, par, fn, lineno):
        #print "add_register(pdu=%s, par=%s, %s, %d)" % (pdu, str(par), fn, lineno)
        if (par[0] in ('N', 'NUM')):   rtype = 'NUM'; (pmin, pmax) = (2, 2)
        elif (par[0] in ('S', 'STR')): rtype = 'STR'; (pmin, pmax) = (2, 2)
        elif (par[0] in ('B', 'BER')): rtype = 'BER'; (pmin, pmax) = (1, 2)
        elif (par[0] in ('P', 'PER')): rtype = 'PER'; (pmin, pmax) = (1, 2)
        elif (par[0] in ('O', 'OER')): rtype = 'OER'; (pmin, pmax) = (1, 2)
        else:
            warnings.warn_explicit("Unknown registration type '%s'" % (par[2]), UserWarning, fn, lineno)
            return
        if ((len(par)-1) < pmin):
            warnings.warn_explicit("Too few parameters for %s registration type. At least %d parameters are required" % (rtype, pmin), UserWarning, fn, lineno)
            return
        if ((len(par)-1) > pmax):
            warnings.warn_explicit("Too many parameters for %s registration type. Only %d parameters are allowed" % (rtype, pmax), UserWarning, fn, lineno)
        attr = {'pdu' : pdu, 'rtype' : rtype}
        if (rtype in ('NUM', 'STR')):
            attr['rtable'] = par[1]
            attr['rport'] = par[2]
            rkey = '/'.join([rtype, attr['rtable'], attr['rport']])
        elif (rtype in ('BER', 'PER', 'OER')):
            attr['roid'] = par[1]
            attr['roidname'] = '""'
            if (len(par)>=3):
                attr['roidname'] = par[2]
            elif attr['roid'][0] != '"':
                attr['roidname'] = '"' + attr['roid'] + '"'
            rkey = '/'.join([rtype, attr['roid']])
        self.add_item('REGISTER', rkey, attr=attr, fn=fn, lineno=lineno)

    def check_par(self, par, pmin, pmax, fn, lineno):
        for i in range(len(par)):
            if par[i] == '-':
                par[i] = None
                continue
            if par[i][0] == '#':
                par[i:] = []
                break
        if len(par) < pmin:
            warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
            return None
        if (pmax >= 0) and (len(par) > pmax):
            warnings.warn_explicit("Too many parameters. Only %d parameters are allowed" % (pmax), UserWarning, fn, lineno)
            return par[0:pmax]
        return par
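    # Illustrative example (hypothetical names): read() below parses "#."
    # directives from a conformance file, e.g. a snippet such as:
    #
    #   #.MODULE_IMPORT
    #   Foo-Module  foo
    #
    #   #.PDU
    #   Bar
    #
    #   #.REGISTER
    #   Bar  N tcp.port 1234
    #   #.END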
    def read(self, fn):
        def get_par(line, pmin, pmax, fn, lineno):
            par = line.split(None, pmax)
            par = self.check_par(par, pmin, pmax, fn, lineno)
            return par

        def get_par_nm(line, pmin, pmax, fn, lineno):
            if pmax:
                par = line.split(None, pmax)
            else:
                par = [line,]
            for i in range(len(par)):
                if par[i][0] == '#':
                    par[i:] = []
                    break
            if len(par) < pmin:
                warnings.warn_explicit("Too few parameters. At least %d parameters are required" % (pmin), UserWarning, fn, lineno)
                return None
            if len(par) > pmax:
                nmpar = par[pmax]
            else:
                nmpar = ''
            nmpars = {}
            nmpar_first = re.compile(r'^\s*(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
            nmpar_next = re.compile(r'\s+(?P<attr>[_A-Z][_A-Z0-9]*)\s*=\s*')
            nmpar_end = re.compile(r'\s*$')
            result = nmpar_first.search(nmpar)
            pos = 0
            while result:
                k = result.group('attr')
                pos = result.end()
                result = nmpar_next.search(nmpar, pos)
                p1 = pos
                if result:
                    p2 = result.start()
                else:
                    p2 = nmpar_end.search(nmpar, pos).start()
                v = nmpar[p1:p2]
                nmpars[k] = v
            if len(par) > pmax:
                par[pmax] = nmpars
            return par

        f = open(fn, "r")
        lineno = 0
        is_import = False
        directive = re.compile(r'^\s*#\.(?P<name>[A-Z_][A-Z_0-9]*)(\s+|$)')
        cdirective = re.compile(r'^\s*##')
        report = re.compile(r'^TABLE(?P<num>\d*)_(?P<type>HDR|BODY|FTR)$')
        comment = re.compile(r'^\s*#[^.#]')
        empty = re.compile(r'^\s*$')
        ctx = None
        name = ''
        default_flags = 0x00
        stack = []
        while True:
            if not f.closed:
                line = f.readline()
                lineno += 1
            else:
                line = None
            if not line:
                if not f.closed:
                    f.close()
                if stack:
                    frec = stack.pop()
                    fn, f, lineno, is_import = frec['fn'], frec['f'], frec['lineno'], frec['is_import']
                    continue
                else:
                    break
            if comment.search(line): continue
            result = directive.search(line)
            if result:  # directive
                rep_result = report.search(result.group('name'))
                if result.group('name') == 'END_OF_CNF':
                    f.close()
                elif result.group('name') == 'OPT':
                    ctx = result.group('name')
                    par = get_par(line[result.end():], 0, -1, fn=fn, lineno=lineno)
                    if not par: continue
                    self.set_opt(par[0], par[1:], fn, lineno)
                    ctx = None
                elif result.group('name') in ('PDU', 'REGISTER',
                                              'MODULE', 'MODULE_IMPORT',
                                              'OMIT_ASSIGNMENT', 'NO_OMIT_ASSGN',
                                              'VIRTUAL_ASSGN', 'SET_TYPE', 'ASSIGN_VALUE_TO_TYPE',
                                              'TYPE_RENAME', 'FIELD_RENAME', 'TF_RENAME', 'IMPORT_TAG',
                                              'TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR',
                                              'SYNTAX'):
                    ctx = result.group('name')
                elif result.group('name') in ('OMIT_ALL_ASSIGNMENTS', 'OMIT_ASSIGNMENTS_EXCEPT',
                                              'OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT',
                                              'OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
                    ctx = result.group('name')
                    key = '*'
                    if ctx in ('OMIT_ALL_TYPE_ASSIGNMENTS', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT'):
                        key += 'T'
                    if ctx in ('OMIT_ALL_VALUE_ASSIGNMENTS', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
                        key += 'V'
                    par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
                    if par:
                        key += '/' + par[0]
                    self.add_item('OMIT_ASSIGNMENT', key, omit=True, fn=fn, lineno=lineno)
                    if ctx in ('OMIT_ASSIGNMENTS_EXCEPT', 'OMIT_TYPE_ASSIGNMENTS_EXCEPT', 'OMIT_VALUE_ASSIGNMENTS_EXCEPT'):
                        ctx = 'NO_OMIT_ASSGN'
                    else:
                        ctx = None
                elif result.group('name') in ('EXPORTS', 'MODULE_EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
                    ctx = result.group('name')
                    default_flags = EF_TYPE|EF_VALS
                    if ctx == 'MODULE_EXPORTS':
                        ctx = 'EXPORTS'
                        default_flags |= EF_MODULE
                    if ctx == 'EXPORTS':
                        par = get_par(line[result.end():], 0, 5, fn=fn, lineno=lineno)
                    else:
                        par = get_par(line[result.end():], 0, 1, fn=fn, lineno=lineno)
                    if not par: continue
                    p = 1
                    if (par[0] == 'WITH_VALS'):      default_flags |= EF_TYPE|EF_VALS
                    elif (par[0] == 'WITHOUT_VALS'): default_flags |= EF_TYPE; default_flags &= ~EF_VALS
                    elif (par[0] == 'ONLY_VALS'):    default_flags &= ~EF_TYPE; default_flags |= EF_VALS
                    elif (ctx == 'EXPORTS'): p = 0
                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[0]), UserWarning, fn, lineno)
                    for i in range(p, len(par)):
                        if (par[i] == 'ONLY_ENUM'):          default_flags &= ~(EF_TYPE|EF_VALS); default_flags |= EF_ENUM
                        elif (par[i] == 'WITH_ENUM'):        default_flags |= EF_ENUM
                        elif (par[i] == 'VALS_WITH_TABLE'):  default_flags |= EF_TABLE
                        elif (par[i] == 'WS_DLL'):           default_flags |= EF_WS_DLL
                        elif (par[i] == 'EXTERN'):           default_flags |= EF_EXTERN
                        elif (par[i] == 'NO_PROT_PREFIX'):   default_flags |= EF_NO_PROT
                        else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
                elif result.group('name') in ('MAKE_ENUM', 'MAKE_DEFINES'):
                    ctx = result.group('name')
                    default_flags = EF_ENUM
                    if ctx == 'MAKE_ENUM': default_flags |= EF_NO_PROT|EF_NO_TYPE
                    if ctx == 'MAKE_DEFINES': default_flags |= EF_DEFINE|EF_UCASE|EF_NO_TYPE
                    par = get_par(line[result.end():], 0, 3, fn=fn, lineno=lineno)
                    for i in range(0, len(par)):
                        if (par[i] == 'NO_PROT_PREFIX'):   default_flags |= EF_NO_PROT
                        elif (par[i] == 'PROT_PREFIX'):    default_flags &= ~ EF_NO_PROT
                        elif (par[i] == 'NO_TYPE_PREFIX'): default_flags |= EF_NO_TYPE
                        elif (par[i] == 'TYPE_PREFIX'):    default_flags &= ~ EF_NO_TYPE
                        elif (par[i] == 'UPPER_CASE'):     default_flags |= EF_UCASE
                        elif (par[i] == 'NO_UPPER_CASE'):  default_flags &= ~EF_UCASE
                        else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
                elif result.group('name') == 'USE_VALS_EXT':
                    ctx = result.group('name')
                    default_flags = 0xFF
                elif result.group('name') == 'FN_HDR':
                    minp = 1
                    if (ctx in ('FN_PARS',)) and name: minp = 0
                    par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
                    if (not par) and (minp > 0): continue
                    ctx = result.group('name')
                    if par: name = par[0]
                elif result.group('name') == 'FN_FTR':
                    minp = 1
                    if (ctx in ('FN_PARS','FN_HDR')) and name: minp = 0
                    par = get_par(line[result.end():], minp, 1, fn=fn, lineno=lineno)
                    if (not par) and (minp > 0): continue
                    ctx = result.group('name')
                    if par: name = par[0]
                elif result.group('name') == 'FN_BODY':
                    par = get_par_nm(line[result.end():], 1, 1, fn=fn, lineno=lineno)
                    if not par: continue
                    ctx = result.group('name')
                    name = par[0]
                    if len(par) > 1:
                        self.add_item('FN_PARS', name, pars=par[1], fn=fn, lineno=lineno)
                elif result.group('name') == 'FN_PARS':
                    par = get_par_nm(line[result.end():], 0, 1, fn=fn, lineno=lineno)
                    ctx = result.group('name')
                    if not par:
                        name = None
                    elif len(par) == 1:
                        name = par[0]
                        self.add_item(ctx, name, pars={}, fn=fn, lineno=lineno)
                    elif len(par) > 1:
                        self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
                        ctx = None
                elif result.group('name') == 'CLASS':
                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
                    if not par: continue
                    ctx = result.group('name')
                    name = par[0]
                    add_class_ident(name)
                    if not name.split('$')[-1].isupper():
                        warnings.warn_explicit("No lower-case letters shall be included in information object class name (%s)" % (name),
                                               UserWarning, fn, lineno)
                elif result.group('name') == 'ASSIGNED_OBJECT_IDENTIFIER':
                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
                    if not par: continue
                    self.update_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER', ids={par[0] : par[0]}, fn=fn, lineno=lineno)
                elif rep_result:  # Reports
                    num = rep_result.group('num')
                    type = rep_result.group('type')
                    if type == 'BODY':
                        par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
                        if not par: continue
                    else:
                        par = get_par(line[result.end():], 0, 0, fn=fn, lineno=lineno)
                    rep = { 'type' : type, 'var' : None, 'text' : '', 'fn' : fn, 'lineno' : lineno }
                    if len(par) > 0:
                        rep['var'] = par[0]
                    self.report.setdefault(num, []).append(rep)
                    ctx = 'TABLE'
                    name = num
                elif result.group('name') in ('INCLUDE', 'IMPORT') :
                    is_imp = result.group('name') == 'IMPORT'
                    par = get_par(line[result.end():], 1, 1, fn=fn, lineno=lineno)
                    if not par:
                        warnings.warn_explicit("%s requires parameter" % (result.group('name'),), UserWarning, fn, lineno)
                        continue
                    fname = par[0]
                    #print "Try include: %s" % (fname)
                    if (not os.path.exists(fname)):
                        fname = os.path.join(os.path.split(fn)[0], par[0])
                        #print "Try include: %s" % (fname)
                    i = 0
                    while not os.path.exists(fname) and (i < len(self.include_path)):
                        fname = os.path.join(self.include_path[i], par[0])
                        #print "Try include: %s" % (fname)
                        i += 1
                    if (not os.path.exists(fname)):
                        if is_imp:
                            continue  # just ignore
                        else:
                            fname = par[0]  # report error
                    fnew = open(fname, "r")
                    stack.append({'fn' : fn, 'f' : f, 'lineno' : lineno, 'is_import' : is_import})
                    fn, f, lineno, is_import = par[0], fnew, 0, is_imp
                elif result.group('name') == 'END':
                    ctx = None
                else:
                    warnings.warn_explicit("Unknown directive '%s'" % (result.group('name')), UserWarning, fn, lineno)
                continue
            if not ctx:
                if not empty.match(line):
                    warnings.warn_explicit("Non-empty line in empty context", UserWarning, fn, lineno)
            elif ctx == 'OPT':
                if empty.match(line): continue
                par = get_par(line, 1, -1, fn=fn, lineno=lineno)
                if not par: continue
                self.set_opt(par[0], par[1:], fn, lineno)
            elif ctx in ('EXPORTS', 'USER_DEFINED', 'NO_EMIT'):
                if empty.match(line): continue
                if ctx == 'EXPORTS':
                    par = get_par(line, 1, 6, fn=fn, lineno=lineno)
                else:
                    par = get_par(line, 1, 2, fn=fn, lineno=lineno)
                if not par: continue
                flags = default_flags
                p = 2
                if (len(par)>=2):
                    if (par[1] == 'WITH_VALS'):      flags |= EF_TYPE|EF_VALS
                    elif (par[1] == 'WITHOUT_VALS'): flags |= EF_TYPE; flags &= ~EF_VALS
                    elif (par[1] == 'ONLY_VALS'):    flags &= ~EF_TYPE; flags |= EF_VALS
                    elif (ctx == 'EXPORTS'): p = 1
                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[1]), UserWarning, fn, lineno)
                for i in range(p, len(par)):
                    if (par[i] == 'ONLY_ENUM'):          flags &= ~(EF_TYPE|EF_VALS); flags |= EF_ENUM
                    elif (par[i] == 'WITH_ENUM'):        flags |= EF_ENUM
                    elif (par[i] == 'VALS_WITH_TABLE'):  flags |= EF_TABLE
                    elif (par[i] == 'WS_DLL'):           flags |= EF_WS_DLL
                    elif (par[i] == 'EXTERN'):           flags |= EF_EXTERN
                    elif (par[i] == 'NO_PROT_PREFIX'):   flags |= EF_NO_PROT
                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
                self.add_item(ctx, par[0], flag=flags, fn=fn, lineno=lineno)
            elif ctx in ('MAKE_ENUM', 'MAKE_DEFINES'):
                if empty.match(line): continue
                par = get_par(line, 1, 4, fn=fn, lineno=lineno)
                if not par: continue
                flags = default_flags
                for i in range(1, len(par)):
                    if (par[i] == 'NO_PROT_PREFIX'):   flags |= EF_NO_PROT
                    elif (par[i] == 'PROT_PREFIX'):    flags &= ~ EF_NO_PROT
                    elif (par[i] == 'NO_TYPE_PREFIX'): flags |= EF_NO_TYPE
                    elif (par[i] == 'TYPE_PREFIX'):    flags &= ~ EF_NO_TYPE
                    elif (par[i] == 'UPPER_CASE'):     flags |= EF_UCASE
                    elif (par[i] == 'NO_UPPER_CASE'):  flags &= ~EF_UCASE
                    else: warnings.warn_explicit("Unknown parameter value '%s'" % (par[i]), UserWarning, fn, lineno)
                self.add_item('MAKE_ENUM', par[0], flag=flags, fn=fn, lineno=lineno)
            elif ctx == 'USE_VALS_EXT':
                if empty.match(line): continue
                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
                if not par: continue
                flags = default_flags
                self.add_item('USE_VALS_EXT', par[0], flag=flags, fn=fn, lineno=lineno)
            elif ctx == 'PDU':
                if empty.match(line): continue
                par = get_par(line, 1, 5, fn=fn, lineno=lineno)
                if not par: continue
                self.add_pdu(par[0:2], fn, lineno)
                if (len(par)>=3):
                    self.add_register(par[0], par[2:5], fn, lineno)
            elif ctx == 'SYNTAX':
                if empty.match(line): continue
                par = get_par(line, 1, 2, fn=fn, lineno=lineno)
                if not par: continue
                if not self.check_item('PDU', par[0]):
                    self.add_pdu(par[0:1], fn, lineno)
                self.add_syntax(par, fn, lineno)
            elif ctx == 'REGISTER':
                if empty.match(line): continue
                par = get_par(line, 3, 4, fn=fn, lineno=lineno)
                if not par: continue
                if not self.check_item('PDU', par[0]):
                    self.add_pdu(par[0:1], fn, lineno)
                self.add_register(par[0], par[1:4], fn, lineno)
            elif ctx in ('MODULE', 'MODULE_IMPORT'):
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item('MODULE', par[0], proto=par[1], fn=fn, lineno=lineno)
            elif ctx == 'IMPORT_TAG':
                if empty.match(line): continue
                par = get_par(line, 3, 3, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item(ctx, par[0], ttag=(par[1], par[2]), fn=fn, lineno=lineno)
            elif ctx == 'OMIT_ASSIGNMENT':
                if empty.match(line): continue
                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item(ctx, par[0], omit=True, fn=fn, lineno=lineno)
            elif ctx == 'NO_OMIT_ASSGN':
                if empty.match(line): continue
                par = get_par(line, 1, 1, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item(ctx, par[0], omit=False, fn=fn, lineno=lineno)
            elif ctx == 'VIRTUAL_ASSGN':
                if empty.match(line): continue
                par = get_par(line, 2, -1, fn=fn, lineno=lineno)
                if not par: continue
                if (len(par[1].split('/')) > 1) and not self.check_item('SET_TYPE', par[1]):
                    self.add_item('SET_TYPE', par[1], type=par[0], fn=fn, lineno=lineno)
                self.add_item('VIRTUAL_ASSGN', par[1], name=par[0], fn=fn, lineno=lineno)
                for nm in par[2:]:
                    self.add_item('SET_TYPE', nm, type=par[0], fn=fn, lineno=lineno)
                if not par[0][0].isupper():
                    warnings.warn_explicit("Virtual assignment should have uppercase name (%s)" % (par[0]),
                                           UserWarning, fn, lineno)
            elif ctx == 'SET_TYPE':
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                if not self.check_item('VIRTUAL_ASSGN', par[0]):
                    self.add_item('SET_TYPE', par[0], type=par[1], fn=fn, lineno=lineno)
                if not par[1][0].isupper():
                    warnings.warn_explicit("Set type should have uppercase name (%s)" % (par[1]),
                                           UserWarning, fn, lineno)
            elif ctx == 'ASSIGN_VALUE_TO_TYPE':
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item(ctx, par[0], name=par[1], fn=fn, lineno=lineno)
            elif ctx == 'TYPE_RENAME':
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item('TYPE_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
                if not par[1][0].isupper():
                    warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
                                           UserWarning, fn, lineno)
            elif ctx == 'FIELD_RENAME':
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item('FIELD_RENAME', par[0], eth_name=par[1], fn=fn, lineno=lineno)
                if not par[1][0].islower():
                    warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
                                           UserWarning, fn, lineno)
            elif ctx == 'TF_RENAME':
                if empty.match(line): continue
                par = get_par(line, 2, 2, fn=fn, lineno=lineno)
                if not par: continue
                tmpu = par[1][0].upper() + par[1][1:]
                tmpl = par[1][0].lower() + par[1][1:]
                self.add_item('TYPE_RENAME', par[0], eth_name=tmpu, fn=fn, lineno=lineno)
                if not tmpu[0].isupper():
                    warnings.warn_explicit("Type should be renamed to uppercase name (%s)" % (par[1]),
                                           UserWarning, fn, lineno)
                self.add_item('FIELD_RENAME', par[0], eth_name=tmpl, fn=fn, lineno=lineno)
                if not tmpl[0].islower():
                    warnings.warn_explicit("Field should be renamed to lowercase name (%s)" % (par[1]),
                                           UserWarning, fn, lineno)
            elif ctx in ('TYPE_ATTR', 'ETYPE_ATTR', 'FIELD_ATTR', 'EFIELD_ATTR'):
                if empty.match(line): continue
                par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
                if not par: continue
                self.add_item(ctx, par[0], attr=par[1], fn=fn, lineno=lineno)
            elif ctx == 'FN_PARS':
                if empty.match(line): continue
                if name:
                    par = get_par_nm(line, 0, 0, fn=fn, lineno=lineno)
                else:
                    par = get_par_nm(line, 1, 1, fn=fn, lineno=lineno)
                if not par: continue
                if name:
                    self.update_item(ctx, name, pars=par[0], fn=fn, lineno=lineno)
                else:
                    self.add_item(ctx, par[0], pars=par[1], fn=fn, lineno=lineno)
            elif ctx in ('FN_HDR', 'FN_FTR', 'FN_BODY'):
                result = cdirective.search(line)
                if result:  # directive
                    line = '#' + line[result.end():]
                self.add_fn_line(name, ctx, line, fn=fn, lineno=lineno)
            elif ctx == 'CLASS':
                if empty.match(line): continue
                par = get_par(line, 1, 3, fn=fn, lineno=lineno)
                if not par: continue
                if not set_type_to_class(name, par[0], par[1:]):
                    warnings.warn_explicit("Could not set type of class member %s.&%s to %s" % (name, par[0], par[1]),
                                           UserWarning, fn, lineno)
            elif ctx == 'TABLE':
                self.report[name][-1]['text'] += line
    def set_opt(self, opt, par, fn, lineno):
        #print("set_opt: %s, %s" % (opt, par))
        if opt in ("-I",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.include_path.append(relpath(par[0]))
        elif opt in ("-b", "BER", "CER", "DER"):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.encoding = 'ber'
        elif opt in ("PER",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.encoding = 'per'
        elif opt in ("OER",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.encoding = 'oer'
        elif opt in ("-p", "PROTO"):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.proto_opt = par[0]
            self.ectx.merge_modules = True
        elif opt in ("ALIGNED",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.aligned = True
        elif opt in ("-u", "UNALIGNED"):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.aligned = False
        elif opt in ("PROTO_ROOT_NAME",):  # tuple, not bare string: 'in' must test membership, not substring
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.proto_root_name = par[0]
        elif opt in ("-d",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.dbgopt = par[0]
        elif opt in ("-e",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.expcnf = True
        elif opt in ("-S",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.merge_modules = True
        elif opt in ("GROUP_BY_PROT",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.group_by_prot = True
        elif opt in ("-o",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.outnm_opt = par[0]
        elif opt in ("-O",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.output.outdir = relpath(par[0])
        elif opt in ("-s",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.output.single_file = relpath(par[0])
        elif opt in ("-k",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.ectx.output.keep = True
        elif opt in ("-L",):
            par = self.check_par(par, 0, 0, fn, lineno)
            self.suppress_line = True
        elif opt in ("EMBEDDED_PDV_CB",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.default_embedded_pdv_cb = par[0]
        elif opt in ("EXTERNAL_TYPE_CB",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.default_external_type_cb = par[0]
        elif opt in ("-r",):
            par = self.check_par(par, 1, 1, fn, lineno)
            if not par: return
            self.ectx.remove_prefix = par[0]
        else:
            warnings.warn_explicit("Unknown option %s" % (opt),
                                   UserWarning, fn, lineno)
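    # Illustrative note (not in the original source): these options mirror the
    # asn2wrs command line and can also be given in a conformance file under
    # the #.OPT directive, one option (plus its parameter) per line, e.g.:
    #
    #   #.OPT
    #   PER
    #   UNALIGNED
    #   -o foo
    #   #.END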
%-4s %-15s %-20s %s" % ("File", "Line", "Table", "Key", "Value")) print("-" * 100) tbls = sorted(self.table.keys()) for t in tbls: keys = sorted(self.table[t].keys()) for k in keys: print("%-15s %4s %-15s %-20s %s" % ( self.table[t][k]['fn'], self.table[t][k]['lineno'], t, k, str(self.table[t][k][self.tblcfg[t]['val_nm']]))) def unused_report(self): tbls = sorted(self.table.keys()) for t in tbls: if not self.tblcfg[t]['chk_use']: continue keys = sorted(self.table[t].keys()) for k in keys: if not self.table[t][k]['used']: warnings.warn_explicit("Unused %s for %s" % (t, k), UserWarning, self.table[t][k]['fn'], self.table[t][k]['lineno']) fnms = list(self.fn.keys()) fnms.sort() for f in fnms: keys = sorted(self.fn[f].keys()) for k in keys: if not self.fn[f][k]: continue if not self.fn[f][k]['used']: warnings.warn_explicit("Unused %s for %s" % (k, f), UserWarning, self.fn[f][k]['fn'], self.fn[f][k]['lineno']) #--- EthOut ------------------------------------------------------------------- class EthOut: def __init__(self): self.ectx = None self.outnm = None self.outdir = '.' self.single_file = None self.created_files = {} self.created_files_ord = [] self.keep = False def outcomment(self, ln, comment=None): if comment: return '%s %s\n' % (comment, ln) else: return '/* %-74s */\n' % (ln) def created_file_add(self, name, keep_anyway): name = os.path.normcase(os.path.abspath(name)) if name not in self.created_files: self.created_files_ord.append(name) self.created_files[name] = keep_anyway else: self.created_files[name] = self.created_files[name] or keep_anyway def created_file_exists(self, name): name = os.path.normcase(os.path.abspath(name)) return name in self.created_files #--- output_fname ------------------------------------------------------- def output_fname(self, ftype, ext='c'): fn = '' if not ext in ('cnf',): fn += 'packet-' fn += self.outnm if (ftype): fn += '-' + ftype fn += '.' + ext return fn #--- file_open ------------------------------------------------------- def file_open(self, ftype, ext='c'): fn = self.output_fname(ftype, ext=ext) if self.created_file_exists(fn): fx = open(fn, 'a') else: fx = open(fn, 'w') comment = None if ext in ('cnf',): comment = '#' fx.write(self.fhdr(fn, comment = comment)) else: if (not self.single_file and not self.created_file_exists(fn)): fx.write(self.fhdr(fn)) if not self.ectx.merge_modules: fx.write('\n') mstr = "--- " if self.ectx.groups(): mstr += "Module" if (len(self.ectx.modules) > 1): mstr += "s" for (m, p) in self.ectx.modules: mstr += " %s" % (m) else: mstr += "Module %s" % (self.ectx.Module()) mstr += " --- --- ---" fx.write(self.outcomment(mstr, comment)) fx.write('\n') return fx #--- file_close ------------------------------------------------------- def file_close(self, fx, discard=False, keep_anyway=False): fx.close() if discard and not self.created_file_exists(fx.name): os.unlink(fx.name) else: self.created_file_add(fx.name, keep_anyway) #--- fhdr ------------------------------------------------------- def fhdr(self, fn, comment=None): out = '' out += self.outcomment('Do not modify this file. 
#--- EthOut -------------------------------------------------------------------
class EthOut:
    def __init__(self):
        self.ectx = None
        self.outnm = None
        self.outdir = '.'
        self.single_file = None
        self.created_files = {}
        self.created_files_ord = []
        self.keep = False

    def outcomment(self, ln, comment=None):
        if comment:
            return '%s %s\n' % (comment, ln)
        else:
            return '/* %-74s */\n' % (ln)

    def created_file_add(self, name, keep_anyway):
        name = os.path.normcase(os.path.abspath(name))
        if name not in self.created_files:
            self.created_files_ord.append(name)
            self.created_files[name] = keep_anyway
        else:
            self.created_files[name] = self.created_files[name] or keep_anyway

    def created_file_exists(self, name):
        name = os.path.normcase(os.path.abspath(name))
        return name in self.created_files

    #--- output_fname -------------------------------------------------------
    def output_fname(self, ftype, ext='c'):
        fn = ''
        if not ext in ('cnf',):
            fn += 'packet-'
        fn += self.outnm
        if (ftype):
            fn += '-' + ftype
        fn += '.' + ext
        return fn

    #--- file_open -------------------------------------------------------
    def file_open(self, ftype, ext='c'):
        fn = self.output_fname(ftype, ext=ext)
        if self.created_file_exists(fn):
            fx = open(fn, 'a')
        else:
            fx = open(fn, 'w')
        comment = None
        if ext in ('cnf',):
            comment = '#'
            fx.write(self.fhdr(fn, comment = comment))
        else:
            if (not self.single_file and not self.created_file_exists(fn)):
                fx.write(self.fhdr(fn))
        if not self.ectx.merge_modules:
            fx.write('\n')
            mstr = "--- "
            if self.ectx.groups():
                mstr += "Module"
                if (len(self.ectx.modules) > 1):
                    mstr += "s"
                for (m, p) in self.ectx.modules:
                    mstr += " %s" % (m)
            else:
                mstr += "Module %s" % (self.ectx.Module())
            mstr += " --- --- ---"
            fx.write(self.outcomment(mstr, comment))
            fx.write('\n')
        return fx

    #--- file_close -------------------------------------------------------
    def file_close(self, fx, discard=False, keep_anyway=False):
        fx.close()
        if discard and not self.created_file_exists(fx.name):
            os.unlink(fx.name)
        else:
            self.created_file_add(fx.name, keep_anyway)

    #--- fhdr -------------------------------------------------------
    def fhdr(self, fn, comment=None):
        out = ''
        out += self.outcomment('Do not modify this file. Changes will be overwritten.', comment)
        out += self.outcomment('Generated automatically by the ASN.1 to Wireshark dissector compiler', comment)
        out += self.outcomment(os.path.basename(fn), comment)
        out += self.outcomment(' '.join(['asn2wrs.py'] + sys.argv[1:]), comment)
        out += '\n'
        # Make Windows path separator look like Unix path separator
        out = out.replace('\\', '/')
        # Change absolute paths and relative paths generated outside
        # source directory to paths relative to asn1/<proto> subdir.
        out = re.sub(r'(\s)[./A-Z]\S*/dissectors\b', r'\1../..', out)
        out = re.sub(r'(\s)[./A-Z]\S*/asn1/\S*?([\s/])', r'\1.\2', out)
        return out

    #--- dbg_print -------------------------------------------------------
    def dbg_print(self):
        print("\n# Output files")
        print("\n".join(self.created_files_ord))
        print("\n")

    #--- make_single_file -------------------------------------------------------
    def make_single_file(self, suppress_line):
        if (not self.single_file): return
        in_nm = self.single_file + '.c'
        out_nm = os.path.join(self.outdir, self.output_fname(''))
        self.do_include(out_nm, in_nm, suppress_line)
        in_nm = self.single_file + '.h'
        if (os.path.exists(in_nm)):
            out_nm = os.path.join(self.outdir, self.output_fname('', ext='h'))
            self.do_include(out_nm, in_nm, suppress_line)
        if (not self.keep):
            for fn in self.created_files_ord:
                if not self.created_files[fn]:
                    os.unlink(fn)

    #--- do_include -------------------------------------------------------
    def do_include(self, out_nm, in_nm, suppress_line):
        def check_file(fn, fnlist):
            fnfull = os.path.normcase(os.path.abspath(fn))
            if (fnfull in fnlist and os.path.exists(fnfull)):
                return os.path.normpath(fn)
            return None
        fin = open(in_nm, "r")
        fout = open(out_nm, "w")
        fout.write(self.fhdr(out_nm))
        if (not suppress_line):
            fout.write('/* Input file: ' + os.path.basename(in_nm) +' */\n')
            fout.write('\n')
            fout.write('#line %u "%s"\n' % (1, rel_dissector_path(in_nm)))
        include = re.compile(r'^\s*#\s*include\s+[<"](?P<fname>[^>"]+)[>"]', re.IGNORECASE)
        cont_linenum = 0
        while (True):
            cont_linenum = cont_linenum + 1
            line = fin.readline()
            if (line == ''): break
            ifile = None
            result = include.search(line)
            #if (result): print os.path.normcase(os.path.abspath(result.group('fname')))
            if (result):
                ifile = check_file(os.path.join(os.path.split(in_nm)[0], result.group('fname')), self.created_files)
                if (not ifile):
                    ifile = check_file(os.path.join(self.outdir, result.group('fname')), self.created_files)
                if (not ifile):
                    ifile = check_file(result.group('fname'), self.created_files)
            if (ifile):
                if (not suppress_line):
                    fout.write('\n')
                    fout.write('/*--- Included file: ' + ifile + ' ---*/\n')
                    fout.write('#line %u "%s"\n' % (1, rel_dissector_path(ifile)))
                finc = open(ifile, "r")
                fout.write(finc.read())
                if (not suppress_line):
                    fout.write('\n')
                    fout.write('/*--- End of included file: ' + ifile + ' ---*/\n')
                    fout.write('#line %u "%s"\n' % (cont_linenum+1, rel_dissector_path(in_nm)))
                finc.close()
            else:
                fout.write(line)
        fout.close()
        fin.close()
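# Illustrative note (not in the original source): do_include() above splices
# generated fragments into the dissector template.  Given a template line
#   #include "packet-foo-fn.c"
# the generated packet-foo-fn.c is inlined in place of the #include, bracketed
# by '/*--- Included file: ... ---*/' markers and, unless -L was given,
# '#line' directives that map compiler diagnostics back to the fragments.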
#--- Node ---------------------------------------------------------------------
class Node:
    def __init__(self,*args, **kw):
        if len (args) == 0:
            self.type = self.__class__.__name__
        else:
            assert (len(args) == 1)
            self.type = args[0]
        self.__dict__.update (kw)

    def str_child (self, key, child, depth):
        indent = " " * (2 * depth)
        keystr = indent + key + ": "
        if key == 'type': # already processed in str_depth
            return ""
        if isinstance (child, Node): # ugh
            return keystr + "\n" + child.str_depth (depth+1)
        if isinstance(child, type ([])):
            l = []
            for x in child:
                if isinstance (x, Node):
                    l.append (x.str_depth (depth+1))
                else:
                    l.append (indent + " " + str(x) + "\n")
            return keystr + "[\n" + ''.join(l) + indent + "]\n"
        else:
            return keystr + str (child) + "\n"

    def str_depth (self, depth): # ugh
        indent = " " * (2 * depth)
        l = ["%s%s" % (indent, self.type)]
        l.append ("".join ([self.str_child (k_v[0], k_v[1], depth + 1) for k_v in list(self.__dict__.items ())]))
        return "\n".join (l)

    def __repr__(self):
        return "\n" + self.str_depth (0)

    def to_python (self, ctx):
        return self.str_depth (ctx.indent_lev)

    def eth_reg(self, ident, ectx):
        pass

    def fld_obj_repr(self, ectx):
        return "/* TO DO %s */" % (str(self))


#--- ValueAssignment -------------------------------------------------------------
class ValueAssignment (Node):
    def __init__(self,*args, **kw) :
        Node.__init__ (self,*args, **kw)

    def eth_reg(self, ident, ectx):
        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
        ectx.eth_reg_vassign(self)
        ectx.eth_reg_value(self.ident, self.typ, self.val)
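# Illustrative sketch (not part of the original source): an ASN.1 information
# object assignment such as the hypothetical
#   foo-op OPERATION ::= { &ArgumentType FooArg, &ResultType FooRes, ... }
# is handled by ObjectAssignment below; make_virtual_type() replaces inline
# &ArgumentType/&ResultType fields with Type_Ref nodes to virtual types named
# "ARG-foo-op" / "RES-foo-op", so each gets a dissector function of its own.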
#--- ObjectAssignment -------------------------------------------------------------
class ObjectAssignment (Node):
    def __init__(self,*args, **kw) :
        Node.__init__ (self,*args, **kw)

    def __eq__(self, other):
        if self.cls != other.cls:
            return False
        if len(self.val) != len(other.val):
            return False
        for f in (list(self.val.keys())):
            if f not in other.val:
                return False
            if isinstance(self.val[f], Node) and isinstance(other.val[f], Node):
                if not self.val[f].fld_obj_eq(other.val[f]):
                    return False
            else:
                if str(self.val[f]) != str(other.val[f]):
                    return False
        return True

    def eth_reg(self, ident, ectx):
        def make_virtual_type(cls, field, prefix):
            if isinstance(self.val, str): return
            if field in self.val and not isinstance(self.val[field], Type_Ref):
                vnm = prefix + '-' + self.ident
                virtual_tr = Type_Ref(val = vnm)
                t = self.val[field]
                self.val[field] = virtual_tr
                ectx.eth_reg_assign(vnm, t, virt=True)
                ectx.eth_reg_type(vnm, t)
                t.eth_reg_sub(vnm, ectx)
            if field in self.val and ectx.conform.check_item('PDU', cls + '.' + field):
                ectx.eth_reg_field(self.val[field].val, self.val[field].val,
                                   impl=self.val[field].HasImplicitTag(ectx),
                                   pdu=ectx.conform.use_item('PDU', cls + '.' + field))
            return
        # end of make_virtual_type()
        if ectx.conform.omit_assignment('V', self.ident, ectx.Module()): return # Assignment to omit
        self.module = ectx.Module()
        ectx.eth_reg_oassign(self)
        if (self.cls == 'TYPE-IDENTIFIER') or (self.cls == 'ABSTRACT-SYNTAX'):
            make_virtual_type(self.cls, '&Type', 'TYPE')
        if (self.cls == 'OPERATION'):
            make_virtual_type(self.cls, '&ArgumentType', 'ARG')
            make_virtual_type(self.cls, '&ResultType', 'RES')
        if (self.cls == 'ERROR'):
            make_virtual_type(self.cls, '&ParameterType', 'PAR')


#--- Type ---------------------------------------------------------------------
class Type (Node):
    def __init__(self,*args, **kw) :
        self.name = None
        self.constr = None
        self.tags = []
        self.named_list = None
        Node.__init__ (self,*args, **kw)

    def IsNamed(self):
        if self.name is None :
            return False
        else:
            return True

    def HasConstraint(self):
        if self.constr is None :
            return False
        else :
            return True

    def HasSizeConstraint(self):
        return self.HasConstraint() and self.constr.IsSize()

    def HasValueConstraint(self):
        return self.HasConstraint() and self.constr.IsValue()

    def HasPermAlph(self):
        return self.HasConstraint() and self.constr.IsPermAlph()

    def HasContentsConstraint(self):
        return self.HasConstraint() and self.constr.IsContents()

    def HasOwnTag(self):
        return len(self.tags) > 0

    def HasImplicitTag(self, ectx):
        return (self.HasOwnTag() and self.tags[0].IsImplicit(ectx))

    def IndetermTag(self, ectx):
        return False

    def AddTag(self, tag):
        self.tags[0:0] = [tag]

    def GetTag(self, ectx):
        #print "GetTag(%s)\n" % self.name;
        if (self.HasOwnTag()):
            return self.tags[0].GetTag(ectx)
        else:
            return self.GetTTag(ectx)

    def GetTTag(self, ectx):
        print("#Unhandled GetTTag() in %s" % (self.type))
        print(self.str_depth(1))
        return ('BER_CLASS_unknown', 'TAG_unknown')

    def SetName(self, name):
        self.name = name

    def AddConstraint(self, constr):
        if not self.HasConstraint():
            self.constr = constr
        else:
            self.constr = Constraint(type = 'Intersection', subtype = [self.constr, constr])

    def eth_tname(self):
        return '#' + self.type + '_' + str(id(self))

    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')

    def eth_strings(self):
        return 'NULL'

    def eth_omit_field(self):
        return False

    def eth_need_tree(self):
        return False

    def eth_has_vals(self):
        return False

    def eth_has_enum(self, tname, ectx):
        return self.eth_has_vals() and (ectx.eth_type[tname]['enum'] & EF_ENUM)

    def eth_need_pdu(self, ectx):
        return None

    def eth_named_bits(self):
        return None

    def eth_reg_sub(self, ident, ectx):
        pass

    def get_components(self, ectx):
        print("#Unhandled get_components() in %s" % (self.type))
        print(self.str_depth(1))
        return []

    def sel_req(self, sel, ectx):
        print("#Selection '%s' required for non-CHOICE type %s" % (sel, self.type))
        print(self.str_depth(1))

    def fld_obj_eq(self, other):
        return isinstance(other, Type) and (self.eth_tname() == other.eth_tname())
    def eth_reg(self, ident, ectx, tstrip=0, tagflag=False, selflag=False, idx='', parent=None):
        #print "eth_reg(): %s, ident=%s, tstrip=%d, tagflag=%s, selflag=%s, parent=%s" %(self.type, ident, tstrip, str(tagflag), str(selflag), str(parent))
        #print " ", self
        if (ectx.NeedTags() and (len(self.tags) > tstrip)):
            tagged_type = self
            for i in range(len(self.tags)-1, tstrip-1, -1):
                tagged_type = TaggedType(val=tagged_type, tstrip=i)
                tagged_type.AddTag(self.tags[i])
            if not tagflag:  # 1st tagged level
                if self.IsNamed() and not selflag:
                    tagged_type.SetName(self.name)
            tagged_type.eth_reg(ident, ectx, tstrip=1, tagflag=tagflag, idx=idx, parent=parent)
            return
        nm = ''
        if ident and self.IsNamed() and not tagflag and not selflag:
            nm = ident + '/' + self.name
        elif ident:
            nm = ident
        elif self.IsNamed():
            nm = self.name
        if not ident and ectx.conform.omit_assignment('T', nm, ectx.Module()): return # Assignment to omit
        if not ident:  # Assignment
            ectx.eth_reg_assign(nm, self)
            if self.type == 'Type_Ref' and not self.tr_need_own_fn(ectx):
                ectx.eth_reg_type(nm, self)
        virtual_tr = Type_Ref(val=ectx.conform.use_item('SET_TYPE', nm))
        if (self.type == 'Type_Ref') or ectx.conform.check_item('SET_TYPE', nm):
            if ident and (ectx.conform.check_item('TYPE_RENAME', nm) or ectx.conform.get_fn_presence(nm) or selflag):
                if ectx.conform.check_item('SET_TYPE', nm):
                    ectx.eth_reg_type(nm, virtual_tr)  # dummy Type Reference
                else:
                    ectx.eth_reg_type(nm, self)  # new type
                trnm = nm
            elif ectx.conform.check_item('SET_TYPE', nm):
                trnm = ectx.conform.use_item('SET_TYPE', nm)
            elif (self.type == 'Type_Ref') and self.tr_need_own_fn(ectx):
                ectx.eth_reg_type(nm, self)  # need own function, e.g. for constraints
                trnm = nm
            else:
                trnm = self.val
        else:
            ectx.eth_reg_type(nm, self, mod = ectx.Module())
            trnm = nm
        if ectx.conform.check_item('VIRTUAL_ASSGN', nm):
            vnm = ectx.conform.use_item('VIRTUAL_ASSGN', nm)
            ectx.eth_reg_assign(vnm, self, virt=True)
            ectx.eth_reg_type(vnm, self)
            self.eth_reg_sub(vnm, ectx)
        if parent and (ectx.type[parent]['val'].type == 'TaggedType'):
            ectx.type[parent]['val'].eth_set_val_name(parent, trnm, ectx)
        if ident and not tagflag and not self.eth_omit_field():
            ectx.eth_reg_field(nm, trnm, idx=idx, parent=parent, impl=self.HasImplicitTag(ectx))
        if ectx.conform.check_item('SET_TYPE', nm):
            virtual_tr.eth_reg_sub(nm, ectx)
        else:
            self.eth_reg_sub(nm, ectx)

    def eth_get_size_constr(self, ectx):
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.HasSizeConstraint():
            if self.constr.IsSize():
                (minv, maxv, ext) = self.constr.GetSize(ectx)
            if (self.constr.type == 'Intersection'):
                if self.constr.subtype[0].IsSize():
                    (minv, maxv, ext) = self.constr.subtype[0].GetSize(ectx)
                elif self.constr.subtype[1].IsSize():
                    (minv, maxv, ext) = self.constr.subtype[1].GetSize(ectx)
        if minv == 'MIN': minv = 'NO_BOUND'
        if maxv == 'MAX': maxv = 'NO_BOUND'
        if (ext): ext = 'TRUE'
        else: ext = 'FALSE'
        return (minv, maxv, ext)

    def eth_get_value_constr(self, ectx):
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.HasValueConstraint():
            (minv, maxv, ext) = self.constr.GetValue(ectx)
        if minv == 'MIN': minv = 'NO_BOUND'
        if maxv == 'MAX': maxv = 'NO_BOUND'
        if str(minv).isdigit(): minv += 'U'
        elif (str(minv)[0] == "-") and str(minv)[1:].isdigit():
            if (int(minv) == -(2**31)):
                minv = "G_MININT32"
            elif (int(minv) < -(2**31)):
                minv = "G_GINT64_CONSTANT(%s)" % (str(minv))
        if str(maxv).isdigit():
            if (int(maxv) >= 2**32):
                maxv = "G_GUINT64_CONSTANT(%s)" % (str(maxv))
            else:
                maxv += 'U'
        if (ext): ext = 'TRUE'
        else: ext = 'FALSE'
        return (minv, maxv, ext)

    def eth_get_alphabet_constr(self, ectx):
        (alph, alphlen) = ('NULL', '0')
        if self.HasPermAlph():
            alph = self.constr.GetPermAlph(ectx)
            if not alph:
                alph = 'NULL'
        if (alph != 'NULL'):
            if (((alph[0] + alph[-1]) == '""') and (not alph.count('"', 1, -1))):
                alphlen = str(len(alph) - 2)
            else:
                alphlen = 'strlen(%s)' % (alph)
        return (alph, alphlen)

    def eth_type_vals(self, tname, ectx):
        if self.eth_has_vals():
            print("#Unhandled eth_type_vals('%s') in %s" % (tname, self.type))
            print(self.str_depth(1))
        return ''

    def eth_type_enum(self, tname, ectx):
        if self.eth_has_enum(tname, ectx):
            print("#Unhandled eth_type_enum('%s') in %s" % (tname, self.type))
            print(self.str_depth(1))
        return ''

    def eth_type_default_table(self, ectx, tname):
        return ''
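    # Illustrative note (not in the original source): eth_get_size_constr()
    # and eth_get_value_constr() above normalize an ASN.1 constraint into the
    # (min, max, ext) string triple handed to the runtime, e.g. a hypothetical
    #   Foo ::= OCTET STRING (SIZE(1..32, ...))
    # yields ('1', '32', 'TRUE'), while an unconstrained type yields
    # ('NO_BOUND', 'NO_BOUND', 'FALSE').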
    def eth_type_default_body(self, ectx, tname):
        print("#Unhandled eth_type_default_body('%s') in %s" % (tname, self.type))
        print(self.str_depth(1))
        return ''

    def eth_type_default_pars(self, ectx, tname):
        pars = {
          'TNAME' : tname,
          'ER' : ectx.encp(),
          'FN_VARIANT' : '',
          'TREE' : 'tree',
          'TVB' : 'tvb',
          'OFFSET' : 'offset',
          'ACTX' : 'actx',
          'HF_INDEX' : 'hf_index',
          'VAL_PTR' : 'NULL',
          'IMPLICIT_TAG' : 'implicit_tag',
        }
        if (ectx.eth_type[tname]['tree']):
            pars['ETT_INDEX'] = ectx.eth_type[tname]['tree']
        if (ectx.merge_modules):
            pars['PROTOP'] = ''
        else:
            pars['PROTOP'] = ectx.eth_type[tname]['proto'] + '_'
        return pars

    def eth_type_fn(self, proto, tname, ectx):
        body = self.eth_type_default_body(ectx, tname)
        pars = self.eth_type_default_pars(ectx, tname)
        if ectx.conform.check_item('FN_PARS', tname):
            pars.update(ectx.conform.use_item('FN_PARS', tname))
        elif ectx.conform.check_item('FN_PARS', ectx.eth_type[tname]['ref'][0]):
            pars.update(ectx.conform.use_item('FN_PARS', ectx.eth_type[tname]['ref'][0]))
        pars['DEFAULT_BODY'] = body
        for i in range(4):
            for k in list(pars.keys()):
                try:
                    pars[k] = pars[k] % pars
                except (ValueError,TypeError):
                    raise sys.exc_info()[0]("%s\n%s" % (str(pars), sys.exc_info()[1]))
        out = '\n'
        out += self.eth_type_default_table(ectx, tname) % pars
        out += ectx.eth_type_fn_hdr(tname)
        out += ectx.eth_type_fn_body(tname, body, pars=pars)
        out += ectx.eth_type_fn_ftr(tname)
        return out


#--- Value --------------------------------------------------------------------
class Value (Node):
    def __init__(self,*args, **kw) :
        self.name = None
        Node.__init__ (self,*args, **kw)

    def SetName(self, name) :
        self.name = name

    def to_str(self, ectx):
        return str(self.val)

    def get_dep(self):
        return None

    def fld_obj_repr(self, ectx):
        return self.to_str(ectx)


#--- Value_Ref -----------------------------------------------------------------
class Value_Ref (Value):
    def to_str(self, ectx):
        return asn2c(self.val)


#--- ObjectClass ---------------------------------------------------------------------
class ObjectClass (Node):
    def __init__(self,*args, **kw) :
        self.name = None
        Node.__init__ (self,*args, **kw)

    def SetName(self, name):
        self.name = name
        add_class_ident(self.name)

    def eth_reg(self, ident, ectx):
        if ectx.conform.omit_assignment('C', self.name, ectx.Module()): return # Assignment to omit
        ectx.eth_reg_objectclass(self.name, self)


#--- Class_Ref -----------------------------------------------------------------
class Class_Ref (ObjectClass):
    pass


#--- ObjectClassDefn ---------------------------------------------------------------------
class ObjectClassDefn (ObjectClass):
    def reg_types(self):
        for fld in self.fields:
            repr = fld.fld_repr()
            set_type_to_class(self.name, repr[0], repr[1:])


#--- Tag ---------------------------------------------------------------
class Tag (Node):
    def to_python (self, ctx):
        return 'asn1.TYPE(%s,%s)' % (mk_tag_str (ctx, self.tag.cls,
                                                  self.tag_typ,
                                                  self.tag.num),
                                     self.typ.to_python (ctx))

    def IsImplicit(self, ectx):
        return ((self.mode == 'IMPLICIT') or ((self.mode == 'default') and (ectx.tag_def != 'EXPLICIT')))

    def GetTag(self, ectx):
        tc = ''
        if (self.cls == 'UNIVERSAL'): tc = 'BER_CLASS_UNI'
        elif (self.cls == 'APPLICATION'): tc = 'BER_CLASS_APP'
        elif (self.cls == 'CONTEXT'): tc = 'BER_CLASS_CON'
        elif (self.cls == 'PRIVATE'): tc = 'BER_CLASS_PRI'
        return (tc, self.num)

    def eth_tname(self):
        n = ''
        if (self.cls == 'UNIVERSAL'): n = 'U'
        elif (self.cls == 'APPLICATION'): n = 'A'
        elif (self.cls == 'CONTEXT'): n = 'C'
        elif (self.cls == 'PRIVATE'): n = 'P'
        return n + str(self.num)
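# Illustrative note (not in the original source): Tag.GetTag() above maps an
# ASN.1 tag onto the (class, number) pair used by the BER runtime, e.g. a
# hypothetical component "foo [2] IMPLICIT INTEGER" becomes
# ('BER_CLASS_CON', '2'), with BER_FLAGS_IMPLTAG added later by callers such
# as SqType.out_item() when the tag is implicit.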
#--- Constraint ---------------------------------------------------------------
constr_cnt = 0
class Constraint (Node):
    def to_python (self, ctx):
        print("Ignoring constraint:", self.type)
        return self.subtype.typ.to_python (ctx)

    def __str__ (self):
        return "Constraint: type=%s, subtype=%s" % (self.type, self.subtype)

    def eth_tname(self):
        return '#' + self.type + '_' + str(id(self))

    def IsSize(self):
        return (self.type == 'Size' and self.subtype.IsValue()) \
               or (self.type == 'Intersection' and (self.subtype[0].IsSize() or self.subtype[1].IsSize()))

    def GetSize(self, ectx):
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.IsSize():
            if self.type == 'Size':
                (minv, maxv, ext) = self.subtype.GetValue(ectx)
                ext = ext or (hasattr(self, 'ext') and self.ext)
            elif self.type == 'Intersection':
                if self.subtype[0].IsSize() and not self.subtype[1].IsSize():
                    (minv, maxv, ext) = self.subtype[0].GetSize(ectx)
                elif not self.subtype[0].IsSize() and self.subtype[1].IsSize():
                    (minv, maxv, ext) = self.subtype[1].GetSize(ectx)
        return (minv, maxv, ext)

    def IsValue(self):
        return self.type == 'SingleValue' \
               or self.type == 'ValueRange' \
               or (self.type == 'Intersection' and (self.subtype[0].IsValue() or self.subtype[1].IsValue())) \
               or (self.type == 'Union' and (self.subtype[0].IsValue() and self.subtype[1].IsValue()))

    def GetValue(self, ectx):
        (minv, maxv, ext) = ('MIN', 'MAX', False)
        if self.IsValue():
            if self.type == 'SingleValue':
                minv = ectx.value_get_eth(self.subtype)
                maxv = ectx.value_get_eth(self.subtype)
                ext = hasattr(self, 'ext') and self.ext
            elif self.type == 'ValueRange':
                minv = ectx.value_get_eth(self.subtype[0])
                maxv = ectx.value_get_eth(self.subtype[1])
                ext = hasattr(self, 'ext') and self.ext
            elif self.type == 'Intersection':
                if self.subtype[0].IsValue() and not self.subtype[1].IsValue():
                    (minv, maxv, ext) = self.subtype[0].GetValue(ectx)
                elif not self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    (minv, maxv, ext) = self.subtype[1].GetValue(ectx)
                elif self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    v0 = self.subtype[0].GetValue(ectx)
                    v1 = self.subtype[1].GetValue(ectx)
                    (minv, maxv, ext) = (ectx.value_max(v0[0],v1[0]), ectx.value_min(v0[1],v1[1]),
                                         v0[2] and v1[2])
            elif self.type == 'Union':
                if self.subtype[0].IsValue() and self.subtype[1].IsValue():
                    v0 = self.subtype[0].GetValue(ectx)
                    v1 = self.subtype[1].GetValue(ectx)
                    (minv, maxv, ext) = (ectx.value_min(v0[0],v1[0]), ectx.value_max(v0[1],v1[1]),
                                         hasattr(self, 'ext') and self.ext)
        return (minv, maxv, ext)
    def IsAlphabet(self):
        return self.type == 'SingleValue' \
               or self.type == 'ValueRange' \
               or (self.type == 'Intersection' and (self.subtype[0].IsAlphabet() or self.subtype[1].IsAlphabet())) \
               or (self.type == 'Union' and (self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet()))

    def GetAlphabet(self, ectx):
        alph = None
        if self.IsAlphabet():
            if self.type == 'SingleValue':
                alph = ectx.value_get_eth(self.subtype)
            elif self.type == 'ValueRange':
                if ((len(self.subtype[0]) == 3) and ((self.subtype[0][0] + self.subtype[0][-1]) == '""') \
                    and (len(self.subtype[1]) == 3) and ((self.subtype[1][0] + self.subtype[1][-1]) == '""')):
                    alph = '"'
                    for c in range(ord(self.subtype[0][1]), ord(self.subtype[1][1]) + 1):
                        alph += chr(c)
                    alph += '"'
            elif self.type == 'Union':
                if self.subtype[0].IsAlphabet() and self.subtype[1].IsAlphabet():
                    a0 = self.subtype[0].GetAlphabet(ectx)
                    a1 = self.subtype[1].GetAlphabet(ectx)
                    if (((a0[0] + a0[-1]) == '""') and not a0.count('"', 1, -1) \
                        and ((a1[0] + a1[-1]) == '""') and not a1.count('"', 1, -1)):
                        alph = '"' + a0[1:-1] + a1[1:-1] + '"'
                    else:
                        alph = a0 + ' ' + a1
        return alph

    def IsPermAlph(self):
        return self.type == 'From' and self.subtype.IsAlphabet() \
               or (self.type == 'Intersection' and (self.subtype[0].IsPermAlph() or self.subtype[1].IsPermAlph()))

    def GetPermAlph(self, ectx):
        alph = None
        if self.IsPermAlph():
            if self.type == 'From':
                alph = self.subtype.GetAlphabet(ectx)
            elif self.type == 'Intersection':
                if self.subtype[0].IsPermAlph() and not self.subtype[1].IsPermAlph():
                    alph = self.subtype[0].GetPermAlph(ectx)
                elif not self.subtype[0].IsPermAlph() and self.subtype[1].IsPermAlph():
                    alph = self.subtype[1].GetPermAlph(ectx)
        return alph

    def IsContents(self):
        return self.type == 'Contents' \
               or (self.type == 'Intersection' and (self.subtype[0].IsContents() or self.subtype[1].IsContents()))

    def GetContents(self, ectx):
        contents = None
        if self.IsContents():
            if self.type == 'Contents':
                if self.subtype.type == 'Type_Ref':
                    contents = self.subtype.val
            elif self.type == 'Intersection':
                if self.subtype[0].IsContents() and not self.subtype[1].IsContents():
                    contents = self.subtype[0].GetContents(ectx)
                elif not self.subtype[0].IsContents() and self.subtype[1].IsContents():
                    contents = self.subtype[1].GetContents(ectx)
        return contents

    def IsNegativ(self):
        def is_neg(sval):
            return isinstance(sval, str) and (sval[0] == '-')
        if self.type == 'SingleValue':
            return is_neg(self.subtype)
        elif self.type == 'ValueRange':
            if self.subtype[0] == 'MIN': return True
            return is_neg(self.subtype[0])
        return False

    def eth_constrname(self):
        def int2str(val):
            if isinstance(val, Value_Ref):
                return asn2c(val.val)
            try:
                if (int(val) < 0):
                    return 'M' + str(-int(val))
                else:
                    return str(int(val))
            except (ValueError, TypeError):
                return asn2c(str(val))

        ext = ''
        if hasattr(self, 'ext') and self.ext:
            ext = '_'
        if self.type == 'SingleValue':
            return int2str(self.subtype) + ext
        elif self.type == 'ValueRange':
            return int2str(self.subtype[0]) + '_' + int2str(self.subtype[1]) + ext
        elif self.type == 'Size':
            return 'SIZE_' + self.subtype.eth_constrname() + ext
        else:
            if (not hasattr(self, 'constr_num')):
                global constr_cnt
                constr_cnt += 1
                self.constr_num = constr_cnt
            return 'CONSTR%03d%s' % (self.constr_num, ext)

    def Needs64b(self, ectx):
        (minv, maxv, ext) = self.GetValue(ectx)
        if ((str(minv).isdigit() or ((str(minv)[0] == "-") and str(minv)[1:].isdigit())) \
            and (str(maxv).isdigit() or ((str(maxv)[0] == "-") and str(maxv)[1:].isdigit())) \
            and ((abs(int(maxv) - int(minv)) >= 2**32) or (int(minv) < -2**31) or (int(maxv) >= 2**32))) \
           or (maxv == 'MAX') or (minv == 'MIN'):
            return True
        return False
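# Illustrative note (not in the original source): eth_constrname() above turns
# a constraint into the suffix used in generated type names, e.g. (hypothetical)
#   INTEGER (0..255)        ->  "0_255"
#   INTEGER (-1..7, ...)    ->  "M1_7_"   (leading 'M' marks a negative bound,
#                                          trailing '_' an extension marker)
#   OCTET STRING (SIZE(4))  ->  "SIZE_4"
# Anything it cannot render compactly becomes a numbered "CONSTRxxx" name.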
class Module (Node):
    def to_python (self, ctx):
        ctx.tag_def = self.tag_def.dfl_tag
        return """#%s
%s""" % (self.ident, self.body.to_python (ctx))

    def get_name(self):
        return self.ident.val

    def get_proto(self, ectx):
        if (ectx.proto):
            prot = ectx.proto
        else:
            prot = ectx.conform.use_item('MODULE', self.get_name(), val_dflt=self.get_name())
        return prot

    def to_eth(self, ectx):
        ectx.tags_def = 'EXPLICIT' # default = explicit
        ectx.proto = self.get_proto(ectx)
        ectx.tag_def = self.tag_def.dfl_tag
        ectx.eth_reg_module(self)
        self.body.to_eth(ectx)

class Module_Body (Node):
    def to_python (self, ctx):
        # XXX handle exports, imports.
        l = [x.to_python (ctx) for x in self.assign_list]
        l = [a for a in l if a != '']
        return "\n".join (l)

    def to_eth(self, ectx):
        # Exports
        ectx.eth_exports(self.exports)
        # Imports
        for i in self.imports:
            mod = i.module.val
            proto = ectx.conform.use_item('MODULE', mod, val_dflt=mod)
            ectx.eth_module_dep_add(ectx.Module(), mod)
            for s in i.symbol_list:
                if isinstance(s, Type_Ref):
                    ectx.eth_import_type(s.val, mod, proto)
                elif isinstance(s, Value_Ref):
                    ectx.eth_import_value(s.val, mod, proto)
                elif isinstance(s, Class_Ref):
                    ectx.eth_import_class(s.val, mod, proto)
                else:
                    msg = 'Unknown kind of imported symbol %s from %s' % (str(s), mod)
                    warnings.warn_explicit(msg, UserWarning, '', 0)
        # AssignmentList
        for a in self.assign_list:
            a.eth_reg('', ectx)

class Default_Tags (Node):
    def to_python (self, ctx): # not to be used directly
        assert (0)

# XXX should just calculate dependencies as we go along.
def calc_dependencies (node, dict, trace = 0):
    if not hasattr (node, '__dict__'):
        if trace: print("#returning, node=", node)
        return
    if isinstance (node, Type_Ref):
        dict [node.val] = 1
        if trace: print("#Setting", node.val)
        return
    for (a, val) in list(node.__dict__.items ()):
        if trace: print("# Testing node ", node, "attr", a, " val", val)
        if a[0] == '_':
            continue
        elif isinstance (val, Node):
            calc_dependencies (val, dict, trace)
        elif isinstance (val, type ([])):
            for v in val:
                calc_dependencies (v, dict, trace)


class Type_Assign (Node):
    def __init__ (self, *args, **kw):
        Node.__init__ (self, *args, **kw)
        if isinstance (self.val, Tag): # XXX replace with generalized get_typ_ignoring_tag (no-op for Node, override in Tag)
            to_test = self.val.typ
        else:
            to_test = self.val
        if isinstance (to_test, SequenceType):
            to_test.sequence_name = self.name.name

    def to_python (self, ctx):
        dep_dict = {}
        calc_dependencies (self.val, dep_dict, 0)
        depend_list = list(dep_dict.keys ())
        return ctx.register_assignment (self.name.name,
                                        self.val.to_python (ctx),
                                        depend_list)

class PyQuote (Node):
    def to_python (self, ctx):
        return ctx.register_pyquote (self.val)
#--- Type_Ref -----------------------------------------------------------------
class Type_Ref (Type):
    def to_python (self, ctx):
        return self.val

    def eth_reg_sub(self, ident, ectx):
        ectx.eth_dep_add(ident, self.val)

    def eth_tname(self):
        if self.HasSizeConstraint():
            return asn2c(self.val) + '_' + self.constr.eth_constrname()
        else:
            return asn2c(self.val)

    def tr_need_own_fn(self, ectx):
        return (ectx.Per() or ectx.Oer()) and self.HasSizeConstraint()

    def fld_obj_repr(self, ectx):
        return self.val

    def get_components(self, ectx):
        if self.val not in ectx.type or ectx.type[self.val]['import']:
            msg = "Can not get COMPONENTS OF %s which is imported type" % (self.val)
            warnings.warn_explicit(msg, UserWarning, '', 0)
            return []
        else:
            return ectx.type[self.val]['val'].get_components(ectx)

    def GetTTag(self, ectx):
        #print "GetTTag(%s)\n" % self.val;
        if (ectx.type[self.val]['import']):
            if 'ttag' not in ectx.type[self.val]:
                ttag = ectx.get_ttag_from_all(self.val, ectx.type[self.val]['import'])
                if not ttag and not ectx.conform.check_item('IMPORT_TAG', self.val):
                    msg = 'Missing tag information for imported type %s from %s (%s)' % (self.val, ectx.type[self.val]['import'], ectx.type[self.val]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                    ttag = ('-1/*imported*/', '-1/*imported*/')
                ectx.type[self.val]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.val, val_dflt=ttag)
            return ectx.type[self.val]['ttag']
        else:
            return ectx.type[self.val]['val'].GetTag(ectx)

    def IndetermTag(self, ectx):
        if (ectx.type[self.val]['import']):
            return False
        else:
            return ectx.type[self.val]['val'].IndetermTag(ectx)

    def eth_type_default_pars(self, ectx, tname):
        if tname:
            pars = Type.eth_type_default_pars(self, ectx, tname)
        else:
            pars = {}
        t = ectx.type[self.val]['ethname']
        pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
        pars['TYPE_REF_TNAME'] = t
        pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        if self.HasSizeConstraint():
            (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        elif (ectx.Per() or ectx.Oer()):
            if self.HasSizeConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_size_constrained_type', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),
                                             ('"%(TYPE_REF_TNAME)s"', '%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
            else:
                body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


#--- SelectionType ------------------------------------------------------------
class SelectionType (Type):
    def to_python (self, ctx):
        return self.val

    def sel_of_typeref(self):
        return self.typ.type == 'Type_Ref'

    def eth_reg_sub(self, ident, ectx):
        if not self.sel_of_typeref():
            self.seltype = ''
            return
        self.seltype = ectx.eth_sel_req(self.typ.val, self.sel)
        ectx.eth_dep_add(ident, self.seltype)

    def eth_ftype(self, ectx):
        (ftype, display) = ('FT_NONE', 'BASE_NONE')
        if self.sel_of_typeref() and not ectx.type[self.seltype]['import']:
            (ftype, display) = ectx.type[self.typ.val]['val'].eth_ftype_sel(self.sel, ectx)
        return (ftype, display)

    def GetTTag(self, ectx):
        #print "GetTTag(%s)\n" % self.seltype;
        if (ectx.type[self.seltype]['import']):
            if 'ttag' not in ectx.type[self.seltype]:
                if not ectx.conform.check_item('IMPORT_TAG', self.seltype):
                    msg = 'Missing tag information for imported type %s from %s (%s)' % (self.seltype, ectx.type[self.seltype]['import'], ectx.type[self.seltype]['proto'])
                    warnings.warn_explicit(msg, UserWarning, '', 0)
                ectx.type[self.seltype]['ttag'] = ectx.conform.use_item('IMPORT_TAG', self.seltype, val_dflt=('-1 /*imported*/', '-1 /*imported*/'))
            return ectx.type[self.seltype]['ttag']
        else:
            return ectx.type[self.typ.val]['val'].GetTTagSel(self.sel, ectx)

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if self.sel_of_typeref():
            t = ectx.type[self.seltype]['ethname']
            pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
            pars['TYPE_REF_TNAME'] = t
            pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if not self.sel_of_typeref():
            body = '#error Can not decode %s' % (tname)
        elif (ectx.Ber()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('%(TYPE_REF_FN)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
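# Illustrative note (not in the original source): for BER, an explicitly
# tagged component such as the hypothetical "bar [3] Bar" is wrapped in a
# TaggedType below, whose generated body calls roughly
#   dissect_ber_tagged_type(..., hf_index, BER_CLASS_CON, 3, FALSE,
#                           dissect_foo_Bar);
# with the FALSE (TAG_IMPL) argument becoming TRUE for an IMPLICIT tag.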
#--- TaggedType -----------------------------------------------------------------
class TaggedType (Type):
    def eth_tname(self):
        tn = ''
        for i in range(self.tstrip, len(self.val.tags)):
            tn += self.val.tags[i].eth_tname()
            tn += '_'
        tn += self.val.eth_tname()
        return tn

    def eth_set_val_name(self, ident, val_name, ectx):
        #print "TaggedType::eth_set_val_name(): ident=%s, val_name=%s" % (ident, val_name)
        self.val_name = val_name
        ectx.eth_dep_add(ident, self.val_name)

    def eth_reg_sub(self, ident, ectx):
        self.val_name = ident + '/' + UNTAG_TYPE_NAME
        self.val.eth_reg(self.val_name, ectx, tstrip=self.tstrip+1, tagflag=True, parent=ident)

    def GetTTag(self, ectx):
        #print "GetTTag(%s)\n" % self.seltype;
        return self.GetTag(ectx)

    def eth_ftype(self, ectx):
        return self.val.eth_ftype(ectx)

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        t = ectx.type[self.val_name]['ethname']
        pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
        pars['TYPE_REF_TNAME'] = t
        pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        (pars['TAG_CLS'], pars['TAG_TAG']) = self.GetTag(ectx)
        if self.HasImplicitTag(ectx):
            pars['TAG_IMPL'] = 'TRUE'
        else:
            pars['TAG_IMPL'] = 'FALSE'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_tagged_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(HF_INDEX)s', '%(TAG_CLS)s', '%(TAG_TAG)s', '%(TAG_IMPL)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


#--- SqType -----------------------------------------------------------
class SqType (Type):
    def out_item(self, f, val, optional, ext, ectx):
        if (val.eth_omit_field()):
            t = ectx.type[val.ident]['ethname']
            fullname = ectx.dummy_eag_field
        else:
            ef = ectx.field[f]['ethname']
            t = ectx.eth_hf[ef]['ethtype']
            fullname = ectx.eth_hf[ef]['fullname']
        if (ectx.Ber()):
            #print "optional=%s, e.val.HasOwnTag()=%s, e.val.IndetermTag()=%s" % (str(e.optional), str(e.val.HasOwnTag()), str(e.val.IndetermTag(ectx)))
            #print val.str_depth(1)
            opt = ''
            if (optional):
                opt = 'BER_FLAGS_OPTIONAL'
            if (not val.HasOwnTag()):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_NOOWNTAG'
            elif (val.HasImplicitTag(ectx)):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_IMPLTAG'
            if (val.IndetermTag(ectx)):
                if (opt): opt += '|'
                opt += 'BER_FLAGS_NOTCHKTAG'
            if (not opt): opt = '0'
        else:
            if optional:
                opt = 'ASN1_OPTIONAL'
            else:
                opt = 'ASN1_NOT_OPTIONAL'
        if (ectx.Ber()):
            (tc, tn) = val.GetTag(ectx)
            out = '  { %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
                  % ('&'+fullname, tc, tn, opt, ectx.eth_type[t]['proto'], t)
        elif (ectx.Per() or ectx.Oer()):
            out = '  { %-24s, %-23s, %-17s, dissect_%s_%s },\n' \
                  % ('&'+fullname, ext, opt, ectx.eth_type[t]['proto'], t)
        else:
            out = ''
        return out
#--- SeqType -----------------------------------------------------------
class SeqType (SqType):
    def all_components(self):
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        if hasattr(self, 'elt_list2'):
            lst.extend(self.elt_list2)
        return lst

    def need_components(self):
        lst = self.all_components()
        for e in (lst):
            if e.type == 'components_of':
                return True
        return False

    def expand_components(self, ectx):
        while self.need_components():
            for i in range(len(self.elt_list)):
                if self.elt_list[i].type == 'components_of':
                    comp = self.elt_list[i].typ.get_components(ectx)
                    self.elt_list[i:i+1] = comp
                    break
            if hasattr(self, 'ext_list'):
                for i in range(len(self.ext_list)):
                    if self.ext_list[i].type == 'components_of':
                        comp = self.ext_list[i].typ.get_components(ectx)
                        self.ext_list[i:i+1] = comp
                        break
            if hasattr(self, 'elt_list2'):
                for i in range(len(self.elt_list2)):
                    if self.elt_list2[i].type == 'components_of':
                        comp = self.elt_list2[i].typ.get_components(ectx)
                        self.elt_list2[i:i+1] = comp
                        break

    def get_components(self, ectx):
        lst = self.elt_list[:]
        if hasattr(self, 'elt_list2'):
            lst.extend(self.elt_list2)
        return lst

    def eth_reg_sub(self, ident, ectx, components_available=False):
        # check if autotag is required
        autotag = False
        if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
            autotag = True
            lst = self.all_components()
            for e in (self.elt_list):
                if e.val.HasOwnTag():
                    autotag = False
                    break
        # expand COMPONENTS OF
        if self.need_components():
            if components_available:
                self.expand_components(ectx)
            else:
                ectx.eth_comp_req(ident)
                return
        # extension addition groups
        if hasattr(self, 'ext_list'):
            if (ectx.Per() or ectx.Oer()):  # add names
                eag_num = 1
                for e in (self.ext_list):
                    if isinstance(e.val, ExtensionAdditionGroup):
                        e.val.parent_ident = ident
                        e.val.parent_tname = ectx.type[ident]['tname']
                        if (e.val.ver):
                            e.val.SetName("eag_v%s" % (e.val.ver))
                        else:
                            e.val.SetName("eag_%d" % (eag_num))
                            eag_num += 1
            else:  # expand
                new_ext_list = []
                for e in (self.ext_list):
                    if isinstance(e.val, ExtensionAdditionGroup):
                        new_ext_list.extend(e.val.elt_list)
                    else:
                        new_ext_list.append(e)
                self.ext_list = new_ext_list
        # do autotag
        if autotag:
            atag = 0
            for e in (self.elt_list):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        if autotag and hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        if autotag and hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.val.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
        # register components
        for e in (self.elt_list):
            e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)
        if hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                e.val.eth_reg(ident, ectx, tstrip=1, parent=ident)

    def eth_type_default_table(self, ectx, tname):
        #print ("eth_type_default_table(tname='%s')" % (tname))
        fname = ectx.eth_type[tname]['ref'][0]
        table = "static const %(ER)s_sequence_t %(TABLE)s[] = {\n"
        if hasattr(self, 'ext_list'):
            ext = 'ASN1_EXTENSION_ROOT'
        else:
            ext = 'ASN1_NO_EXTENSIONS'
        empty_ext_flag = '0'
        if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0) and (not hasattr(self, 'elt_list2') or (len(self.elt_list2)==0)):
            empty_ext_flag = ext
        for e in (self.elt_list):
            f = fname + '/' + e.val.name
            table += self.out_item(f, e.val, e.optional, ext, ectx)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                f = fname + '/' + e.val.name
                table += self.out_item(f, e.val, e.optional, 'ASN1_NOT_EXTENSION_ROOT', ectx)
        if hasattr(self, 'elt_list2'):
            for e in (self.elt_list2):
                f = fname + '/' + e.val.name
                table += self.out_item(f, e.val, e.optional, ext, ectx)
        if (ectx.Ber()):
            table += "  { NULL, 0, 0, 0, NULL }\n};\n"
        else:
            table += "  { NULL, %s, 0, NULL }\n};\n" % (empty_ext_flag)
        return table

#--- SeqOfType -----------------------------------------------------------
class SeqOfType (SqType):
    def eth_type_default_table(self, ectx, tname):
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        if self.val.IsNamed ():
            f = fname + '/' + self.val.name
        else:
            f = fname + '/' + ITEM_FIELD_NAME
        table = "static const %(ER)s_sequence_t %(TABLE)s[1] = {\n"
        table += self.out_item(f, self.val, False, 'ASN1_NO_EXTENSIONS', ectx)
        table += "};\n"
        return table
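# Illustrative note (not in the original source): for a hypothetical BER
# "Foo ::= SEQUENCE { id INTEGER, name UTF8String OPTIONAL }" the table
# builder above emits roughly:
#
#   static const ber_sequence_t Foo_sequence[] = {
#     { &hf_foo_id  , BER_CLASS_UNI, BER_UNI_TAG_INTEGER, BER_FLAGS_NOOWNTAG, dissect_foo_INTEGER },
#     { &hf_foo_name, BER_CLASS_UNI, BER_UNI_TAG_UTF8String, BER_FLAGS_OPTIONAL|BER_FLAGS_NOOWNTAG, dissect_foo_UTF8String },
#     { NULL, 0, 0, 0, NULL }
#   };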
#--- SequenceOfType -----------------------------------------------------------
class SequenceOfType (SeqOfType):
    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        sizestr = ''
        if self.size_constr is not None:
            print("#Ignoring size constraint:", self.size_constr.subtype)
        return "%sasn1.SEQUENCE_OF (%s%s)" % (ctx.spaces (),
                                              self.val.to_python (ctx),
                                              sizestr)

    def eth_reg_sub(self, ident, ectx):
        itmnm = ident
        if not self.val.IsNamed ():
            itmnm += '/' + ITEM_FIELD_NAME
        self.val.eth_reg(itmnm, ectx, tstrip=1, idx='[##]', parent=ident)

    def eth_tname(self):
        if self.val.type != 'Type_Ref':
            return '#' + self.type + '_' + str(id(self))
        if not self.HasConstraint():
            return "SEQUENCE_OF_" + self.val.eth_tname()
        elif self.constr.IsSize():
            return 'SEQUENCE_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
        else:
            return '#' + self.type + '_' + str(id(self))

    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')

    def eth_need_tree(self):
        return True

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence_of'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif ((ectx.Per() or ectx.Oer()) and not self.HasConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        elif ((ectx.Per() or ectx.Oer()) and self.constr.type == 'Size'):
            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_sequence_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


#--- SetOfType ----------------------------------------------------------------
class SetOfType (SeqOfType):
    def eth_reg_sub(self, ident, ectx):
        itmnm = ident
        if not self.val.IsNamed ():
            itmnm += '/' + ITEM_FIELD_NAME
        self.val.eth_reg(itmnm, ectx, tstrip=1, idx='(##)', parent=ident)

    def eth_tname(self):
        if self.val.type != 'Type_Ref':
            return '#' + self.type + '_' + str(id(self))
        if not self.HasConstraint():
            return "SET_OF_" + self.val.eth_tname()
        elif self.constr.IsSize():
            return 'SET_' + self.constr.eth_constrname() + '_OF_' + self.val.eth_tname()
        else:
            return '#' + self.type + '_' + str(id(self))

    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')

    def eth_need_tree(self):
        return True

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set_of'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per() and not self.HasConstraint()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        elif (ectx.Per() and self.constr.type == 'Size'):
            body = ectx.eth_fn_call('dissect_%(ER)s_constrained_set_of', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s','%(EXT)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


def mk_tag_str (ctx, cls, typ, num):
    # XXX should do conversion to int earlier!
    val = int (num)
    typ = typ.upper()
    if typ == 'DEFAULT':
        typ = ctx.tags_def
    return 'asn1.%s(%d,cls=asn1.%s_FLAG)' % (typ, val, cls) # XXX still needed?


#--- SequenceType -------------------------------------------------------------
class SequenceType (SeqType):
    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        # XXX should also collect names for SEQUENCE inside SEQUENCE or
        # CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
        # from?  for others, element or arm name would be fine)
        seq_name = getattr (self, 'sequence_name', None)
        if seq_name is None:
            seq_name = 'None'
        else:
            seq_name = "'" + seq_name + "'"
        if 'ext_list' in self.__dict__:
            return "%sasn1.SEQUENCE ([%s], ext=[%s], seq_name = %s)" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx),
                                     self.elts_to_py (self.ext_list, ctx), seq_name)
        else:
            return "%sasn1.SEQUENCE ([%s]), seq_name = %s" % (ctx.spaces (),
                                     self.elts_to_py (self.elt_list, ctx), seq_name)
    def elts_to_py (self, list, ctx):
        # we have elt_type, val= named_type, maybe default=, optional=
        # named_type node: either ident = or typ =
        # need to dismember these in order to generate Python output syntax.
        ctx.indent ()
        def elt_to_py (e):
            assert (e.type == 'elt_type')
            nt = e.val
            optflag = e.optional
            #assert (not hasattr (e, 'default')) # XXX add support for DEFAULT!
            assert (nt.type == 'named_type')
            tagstr = 'None'
            identstr = nt.ident
            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)
                nt = nt.typ
            return "('%s',%s,%s,%d)" % (identstr, tagstr,
                                        nt.typ.to_python (ctx), optflag)
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv

    def eth_need_tree(self):
        return True

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SEQUENCE')

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


#--- ExtensionAdditionGroup ---------------------------------------------------
class ExtensionAdditionGroup (SeqType):
    def __init__(self,*args, **kw) :
        self.parent_ident = None
        self.parent_tname = None
        SeqType.__init__ (self,*args, **kw)

    def eth_omit_field(self):
        return True

    def eth_tname(self):
        if (self.parent_tname and self.IsNamed()):
            return self.parent_tname + "_" + self.name
        else:
            return SeqType.eth_tname(self)

    def eth_reg_sub(self, ident, ectx):
        ectx.eth_dummy_eag_field_required()
        ectx.eth_dep_add(self.parent_ident, ident)
        SeqType.eth_reg_sub(self, ident, ectx)

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_sequence'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_sequence_eag', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body


#--- SetType ------------------------------------------------------------------
class SetType (SeqType):
    def eth_need_tree(self):
        return True

    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_SET')

    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_set'
        return pars

    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_set', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
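# Illustrative note (not in the original source): with "AUTOMATIC TAGS" in
# force, ChoiceType below assigns context-class implicit tags [0], [1], ...
# to the alternatives, so a hypothetical
#   Bar ::= CHOICE { a INTEGER, b OCTET STRING }
# behaves as if written
#   CHOICE { a [0] IMPLICIT INTEGER, b [1] IMPLICIT OCTET STRING }.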
#--- ChoiceType ---------------------------------------------------------------
class ChoiceType (Type):
    def to_python (self, ctx):
        # name, tag (None for no tag, EXPLICIT() for explicit), typ)
        # or '' + (1,) for optional
        if 'ext_list' in self.__dict__:
            return "%sasn1.CHOICE ([%s], ext=[%s])" % (ctx.spaces (),
                                   self.elts_to_py (self.elt_list, ctx),
                                   self.elts_to_py (self.ext_list, ctx))
        else:
            return "%sasn1.CHOICE ([%s])" % (ctx.spaces (), self.elts_to_py (self.elt_list, ctx))

    def elts_to_py (self, list, ctx):
        ctx.indent ()
        def elt_to_py (nt):
            assert (nt.type == 'named_type')
            tagstr = 'None'
            if hasattr (nt, 'ident'):
                identstr = nt.ident
            else:
                if hasattr (nt.typ, 'val'):
                    identstr = nt.typ.val # XXX, making up name
                elif hasattr (nt.typ, 'name'):
                    identstr = nt.typ.name
                else:
                    identstr = ctx.make_new_name ()
            if hasattr (nt.typ, 'type') and nt.typ.type == 'tag': # ugh
                tagstr = mk_tag_str (ctx,nt.typ.tag.cls,
                                     nt.typ.tag.tag_typ,nt.typ.tag.num)
                nt = nt.typ
            return "('%s',%s,%s)" % (identstr, tagstr,
                                     nt.typ.to_python (ctx))
        indentstr = ",\n" + ctx.spaces ()
        rv = indentstr.join ([elt_to_py (e) for e in list])
        ctx.outdent ()
        return rv

    def eth_reg_sub(self, ident, ectx):
        #print "eth_reg_sub(ident='%s')" % (ident)
        # check if autotag is required
        autotag = False
        if (ectx.NeedTags() and (ectx.tag_def == 'AUTOMATIC')):
            autotag = True
            for e in (self.elt_list):
                if e.HasOwnTag():
                    autotag = False
                    break
        if autotag and hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if e.HasOwnTag():
                    autotag = False
                    break
        # do autotag
        if autotag:
            atag = 0
            for e in (self.elt_list):
                e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                atag += 1
            if autotag and hasattr(self, 'ext_list'):
                for e in (self.ext_list):
                    e.AddTag(Tag(cls = 'CONTEXT', num = str(atag), mode = 'IMPLICIT'))
                    atag += 1
        for e in (self.elt_list):
            e.eth_reg(ident, ectx, tstrip=1, parent=ident)
            if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                ectx.eth_sel_req(ident, e.name)
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                e.eth_reg(ident, ectx, tstrip=1, parent=ident)
                if ectx.conform.check_item('EXPORTS', ident + '.' + e.name):
                    ectx.eth_sel_req(ident, e.name)

    def sel_item(self, ident, sel, ectx):
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        ee = None
        for e in (self.elt_list):
            if e.IsNamed() and (e.name == sel):
                ee = e
                break
        if not ee:
            print("#CHOICE %s does not contain item %s" % (ident, sel))
        return ee

    def sel_req(self, ident, sel, ectx):
        #print "sel_req(ident='%s', sel=%s)\n%s" % (ident, sel, str(self))
        ee = self.sel_item(ident, sel, ectx)
        if ee:
            ee.eth_reg(ident, ectx, tstrip=0, selflag=True)

    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')

    def eth_ftype_sel(self, sel, ectx):
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.eth_ftype(ectx)
        else:
            return ('FT_NONE', 'BASE_NONE')

    def eth_strings(self):
        return '$$'

    def eth_need_tree(self):
        return True

    def eth_has_vals(self):
        return True

    def GetTTag(self, ectx):
        lst = self.elt_list
        cls = 'BER_CLASS_ANY/*choice*/'
        #if hasattr(self, 'ext_list'):
        #  lst.extend(self.ext_list)
        #if (len(lst) > 0):
        #  cls = lst[0].GetTag(ectx)[0]
        #for e in (lst):
        #  if (e.GetTag(ectx)[0] != cls):
        #    cls = '-1/*choice*/'
        return (cls, '-1/*choice*/')

    def GetTTagSel(self, sel, ectx):
        ee = self.sel_item('', sel, ectx)
        if ee:
            return ee.GetTag(ectx)
        else:
            return ('BER_CLASS_ANY/*unknown selection*/', '-1/*unknown selection*/')

    def IndetermTag(self, ectx):
        #print "Choice IndetermTag()=%s" % (str(not self.HasOwnTag()))
        return not self.HasOwnTag()

    def detect_tagval(self, ectx):
        tagval = False
        lst = self.elt_list[:]
        if hasattr(self, 'ext_list'):
            lst.extend(self.ext_list)
        if (len(lst) > 0) and (not (ectx.Per() or ectx.Oer()) or lst[0].HasOwnTag()):
            t = lst[0].GetTag(ectx)[0]
            tagval = True
        else:
            t = ''
            tagval = False
        if (t == 'BER_CLASS_UNI'):
            tagval = False
        for e in (lst):
            if not (ectx.Per() or ectx.Oer()) or e.HasOwnTag():
                tt = e.GetTag(ectx)[0]
            else:
                tt = ''
                tagval = False
            if (tt != t):
                tagval = False
        return tagval
    def get_vals(self, ectx):
        tagval = self.detect_tagval(ectx)
        vals = []
        cnt = 0
        for e in (self.elt_list):
            if (tagval):
                val = e.GetTag(ectx)[1]
            else:
                val = str(cnt)
            vals.append((val, e.name))
            cnt += 1
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if (tagval):
                    val = e.GetTag(ectx)[1]
                else:
                    val = str(cnt)
                vals.append((val, e.name))
                cnt += 1
        return vals
    def eth_type_vals(self, tname, ectx):
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_vals(tname, vals)
        return out
    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
    def eth_type_enum(self, tname, ectx):
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_enum(tname, vals)
        return out
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['TABLE'] = '%(PROTOP)s%(TNAME)s_choice'
        return pars
    def eth_type_default_table(self, ectx, tname):
        def out_item(val, e, ext, ectx):
            has_enum = ectx.eth_type[tname]['enum'] & EF_ENUM
            if (has_enum):
                vval = ectx.eth_enum_item(tname, e.name)
            else:
                vval = val
            f = fname + '/' + e.name
            ef = ectx.field[f]['ethname']
            t = ectx.eth_hf[ef]['ethtype']
            if (ectx.Ber()):
                opt = ''
                if (not e.HasOwnTag()):
                    opt = 'BER_FLAGS_NOOWNTAG'
                elif (e.HasImplicitTag(ectx)):
                    if (opt): opt += '|'
                    opt += 'BER_FLAGS_IMPLTAG'
                if (not opt): opt = '0'
            if (ectx.Ber()):
                (tc, tn) = e.GetTag(ectx)
                out = '  { %3s, %-24s, %-13s, %s, %s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], tc, tn, opt, ectx.eth_type[t]['proto'], t)
            elif (ectx.Per() or ectx.Oer()):
                out = '  { %3s, %-24s, %-23s, dissect_%s_%s },\n' \
                      % (vval, '&'+ectx.eth_hf[ef]['fullname'], ext, ectx.eth_type[t]['proto'], t)
            else:
                out = ''
            return out
        # end out_item()
        #print "eth_type_default_table(tname='%s')" % (tname)
        fname = ectx.eth_type[tname]['ref'][0]
        tagval = self.detect_tagval(ectx)
        table = "static const %(ER)s_choice_t %(TABLE)s[] = {\n"
        cnt = 0
        if hasattr(self, 'ext_list'):
            ext = 'ASN1_EXTENSION_ROOT'
        else:
            ext = 'ASN1_NO_EXTENSIONS'
        empty_ext_flag = '0'
        if (len(self.elt_list)==0) and hasattr(self, 'ext_list') and (len(self.ext_list)==0):
            empty_ext_flag = ext
        for e in (self.elt_list):
            if (tagval):
                val = e.GetTag(ectx)[1]
            else:
                val = str(cnt)
            table += out_item(val, e, ext, ectx)
            cnt += 1
        if hasattr(self, 'ext_list'):
            for e in (self.ext_list):
                if (tagval):
                    val = e.GetTag(ectx)[1]
                else:
                    val = str(cnt)
                table += out_item(val, e, 'ASN1_NOT_EXTENSION_ROOT', ectx)
                cnt += 1
        if (ectx.Ber()):
            table += "  { 0, NULL, 0, 0, 0, NULL }\n};\n"
        else:
            table += "  { 0, NULL, %s, NULL }\n};\n" % (empty_ext_flag)
        return table
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                         ('%(TABLE)s', '%(HF_INDEX)s', '%(ETT_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_choice', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ETT_INDEX)s', '%(TABLE)s',),
                                         ('%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- ChoiceValue ----------------------------------------------------
class ChoiceValue (Value):
    def to_str(self, ectx):
        return self.val.to_str(ectx)
    def fld_obj_eq(self, other):
        return isinstance(other, ChoiceValue) and (self.choice == other.choice) and (str(self.val.val) == str(other.val.val))

#--- EnumeratedType -----------------------------------------------------------
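# For illustration: an enumeration with holes such as
#   E ::= ENUMERATED { a(0), b(5), c(6) }
# yields root values 0, 5 and 6; since they are not consecutive from 0,
# get_vals_etc() below keeps a map_table (0, 5, 6) and a PER/OER value
# map table is emitted so the decoded index can be translated back to
# the declared values.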
class EnumeratedType (Type):
    def to_python (self, ctx):
        def strify_one (named_num):
            return "%s=%s" % (named_num.ident, named_num.val)
        return "asn1.ENUM(%s)" % ",".join (map (strify_one, self.val))
    def eth_ftype(self, ectx):
        return ('FT_UINT32', 'BASE_DEC')
    def eth_strings(self):
        return '$$'
    def eth_has_vals(self):
        return True
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_ENUMERATED')
    def get_vals_etc(self, ectx):
        vals = []
        lastv = 0
        used = {}
        maxv = 0
        root_num = 0
        ext_num = 0
        map_table = []
        for e in (self.val):
            if e.type == 'NamedNumber':
                used[int(e.val)] = True
        for e in (self.val):
            if e.type == 'NamedNumber':
                val = int(e.val)
            else:
                while lastv in used:
                    lastv += 1
                val = lastv
                used[val] = True
            vals.append((val, e.ident))
            map_table.append(val)
            root_num += 1
            if val > maxv:
                maxv = val
        if self.ext is not None:
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    used[int(e.val)] = True
            for e in (self.ext):
                if e.type == 'NamedNumber':
                    val = int(e.val)
                else:
                    while lastv in used:
                        lastv += 1
                    val = lastv
                    used[val] = True
                vals.append((val, e.ident))
                map_table.append(val)
                ext_num += 1
                if val > maxv:
                    maxv = val
        need_map = False
        for i in range(len(map_table)):
            need_map = need_map or (map_table[i] != i)
        if (not need_map):
            map_table = None
        return (vals, root_num, ext_num, map_table)
    def eth_type_vals(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_vals(tname, vals)
        return out
    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals_etc(ectx)[0]
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
    def eth_type_enum(self, tname, ectx):
        out = '\n'
        vals = self.get_vals_etc(ectx)[0]
        out += ectx.eth_enum(tname, vals)
        return out
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (root_num, ext_num, map_table) = self.get_vals_etc(ectx)[1:]
        if self.ext is not None:
            ext = 'TRUE'
        else:
            ext = 'FALSE'
        pars['ROOT_NUM'] = str(root_num)
        pars['EXT'] = ext
        pars['EXT_NUM'] = str(ext_num)
        if (map_table):
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_value_map'
        else:
            pars['TABLE'] = 'NULL'
        return pars
    def eth_type_default_table(self, ectx, tname):
        if (not ectx.Per() and not ectx.Oer()):
            return ''
        map_table = self.get_vals_etc(ectx)[3]
        if map_table is None:
            return ''
        table = "static uint32_t %(TABLE)s[%(ROOT_NUM)s+%(EXT_NUM)s] = {"
        table += ", ".join([str(v) for v in map_table])
        table += "};\n"
        return table
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_enumerated', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(ROOT_NUM)s', '%(VAL_PTR)s', '%(EXT)s', '%(EXT_NUM)s', '%(TABLE)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- EmbeddedPDVType -----------------------------------------------------------
class EmbeddedPDVType (Type):
    def eth_tname(self):
        return 'EMBEDDED_PDV'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EMBEDDED_PDV')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if ectx.default_embedded_pdv_cb:
            pars['TYPE_REF_FN'] = ectx.default_embedded_pdv_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_EmbeddedPDV_Type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_embedded_pdv', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- ExternalType -----------------------------------------------------------
class ExternalType (Type):
    def eth_tname(self):
        return 'EXTERNAL'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if ectx.default_external_type_cb:
            pars['TYPE_REF_FN'] = ectx.default_external_type_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- OpenType -----------------------------------------------------------
class OpenType (Type):
    def to_python (self, ctx):
        return "asn1.ANY"
    def single_type(self):
        if (self.HasConstraint() and
            self.constr.type == 'Type' and
            self.constr.subtype.type == 'Type_Ref'):
            return self.constr.subtype.val
        return None
    def eth_reg_sub(self, ident, ectx):
        t = self.single_type()
        if t:
            ectx.eth_dep_add(ident, t)
    def eth_tname(self):
        t = self.single_type()
        if t:
            return 'OpenType_' + t
        else:
            return Type.eth_tname(self)
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_opentype_variant
        t = self.single_type()
        if t:
            t = ectx.type[t]['ethname']
            pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
            pars['TYPE_REF_TNAME'] = t
            pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_open_type%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- InstanceOfType -----------------------------------------------------------
class InstanceOfType (Type):
    def eth_tname(self):
        return 'INSTANCE_OF'
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_EXTERNAL')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if ectx.default_external_type_cb:
            pars['TYPE_REF_FN'] = ectx.default_external_type_cb
        else:
            pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_external_type', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(HF_INDEX)s', '%(TYPE_REF_FN)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- AnyType -----------------------------------------------------------
class AnyType (Type):
    def to_python (self, ctx):
        return "asn1.ANY"
    def eth_ftype(self, ectx):
        return ('FT_NONE', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_ANY', '0')
    def eth_type_default_body(self, ectx, tname):
        body = '#error Can not decode %s' % (tname)
        return body

class Literal (Node):
    def to_python (self, ctx):
        return self.val

#--- NullType -----------------------------------------------------------------
class NullType (Type):
    def to_python (self, ctx):
        return 'asn1.NULL'
    def eth_tname(self):
        return 'NULL'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_NULL')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
        elif (ectx.Per() or ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_null', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- NullValue ----------------------------------------------------
class NullValue (Value):
    def to_str(self, ectx):
        return 'NULL'

#--- RealType -----------------------------------------------------------------
class RealType (Type):
    def to_python (self, ctx):
        return 'asn1.REAL'
    def eth_tname(self):
        return 'REAL'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_REAL')
    def eth_ftype(self, ectx):
        return ('FT_DOUBLE', 'BASE_NONE')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_real', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- BooleanType --------------------------------------------------------------
class BooleanType (Type):
    def to_python (self, ctx):
        return 'asn1.BOOLEAN'
    def eth_tname(self):
        return 'BOOLEAN'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BOOLEAN')
    def eth_ftype(self, ectx):
        return ('FT_BOOLEAN', 'BASE_NONE')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_boolean', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- OctetStringType ----------------------------------------------------------
class OctetStringType (Type):
    def to_python (self, ctx):
        return 'asn1.OCTSTRING'
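    # Sketch of the naming scheme used by eth_tname() below: an
    # unconstrained OCTET STRING maps to the shared type name
    # 'OCTET_STRING', while a size-constrained one, e.g.
    # OCTET STRING (SIZE(4)), gets a name suffixed with the constraint
    # (via eth_constrname()) so distinct constraints are kept apart.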
    def eth_tname(self):
        if not self.HasConstraint():
            return 'OCTET_STRING'
        elif self.constr.type == 'Size':
            return 'OCTET_STRING' + '_' + self.constr.eth_constrname()
        else:
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OCTETSTRING')
    def eth_need_pdu(self, ectx):
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if self.HasContentsConstraint():
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_octet_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- CharacterStringType ------------------------------------------------------
class CharacterStringType (Type):
    def eth_tname(self):
        if not self.HasConstraint():
            return self.eth_tsname()
        elif self.constr.type == 'Size':
            return self.eth_tsname() + '_' + self.constr.eth_constrname()
        else:
            return '#' + self.type + '_' + str(id(self))
    def eth_ftype(self, ectx):
        return ('FT_STRING', 'BASE_NONE')

class RestrictedCharacterStringType (CharacterStringType):
    def to_python (self, ctx):
        return 'asn1.' + self.eth_tsname()
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_' + self.eth_tsname())
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        (pars['STRING_TYPE'], pars['STRING_TAG']) = (self.eth_tsname(), self.GetTTag(ectx)[1])
        (pars['ALPHABET'], pars['ALPHABET_LEN']) = self.eth_get_alphabet_constr(ectx)
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_restricted_string', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(STRING_TAG)s'),
                                             ('%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() and self.HasPermAlph() and self.eth_tsname() in KnownMultiplierStringTypes):
            # XXX: If there is a permitted alphabet but it is extensible,
            # then the permitted-alphabet is not PER-visible and should be
            # ignored. (X.691 9.3.10, 9.3.18) We don't handle extensible
            # permitted-alphabets.
            body = ectx.eth_fn_call('dissect_%(ER)s_restricted_character_string', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(ALPHABET)s', '%(ALPHABET_LEN)s'),
                                         ('%(VAL_PTR)s',),))
        elif (ectx.Per()):
            if (self.eth_tsname() == 'GeneralString'):
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),))
            elif (self.eth_tsname() == 'GeneralizedTime' or self.eth_tsname() == 'UTCTime'):
                body = ectx.eth_fn_call('dissect_%(ER)s_VisibleString', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'),
                                             ('%(VAL_PTR)s',),))
            elif (self.eth_tsname() in KnownMultiplierStringTypes):
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s'),
                                             ('%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
        elif (ectx.Oer()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                         ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

class BMPStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'BMPString'

class GeneralStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'GeneralString'

class GraphicStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'GraphicString'

class IA5StringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'IA5String'

class NumericStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'NumericString'

class PrintableStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'PrintableString'

class TeletexStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'TeletexString'
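# For illustration: each subclass only overrides eth_tsname(); e.g.
# IA5StringType yields ('BER_CLASS_UNI', 'BER_UNI_TAG_IA5String') from
# RestrictedCharacterStringType.GetTTag() and a dissect_%(ER)s_IA5String
# call from eth_type_default_body() above.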
class T61StringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'T61String'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_TeletexString')

class UniversalStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'UniversalString'

class UTF8StringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'UTF8String'

class VideotexStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'VideotexString'

class VisibleStringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'VisibleString'

class ISO646StringType (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'ISO646String'
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_VisibleString')

class UnrestrictedCharacterStringType (CharacterStringType):
    def to_python (self, ctx):
        return 'asn1.UnrestrictedCharacterString'
    def eth_tsname(self):
        return 'CHARACTER_STRING'

#--- UsefulType ---------------------------------------------------------------
class GeneralizedTime (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'GeneralizedTime'
    def eth_ftype(self, ectx):
        if (ectx.Ber()):
            return ('FT_ABSOLUTE_TIME', 'ABSOLUTE_TIME_LOCAL')
        else:
            return ('FT_STRING', 'BASE_NONE')
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),))
            return body
        else:
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)

class UTCTime (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'UTCTime'
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_%(STRING_TYPE)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', 'NULL', 'NULL'),))
            return body
        else:
            return RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)

class ObjectDescriptor (RestrictedCharacterStringType):
    def eth_tsname(self):
        return 'ObjectDescriptor'
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = RestrictedCharacterStringType.eth_type_default_body(self, ectx, tname)
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_descriptor', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- ObjectIdentifierType -----------------------------------------------------
class ObjectIdentifierType (Type):
    def to_python (self, ctx):
        return 'asn1.OBJECT_IDENTIFIER'
    def eth_tname(self):
        return 'OBJECT_IDENTIFIER'
    def eth_ftype(self, ectx):
        return ('FT_OID', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_OID')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_object_identifier%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body
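# For illustration: a value reference such as
#   id-foo OBJECT IDENTIFIER ::= { iso(1) member-body(2) 840 }
# is parsed into an ObjectIdentifierValue whose to_str() (below) renders
# the quoted C literal "1.2.840" for the generated dissector.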
#--- ObjectIdentifierValue ----------------------------------------------------
class ObjectIdentifierValue (Value):
    def get_num(self, path, val):
        return str(oid_names.get(path + '/' + val, val))
    def to_str(self, ectx):
        out = ''
        path = ''
        first = True
        sep = ''
        for v in self.comp_list:
            if isinstance(v, Node) and (v.type == 'name_and_number'):
                vstr = v.number
            elif v.isdigit():
                vstr = v
            else:
                vstr = self.get_num(path, v)
            if not first and not vstr.isdigit():
                vstr = ectx.value_get_val(vstr)
            if first:
                if vstr.isdigit():
                    out += '"' + vstr
                else:
                    out += ectx.value_get_eth(vstr) + '"'
            else:
                out += sep + vstr
            path += sep + vstr
            first = False
            sep = '.'
        out += '"'
        return out
    def get_dep(self):
        v = self.comp_list[0]
        if isinstance(v, Node) and (v.type == 'name_and_number'):
            return None
        elif v.isdigit():
            return None
        else:
            vstr = self.get_num('', v)
            if vstr.isdigit():
                return None
            else:
                return vstr

class NamedNumber(Node):
    def to_python (self, ctx):
        return "('%s',%s)" % (self.ident, self.val)
    def __lt__(self, other):
        return int(self.val) < int(other.val)

class NamedNumListBase(Node):
    def to_python (self, ctx):
        return "asn1.%s_class ([%s])" % (self.asn1_typ,
            ",".join ([x.to_python (ctx) for x in self.named_list]))

#--- RelativeOIDType ----------------------------------------------------------
class RelativeOIDType (Type):
    def eth_tname(self):
        return 'RELATIVE_OID'
    def eth_ftype(self, ectx):
        return ('FT_REL_OID', 'BASE_NONE')
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_RELATIVE_OID')
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['FN_VARIANT'] = ectx.default_oid_variant
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        elif (ectx.Per()):
            body = ectx.eth_fn_call('dissect_%(ER)s_relative_oid%(FN_VARIANT)s', ret='offset',
                                    par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- IntegerType --------------------------------------------------------------
class IntegerType (Type):
    def to_python (self, ctx):
        return "asn1.INTEGER_class ([%s])" % (
            ",".join ([x.to_python (ctx) for x in self.named_list]))
    def add_named_value(self, ident, val):
        e = NamedNumber(ident = ident, val = val)
        if not self.named_list:
            self.named_list = []
        self.named_list.append(e)
    def eth_tname(self):
        if self.named_list:
            return Type.eth_tname(self)
        if not self.HasConstraint():
            return 'INTEGER'
        elif self.constr.type == 'SingleValue' or self.constr.type == 'ValueRange':
            return 'INTEGER' + '_' + self.constr.eth_constrname()
        else:
            return 'INTEGER' + '_' + self.constr.eth_tname()
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_INTEGER')
    def eth_ftype(self, ectx):
        if self.HasConstraint():
            if not self.constr.IsNegativ():
                if self.constr.Needs64b(ectx):
                    return ('FT_UINT64', 'BASE_DEC')
                else:
                    return ('FT_UINT32', 'BASE_DEC')
            if self.constr.Needs64b(ectx):
                return ('FT_INT64', 'BASE_DEC')
        return ('FT_INT32', 'BASE_DEC')
    def eth_strings(self):
        if (self.named_list):
            return '$$'
        else:
            return 'NULL'
    def eth_has_vals(self):
        if (self.named_list):
            return True
        else:
            return False
    def get_vals(self, ectx):
        vals = []
        for e in (self.named_list):
            vals.append((int(e.val), e.ident))
        return vals
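    # get_vals() returns (value, name) pairs, e.g. INTEGER { off(0), on(1) }
    # gives [(0, 'off'), (1, 'on')], which eth_type_vals() below turns into
    # a value_string array for the field.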
    def eth_type_vals(self, tname, ectx):
        if not self.eth_has_vals():
            return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_vals(tname, vals)
        return out
    def reg_enum_vals(self, tname, ectx):
        vals = self.get_vals(ectx)
        for (val, id) in vals:
            ectx.eth_reg_value(id, self, val, ethname=ectx.eth_enum_item(tname, id))
    def eth_type_enum(self, tname, ectx):
        if not self.eth_has_enum(tname, ectx):
            return ''
        out = '\n'
        vals = self.get_vals(ectx)
        out += ectx.eth_enum(tname, vals)
        return out
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        if self.HasValueConstraint():
            (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_value_constr(ectx)
            if (pars['FN_VARIANT'] == '') and self.constr.Needs64b(ectx):
                if ectx.Ber():
                    pars['FN_VARIANT'] = '64'
                else:
                    if (ectx.Oer() and pars['MAX_VAL'] == 'NO_BOUND'):
                        pars['FN_VARIANT'] = '_64b_no_ub'
                    else:
                        pars['FN_VARIANT'] = '_64b'
        return pars
    def eth_type_default_body(self, ectx, tname):
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(HF_INDEX)s', '%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s', '%(HF_INDEX)s'),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if (self.HasValueConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(VAL_PTR)s', '%(EXT)s'),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_integer%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s', '%(VAL_PTR)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- BitStringType ------------------------------------------------------------
class BitStringType (Type):
    def to_python (self, ctx):
        return "asn1.BITSTRING_class ([%s])" % (
            ",".join ([x.to_python (ctx) for x in self.named_list]))
    def eth_tname(self):
        if self.named_list:
            return Type.eth_tname(self)
        elif not self.HasConstraint():
            return 'BIT_STRING'
        elif self.constr.IsSize():
            return 'BIT_STRING' + '_' + self.constr.eth_constrname()
        else:
            return '#' + self.type + '_' + str(id(self))
    def GetTTag(self, ectx):
        return ('BER_CLASS_UNI', 'BER_UNI_TAG_BITSTRING')
    def eth_ftype(self, ectx):
        return ('FT_BYTES', 'BASE_NONE')
    def eth_need_tree(self):
        return self.named_list
    def eth_need_pdu(self, ectx):
        pdu = None
        if self.HasContentsConstraint():
            t = self.constr.GetContents(ectx)
            if t and (ectx.default_containing_variant in ('_pdu', '_pdu_new')):
                pdu = { 'type' : t,
                        'new' : ectx.default_containing_variant == '_pdu_new' }
        return pdu
    def sortNamedBits(self):
        return self.named_list.val
    def eth_named_bits(self):
        bits = []
        if (self.named_list):
            sorted_list = self.named_list
            sorted_list.sort()
            expected_bit_no = 0
            for e in (sorted_list):
                # Fill the table with "spare_bit" for "un named bits"
                if (int(e.val) != 0) and (expected_bit_no != int(e.val)):
                    while (expected_bit_no < int(e.val)):
                        bits.append((expected_bit_no, ("spare_bit%u" % (expected_bit_no))))
                        expected_bit_no = expected_bit_no + 1
                #print ("Adding named bits to list %s bit no %d" % (e.ident, int (e.val)))
                bits.append((int(e.val), e.ident))
                expected_bit_no = int(e.val) + 1
        return bits
    def eth_type_default_pars(self, ectx, tname):
        pars = Type.eth_type_default_pars(self, ectx, tname)
        pars['LEN_PTR'] = 'NULL'
        (pars['MIN_VAL'], pars['MAX_VAL'], pars['EXT']) = self.eth_get_size_constr(ectx)
        if 'ETT_INDEX' not in pars:
            pars['ETT_INDEX'] = '-1'
        pars['TABLE'] = 'NULL'
        if self.eth_named_bits():
            pars['TABLE'] = '%(PROTOP)s%(TNAME)s_bits'
        if self.HasContentsConstraint():
            pars['FN_VARIANT'] = ectx.default_containing_variant
            t = self.constr.GetContents(ectx)
            if t:
                if pars['FN_VARIANT'] in ('_pdu', '_pdu_new'):
                    t = ectx.field[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ''
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_TNAME)s'
                else:
                    t = ectx.type[t]['ethname']
                    pars['TYPE_REF_PROTO'] = ectx.eth_type[t]['proto']
                    pars['TYPE_REF_TNAME'] = t
                    pars['TYPE_REF_FN'] = 'dissect_%(TYPE_REF_PROTO)s_%(TYPE_REF_TNAME)s'
            else:
                pars['TYPE_REF_FN'] = 'NULL'
        return pars
    def eth_type_default_table(self, ectx, tname):
        #print ("eth_type_default_table(tname='%s')" % (tname))
        table = ''
        bits = self.eth_named_bits()
        if (bits):
            table = ectx.eth_bits(tname, bits)
        return table
    def eth_type_default_body(self, ectx, tname):
        bits = self.eth_named_bits()
        if (ectx.Ber()):
            if (ectx.constraints_check and self.HasSizeConstraint()):
                body = ectx.eth_fn_call('dissect_%(ER)s_constrained_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(TABLE)s', '%s' % len(bits), '%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bitstring', ret='offset',
                                        par=(('%(IMPLICIT_TAG)s', '%(ACTX)s', '%(TREE)s', '%(TVB)s', '%(OFFSET)s'),
                                             ('%(TABLE)s', '%s' % len(bits), '%(HF_INDEX)s', '%(ETT_INDEX)s',),
                                             ('%(VAL_PTR)s',),))
        elif (ectx.Per() or ectx.Oer()):
            if self.HasContentsConstraint():
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string_containing%(FN_VARIANT)s', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TYPE_REF_FN)s'),))
            else:
                body = ectx.eth_fn_call('dissect_%(ER)s_bit_string', ret='offset',
                                        par=(('%(TVB)s', '%(OFFSET)s', '%(ACTX)s', '%(TREE)s', '%(HF_INDEX)s'),
                                             ('%(MIN_VAL)s', '%(MAX_VAL)s', '%(EXT)s', '%(TABLE)s', '%s' % len(bits), '%(VAL_PTR)s', '%(LEN_PTR)s'),))
        else:
            body = '#error Can not decode %s' % (tname)
        return body

#--- BStringValue ------------------------------------------------------------
bstring_tab = {
    '0000' : '0', '0001' : '1', '0010' : '2', '0011' : '3',
    '0100' : '4', '0101' : '5', '0110' : '6', '0111' : '7',
    '1000' : '8', '1001' : '9', '1010' : 'A', '1011' : 'B',
    '1100' : 'C', '1101' : 'D', '1110' : 'E', '1111' : 'F',
}

class BStringValue (Value):
    def to_str(self, ectx):
        v = self.val[1:-2]
        if len(v) % 8:
            v += '0' * (8 - len(v) % 8)
        vv = '0x'
        for i in (list(range(0, len(v), 4))):
            vv += bstring_tab[v[i:i+4]]
        return vv

#--- HStringValue ------------------------------------------------------------
class HStringValue (Value):
    def to_str(self, ectx):
        vv = '0x'
        vv += self.val[1:-2]
        return vv
    def __int__(self):
        return int(self.val[1:-2], 16)

#--- FieldSpec ----------------------------------------------------------------
class FieldSpec (Node):
    def __init__(self, *args, **kw):
        self.name = None
        Node.__init__ (self, *args, **kw)
    def SetName(self, name):
        self.name = name
    def get_repr(self):
        return ['#UNSUPPORTED_' + self.type]
    def fld_repr(self):
        repr = [self.name]
        repr.extend(self.get_repr())
        return repr

class TypeFieldSpec (FieldSpec):
    def get_repr(self):
        return []

class FixedTypeValueFieldSpec (FieldSpec):
    def get_repr(self):
        if isinstance(self.typ, Type_Ref):
            repr = ['TypeReference', self.typ.val]
        else:
            repr = [self.typ.type]
        return repr

class VariableTypeValueFieldSpec (FieldSpec):
    def get_repr(self):
        return ['_' + self.type]

class FixedTypeValueSetFieldSpec (FieldSpec):
    def get_repr(self):
        return ['_' + self.type]

class ObjectFieldSpec (FieldSpec):
    def get_repr(self):
        return ['ClassReference', self.cls.val]

class ObjectSetFieldSpec (FieldSpec):
    def get_repr(self):
        return ['ClassReference', self.cls.val]

#==============================================================================

def p_module_list_1 (t):
    'module_list : module_list ModuleDefinition'
    t[0] = t[1] + [t[2]]

def p_module_list_2 (t):
    'module_list : ModuleDefinition'
    t[0] = [t[1]]


#--- ITU-T Recommendation X.680 -----------------------------------------------

# 11 ASN.1 lexical items --------------------------------------------------------

# 11.2 Type references
def p_type_ref (t):
    'type_ref : UCASE_IDENT'
    t[0] = Type_Ref(val=t[1])

# 11.3 Identifiers
def p_identifier (t):
    'identifier : LCASE_IDENT'
    t[0] = t[1]

# 11.4 Value references
# cause reduce/reduce conflict
#def p_valuereference (t):
#    'valuereference : LCASE_IDENT'
#    t[0] = Value_Ref(val=t[1])

# 11.5 Module references
def p_modulereference (t):
    'modulereference : UCASE_IDENT'
    t[0] = t[1]


# 12 Module definition --------------------------------------------------------

# 12.1
def p_ModuleDefinition (t):
    'ModuleDefinition : ModuleIdentifier DEFINITIONS TagDefault ASSIGNMENT ModuleBegin BEGIN ModuleBody END'
    t[0] = Module (ident = t[1], tag_def = t[3], body = t[7])

def p_ModuleBegin (t):
    'ModuleBegin : '
    if t[-4].val == 'Remote-Operations-Information-Objects':
        x880_module_begin()

def p_TagDefault_1 (t):
    '''TagDefault : EXPLICIT TAGS
                  | IMPLICIT TAGS
                  | AUTOMATIC TAGS '''
    t[0] = Default_Tags (dfl_tag = t[1])
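# For illustration: a module header like
#   MyModule DEFINITIONS IMPLICIT TAGS ::= BEGIN ... END
# reduces TagDefault above to Default_Tags(dfl_tag='IMPLICIT'); the
# empty alternative below falls back to EXPLICIT per X.680 12.2.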
def p_TagDefault_2 (t):
    'TagDefault : '
    # 12.2 The "TagDefault" is taken as EXPLICIT TAGS if it is "empty".
    t[0] = Default_Tags (dfl_tag = 'EXPLICIT')

def p_ModuleIdentifier_1 (t):
    'ModuleIdentifier : modulereference DefinitiveIdentifier' # name, oid
    t[0] = Node('module_ident', val = t[1], ident = t[2])

def p_ModuleIdentifier_2 (t):
    'ModuleIdentifier : modulereference' # name, oid
    t[0] = Node('module_ident', val = t[1], ident = None)

def p_DefinitiveIdentifier (t):
    'DefinitiveIdentifier : ObjectIdentifierValue'
    t[0] = t[1]

#def p_module_ref (t):
#    'module_ref : UCASE_IDENT'
#    t[0] = t[1]

def p_ModuleBody_1 (t):
    'ModuleBody : Exports Imports AssignmentList'
    t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3])

def p_ModuleBody_2 (t):
    'ModuleBody : '
    t[0] = Node ('module_body', exports = [], imports = [], assign_list = [])

def p_Exports_1 (t):
    'Exports : EXPORTS syms_exported SEMICOLON'
    t[0] = t[2]

def p_Exports_2 (t):
    'Exports : EXPORTS ALL SEMICOLON'
    t[0] = [ 'ALL' ]

def p_Exports_3 (t):
    'Exports : '
    t[0] = [ 'ALL' ]

def p_syms_exported_1 (t):
    'syms_exported : exp_sym_list'
    t[0] = t[1]

def p_syms_exported_2 (t):
    'syms_exported : '
    t[0] = []

def p_exp_sym_list_1 (t):
    'exp_sym_list : Symbol'
    t[0] = [t[1]]

def p_exp_sym_list_2 (t):
    'exp_sym_list : exp_sym_list COMMA Symbol'
    t[0] = t[1] + [t[3]]

def p_Imports_1 (t):
    'Imports : importsbegin IMPORTS SymbolsImported SEMICOLON'
    t[0] = t[3]
    global lcase_ident_assigned
    lcase_ident_assigned = {}

def p_importsbegin (t):
    'importsbegin : '
    global lcase_ident_assigned
    global g_conform
    lcase_ident_assigned = {}
    lcase_ident_assigned.update(g_conform.use_item('ASSIGNED_ID', 'OBJECT_IDENTIFIER'))

def p_Imports_2 (t):
    'Imports : '
    t[0] = []

def p_SymbolsImported_1(t):
    'SymbolsImported : '
    t[0] = []

def p_SymbolsImported_2 (t):
    'SymbolsImported : SymbolsFromModuleList'
    t[0] = t[1]

def p_SymbolsFromModuleList_1 (t):
    'SymbolsFromModuleList : SymbolsFromModuleList SymbolsFromModule'
    t[0] = t[1] + [t[2]]

def p_SymbolsFromModuleList_2 (t):
    'SymbolsFromModuleList : SymbolsFromModule'
    t[0] = [t[1]]

def p_SymbolsFromModule (t):
    'SymbolsFromModule : SymbolList FROM GlobalModuleReference'
    t[0] = Node ('SymbolList', symbol_list = t[1], module = t[3])
    for s in (t[0].symbol_list):
        if (isinstance(s, Value_Ref)):
            lcase_ident_assigned[s.val] = t[3]
    import_symbols_from_module(t[0].module, t[0].symbol_list)

def import_symbols_from_module(module, symbol_list):
    if module.val == 'Remote-Operations-Information-Objects':
        for i in range(len(symbol_list)):
            s = symbol_list[i]
            if isinstance(s, Type_Ref) or isinstance(s, Class_Ref):
                x880_import(s.val)
                if isinstance(s, Type_Ref) and is_class_ident(s.val):
                    symbol_list[i] = Class_Ref (val = s.val)
        return
    for i in range(len(symbol_list)):
        s = symbol_list[i]
        if isinstance(s, Type_Ref) and is_class_ident("$%s$%s" % (module.val, s.val)):
            import_class_from_module(module.val, s.val)
        if isinstance(s, Type_Ref) and is_class_ident(s.val):
            symbol_list[i] = Class_Ref (val = s.val)

def p_GlobalModuleReference (t):
    'GlobalModuleReference : modulereference AssignedIdentifier'
    t[0] = Node('module_ident', val = t[1], ident = t[2])

def p_AssignedIdentifier_1 (t):
    'AssignedIdentifier : ObjectIdentifierValue'
    t[0] = t[1]

def p_AssignedIdentifier_2 (t):
    'AssignedIdentifier : LCASE_IDENT_ASSIGNED'
    t[0] = t[1]

def p_AssignedIdentifier_3 (t):
    'AssignedIdentifier : '
    pass

def p_SymbolList_1 (t):
    'SymbolList : Symbol'
    t[0] = [t[1]]

def p_SymbolList_2 (t):
    'SymbolList : SymbolList COMMA Symbol'
    t[0] = t[1] + [t[3]]

def p_Symbol (t):
    '''Symbol : Reference
              | ParameterizedReference'''
    t[0] = t[1]
def p_Reference_1 (t):
    '''Reference : type_ref
                 | objectclassreference '''
    t[0] = t[1]

def p_Reference_2 (t):
    '''Reference : LCASE_IDENT_ASSIGNED
                 | identifier '''
    # instead of valuereference which causes reduce/reduce conflict
    t[0] = Value_Ref(val=t[1])

def p_AssignmentList_1 (t):
    'AssignmentList : AssignmentList Assignment'
    t[0] = t[1] + [t[2]]

def p_AssignmentList_2 (t):
    'AssignmentList : Assignment SEMICOLON'
    t[0] = [t[1]]

def p_AssignmentList_3 (t):
    'AssignmentList : Assignment'
    t[0] = [t[1]]

def p_Assignment (t):
    '''Assignment : TypeAssignment
                  | ValueAssignment
                  | ValueSetTypeAssignment
                  | ObjectClassAssignment
                  | ObjectAssignment
                  | ObjectSetAssignment
                  | ParameterizedAssignment
                  | pyquote '''
    t[0] = t[1]


# 13 Referencing type and value definitions -----------------------------------

# 13.1
def p_DefinedType (t):
    '''DefinedType : ExternalTypeReference
                   | type_ref
                   | ParameterizedType'''
    t[0] = t[1]

def p_DefinedValue_1(t):
    '''DefinedValue : ExternalValueReference'''
    t[0] = t[1]

def p_DefinedValue_2(t):
    '''DefinedValue : identifier '''
    # instead of valuereference which causes reduce/reduce conflict
    t[0] = Value_Ref(val=t[1])

# 13.6
def p_ExternalTypeReference (t):
    'ExternalTypeReference : modulereference DOT type_ref'
    t[0] = Node ('ExternalTypeReference', module = t[1], typ = t[3])

def p_ExternalValueReference (t):
    'ExternalValueReference : modulereference DOT identifier'
    t[0] = Node ('ExternalValueReference', module = t[1], ident = t[3])


# 15 Assigning types and values -----------------------------------------------

# 15.1
def p_TypeAssignment (t):
    'TypeAssignment : UCASE_IDENT ASSIGNMENT Type'
    t[0] = t[3]
    t[0].SetName(t[1])

# 15.2
def p_ValueAssignment (t):
    'ValueAssignment : LCASE_IDENT ValueType ASSIGNMENT Value'
    t[0] = ValueAssignment(ident = t[1], typ = t[2], val = t[4])

# only "simple" types are supported to simplify grammar
def p_ValueType (t):
    '''ValueType : type_ref
                 | BooleanType
                 | IntegerType
                 | ObjectIdentifierType
                 | OctetStringType
                 | RealType '''
    t[0] = t[1]

# 15.6
def p_ValueSetTypeAssignment (t):
    'ValueSetTypeAssignment : UCASE_IDENT ValueType ASSIGNMENT ValueSet'
    t[0] = Node('ValueSetTypeAssignment', name=t[1], typ=t[2], val=t[4])

# 15.7
def p_ValueSet (t):
    'ValueSet : lbraceignore rbraceignore'
    t[0] = None


# 16 Definition of types and values -------------------------------------------

# 16.1
def p_Type (t):
    '''Type : BuiltinType
            | ReferencedType
            | ConstrainedType'''
    t[0] = t[1]

# 16.2
def p_BuiltinType (t):
    '''BuiltinType : AnyType
                   | BitStringType
                   | BooleanType
                   | CharacterStringType
                   | ChoiceType
                   | EmbeddedPDVType
                   | EnumeratedType
                   | ExternalType
                   | InstanceOfType
                   | IntegerType
                   | NullType
                   | ObjectClassFieldType
                   | ObjectIdentifierType
                   | OctetStringType
                   | RealType
                   | RelativeOIDType
                   | SequenceType
                   | SequenceOfType
                   | SetType
                   | SetOfType
                   | TaggedType'''
    t[0] = t[1]

# 16.3
def p_ReferencedType (t):
    '''ReferencedType : DefinedType
                      | UsefulType
                      | SelectionType'''
    t[0] = t[1]

# 16.5
def p_NamedType (t):
    'NamedType : identifier Type'
    t[0] = t[2]
    t[0].SetName (t[1])

# 16.7
def p_Value (t):
    '''Value : BuiltinValue
             | ReferencedValue
             | ObjectClassFieldValue'''
    t[0] = t[1]

# 16.9
def p_BuiltinValue (t):
    '''BuiltinValue : BooleanValue
                    | ChoiceValue
                    | IntegerValue
                    | ObjectIdentifierValue
                    | RealValue
                    | SequenceValue
                    | hex_string
                    | binary_string
                    | char_string''' # XXX we don't support {data} here
    t[0] = t[1]

# 16.11
def p_ReferencedValue (t):
    '''ReferencedValue : DefinedValue
                       | ValueFromObject'''
    t[0] = t[1]

# 16.13
#def p_NamedValue (t):
#    'NamedValue : identifier Value'
#    t[0] = Node ('NamedValue', ident = t[1], value = t[2])
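# For illustration: an assignment such as
#   Foo ::= SEQUENCE { a INTEGER }
# reduces through p_TypeAssignment above: t[3] is the SequenceType node
# built by the rules below, and SetName('Foo') attaches the type name
# before the node is registered.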
# 17 Notation for the boolean type --------------------------------------------

# 17.1
def p_BooleanType (t):
    'BooleanType : BOOLEAN'
    t[0] = BooleanType ()

# 17.2
def p_BooleanValue (t):
    '''BooleanValue : TRUE
                    | FALSE'''
    t[0] = t[1]


# 18 Notation for the integer type --------------------------------------------

# 18.1
def p_IntegerType_1 (t):
    'IntegerType : INTEGER'
    t[0] = IntegerType (named_list = None)

def p_IntegerType_2 (t):
    'IntegerType : INTEGER LBRACE NamedNumberList RBRACE'
    t[0] = IntegerType(named_list = t[3])

def p_NamedNumberList_1 (t):
    'NamedNumberList : NamedNumber'
    t[0] = [t[1]]

def p_NamedNumberList_2 (t):
    'NamedNumberList : NamedNumberList COMMA NamedNumber'
    t[0] = t[1] + [t[3]]

def p_NamedNumber (t):
    '''NamedNumber : identifier LPAREN SignedNumber RPAREN
                   | identifier LPAREN DefinedValue RPAREN'''
    t[0] = NamedNumber(ident = t[1], val = t[3])

def p_SignedNumber_1 (t):
    'SignedNumber : NUMBER'
    t[0] = t[1]

def p_SignedNumber_2 (t):
    'SignedNumber : MINUS NUMBER'
    t[0] = '-' + t[2]

# 18.9
def p_IntegerValue (t):
    'IntegerValue : SignedNumber'
    t[0] = t[1]


# 19 Notation for the enumerated type -----------------------------------------

# 19.1
def p_EnumeratedType (t):
    'EnumeratedType : ENUMERATED LBRACE Enumerations RBRACE'
    t[0] = EnumeratedType (val = t[3]['val'], ext = t[3]['ext'])

def p_Enumerations_1 (t):
    'Enumerations : Enumeration'
    t[0] = { 'val' : t[1], 'ext' : None }

def p_Enumerations_2 (t):
    'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec'
    t[0] = { 'val' : t[1], 'ext' : [] }

def p_Enumerations_3 (t):
    'Enumerations : Enumeration COMMA ELLIPSIS ExceptionSpec COMMA Enumeration'
    t[0] = { 'val' : t[1], 'ext' : t[6] }

def p_Enumeration_1 (t):
    'Enumeration : EnumerationItem'
    t[0] = [t[1]]

def p_Enumeration_2 (t):
    'Enumeration : Enumeration COMMA EnumerationItem'
    t[0] = t[1] + [t[3]]

def p_EnumerationItem (t):
    '''EnumerationItem : Identifier
                       | NamedNumber'''
    t[0] = t[1]

def p_Identifier (t):
    'Identifier : identifier'
    t[0] = Node ('Identifier', ident = t[1])


# 20 Notation for the real type -----------------------------------------------

# 20.1
def p_RealType (t):
    'RealType : REAL'
    t[0] = RealType ()

# 20.6
def p_RealValue (t):
    '''RealValue : REAL_NUMBER
                 | SpecialRealValue'''
    t[0] = t[1]

def p_SpecialRealValue (t):
    '''SpecialRealValue : PLUS_INFINITY
                        | MINUS_INFINITY'''
    t[0] = t[1]


# 21 Notation for the bitstring type ------------------------------------------

# 21.1
def p_BitStringType_1 (t):
    'BitStringType : BIT STRING'
    t[0] = BitStringType (named_list = None)

def p_BitStringType_2 (t):
    'BitStringType : BIT STRING LBRACE NamedBitList RBRACE'
    t[0] = BitStringType (named_list = t[4])

def p_NamedBitList_1 (t):
    'NamedBitList : NamedBit'
    t[0] = [t[1]]

def p_NamedBitList_2 (t):
    'NamedBitList : NamedBitList COMMA NamedBit'
    t[0] = t[1] + [t[3]]

def p_NamedBit (t):
    '''NamedBit : identifier LPAREN NUMBER RPAREN
                | identifier LPAREN DefinedValue RPAREN'''
    t[0] = NamedNumber (ident = t[1], val = t[3])


# 22 Notation for the octetstring type ----------------------------------------

# 22.1
def p_OctetStringType (t):
    'OctetStringType : OCTET STRING'
    t[0] = OctetStringType ()


# 23 Notation for the null type -----------------------------------------------

# 23.1
def p_NullType (t):
    'NullType : NULL'
    t[0] = NullType ()

# 23.3
def p_NullValue (t):
    'NullValue : NULL'
    t[0] = NullValue ()


# 24 Notation for sequence types ----------------------------------------------

# 24.1
def p_SequenceType_1 (t):
    'SequenceType : SEQUENCE LBRACE RBRACE'
    t[0] = SequenceType (elt_list = [])
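# For illustration: a component list with an extension marker, e.g.
#   SEQUENCE { a INTEGER, ..., b BOOLEAN }
# reduces through p_ComponentTypeLists_3 below with elt_list=[a] and
# ext_list=[b], which p_SequenceType_2 then stores on the type node.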
def p_SequenceType_2 (t):
    'SequenceType : SEQUENCE LBRACE ComponentTypeLists RBRACE'
    t[0] = SequenceType (elt_list = t[3]['elt_list'])
    if 'ext_list' in t[3]:
        t[0].ext_list = t[3]['ext_list']
    if 'elt_list2' in t[3]:
        t[0].elt_list2 = t[3]['elt_list2']

def p_ExtensionAndException_1 (t):
    'ExtensionAndException : ELLIPSIS'
    t[0] = []

def p_OptionalExtensionMarker_1 (t):
    'OptionalExtensionMarker : COMMA ELLIPSIS'
    t[0] = True

def p_OptionalExtensionMarker_2 (t):
    'OptionalExtensionMarker : '
    t[0] = False

def p_ComponentTypeLists_1 (t):
    'ComponentTypeLists : ComponentTypeList'
    t[0] = {'elt_list' : t[1]}

def p_ComponentTypeLists_2 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException OptionalExtensionMarker'
    t[0] = {'elt_list' : t[1], 'ext_list' : []}

def p_ComponentTypeLists_3 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
    t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}

def p_ComponentTypeLists_4 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionEndMarker COMMA ComponentTypeList'
    t[0] = {'elt_list' : t[1], 'ext_list' : [], 'elt_list2' : t[6]}

def p_ComponentTypeLists_5 (t):
    'ComponentTypeLists : ComponentTypeList COMMA ExtensionAndException ExtensionAdditionList ExtensionEndMarker COMMA ComponentTypeList'
    t[0] = {'elt_list' : t[1], 'ext_list' : t[4], 'elt_list2' : t[7]}

def p_ComponentTypeLists_6 (t):
    'ComponentTypeLists : ExtensionAndException OptionalExtensionMarker'
    t[0] = {'elt_list' : [], 'ext_list' : []}

def p_ComponentTypeLists_7 (t):
    'ComponentTypeLists : ExtensionAndException ExtensionAdditionList OptionalExtensionMarker'
    t[0] = {'elt_list' : [], 'ext_list' : t[2]}

def p_ExtensionEndMarker (t):
    'ExtensionEndMarker : COMMA ELLIPSIS'
    pass

def p_ExtensionAdditionList_1 (t):
    'ExtensionAdditionList : COMMA ExtensionAddition'
    t[0] = [t[2]]

def p_ExtensionAdditionList_2 (t):
    'ExtensionAdditionList : ExtensionAdditionList COMMA ExtensionAddition'
    t[0] = t[1] + [t[3]]

def p_ExtensionAddition_1 (t):
    'ExtensionAddition : ExtensionAdditionGroup'
    t[0] = Node ('elt_type', val = t[1], optional = 0)

def p_ExtensionAddition_2 (t):
    'ExtensionAddition : ComponentType'
    t[0] = t[1]

def p_ExtensionAdditionGroup (t):
    'ExtensionAdditionGroup : LVERBRACK VersionNumber ComponentTypeList RVERBRACK'
    t[0] = ExtensionAdditionGroup (ver = t[2], elt_list = t[3])

def p_VersionNumber_1 (t):
    'VersionNumber : '

def p_VersionNumber_2 (t):
    'VersionNumber : NUMBER COLON'
    t[0] = t[1]

def p_ComponentTypeList_1 (t):
    'ComponentTypeList : ComponentType'
    t[0] = [t[1]]

def p_ComponentTypeList_2 (t):
    'ComponentTypeList : ComponentTypeList COMMA ComponentType'
    t[0] = t[1] + [t[3]]

def p_ComponentType_1 (t):
    'ComponentType : NamedType'
    t[0] = Node ('elt_type', val = t[1], optional = 0)

def p_ComponentType_2 (t):
    'ComponentType : NamedType OPTIONAL'
    t[0] = Node ('elt_type', val = t[1], optional = 1)

def p_ComponentType_3 (t):
    'ComponentType : NamedType DEFAULT DefaultValue'
    t[0] = Node ('elt_type', val = t[1], optional = 1, default = t[3])

def p_ComponentType_4 (t):
    'ComponentType : COMPONENTS OF Type'
    t[0] = Node ('components_of', typ = t[3])

def p_DefaultValue_1 (t):
    '''DefaultValue : ReferencedValue
                    | BooleanValue
                    | ChoiceValue
                    | IntegerValue
                    | RealValue
                    | hex_string
                    | binary_string
                    | char_string
                    | ObjectClassFieldValue'''
    t[0] = t[1]

def p_DefaultValue_2 (t):
    'DefaultValue : lbraceignore rbraceignore'
    t[0] = ''

# 24.17
def p_SequenceValue_1 (t):
    'SequenceValue : LBRACE RBRACE'
    t[0] = []
#def p_SequenceValue_2 (t):
#    'SequenceValue : LBRACE ComponentValueList RBRACE'
#    t[0] = t[2]

#def p_ComponentValueList_1 (t):
#    'ComponentValueList : NamedValue'
#    t[0] = [t[1]]

#def p_ComponentValueList_2 (t):
#    'ComponentValueList : ComponentValueList COMMA NamedValue'
#    t[0] = t[1] + [t[3]]


# 25 Notation for sequence-of types -------------------------------------------

# 25.1
def p_SequenceOfType (t):
    '''SequenceOfType : SEQUENCE OF Type
                      | SEQUENCE OF NamedType'''
    t[0] = SequenceOfType (val = t[3], size_constr = None)


# 26 Notation for set types ---------------------------------------------------

# 26.1
def p_SetType_1 (t):
    'SetType : SET LBRACE RBRACE'
    t[0] = SetType (elt_list = [])

def p_SetType_2 (t):
    'SetType : SET LBRACE ComponentTypeLists RBRACE'
    t[0] = SetType (elt_list = t[3]['elt_list'])
    if 'ext_list' in t[3]:
        t[0].ext_list = t[3]['ext_list']
    if 'elt_list2' in t[3]:
        t[0].elt_list2 = t[3]['elt_list2']


# 27 Notation for set-of types ------------------------------------------------

# 27.1
def p_SetOfType (t):
    '''SetOfType : SET OF Type
                 | SET OF NamedType'''
    t[0] = SetOfType (val = t[3])


# 28 Notation for choice types ------------------------------------------------

# 28.1
def p_ChoiceType (t):
    'ChoiceType : CHOICE LBRACE AlternativeTypeLists RBRACE'
    if 'ext_list' in t[3]:
        t[0] = ChoiceType (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
    else:
        t[0] = ChoiceType (elt_list = t[3]['elt_list'])

def p_AlternativeTypeLists_1 (t):
    'AlternativeTypeLists : AlternativeTypeList'
    t[0] = {'elt_list' : t[1]}

def p_AlternativeTypeLists_2 (t):
    'AlternativeTypeLists : AlternativeTypeList COMMA ExtensionAndException ExtensionAdditionAlternatives OptionalExtensionMarker'
    t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}

def p_ExtensionAdditionAlternatives_1 (t):
    'ExtensionAdditionAlternatives : ExtensionAdditionAlternativesList'
    t[0] = t[1]

def p_ExtensionAdditionAlternatives_2 (t):
    'ExtensionAdditionAlternatives : '
    t[0] = []

def p_ExtensionAdditionAlternativesList_1 (t):
    'ExtensionAdditionAlternativesList : COMMA ExtensionAdditionAlternative'
    t[0] = t[2]

def p_ExtensionAdditionAlternativesList_2 (t):
    'ExtensionAdditionAlternativesList : ExtensionAdditionAlternativesList COMMA ExtensionAdditionAlternative'
    t[0] = t[1] + t[3]

def p_ExtensionAdditionAlternative_1 (t):
    'ExtensionAdditionAlternative : NamedType'
    t[0] = [t[1]]

def p_ExtensionAdditionAlternative_2 (t):
    'ExtensionAdditionAlternative : ExtensionAdditionAlternativesGroup'
    t[0] = t[1]

def p_ExtensionAdditionAlternativesGroup (t):
    'ExtensionAdditionAlternativesGroup : LVERBRACK VersionNumber AlternativeTypeList RVERBRACK'
    t[0] = t[3]

def p_AlternativeTypeList_1 (t):
    'AlternativeTypeList : NamedType'
    t[0] = [t[1]]

def p_AlternativeTypeList_2 (t):
    'AlternativeTypeList : AlternativeTypeList COMMA NamedType'
    t[0] = t[1] + [t[3]]

# 28.10
def p_ChoiceValue_1 (t):
    '''ChoiceValue : identifier COLON Value
                   | identifier COLON NullValue '''
    val = t[3]
    if not isinstance(val, Value):
        val = Value(val=val)
    t[0] = ChoiceValue (choice = t[1], val = val)


# 29 Notation for selection types

# 29.1
def p_SelectionType (t):
    'SelectionType : identifier LT Type'
    t[0] = SelectionType (typ = t[3], sel = t[1])


# 30 Notation for tagged types ------------------------------------------------

# 30.1
def p_TaggedType_1 (t):
    'TaggedType : Tag Type'
    t[1].mode = 'default'
    t[0] = t[2]
    t[0].AddTag(t[1])

def p_TaggedType_2 (t):
    '''TaggedType : Tag IMPLICIT Type
                  | Tag EXPLICIT Type'''
    t[1].mode = t[2]
    t[0] = t[3]
    t[0].AddTag(t[1])

def p_Tag (t):
    'Tag : LBRACK Class ClassNumber RBRACK'
    t[0] = Tag(cls = t[2], num = t[3])
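# For illustration: a tagged type such as
#   [APPLICATION 5] IMPLICIT INTEGER
# reduces through p_TaggedType_2 above: the Tag(cls='APPLICATION',
# num='5') built by p_Tag gets mode='IMPLICIT' and is attached via
# AddTag(); a bare [5] falls back to class CONTEXT (p_Class_2 below).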
def p_ClassNumber_1 (t):
    'ClassNumber : number'
    t[0] = t[1]

def p_ClassNumber_2 (t):
    'ClassNumber : DefinedValue'
    t[0] = t[1]

def p_Class_1 (t):
    '''Class : UNIVERSAL
             | APPLICATION
             | PRIVATE'''
    t[0] = t[1]

def p_Class_2 (t):
    'Class :'
    t[0] = 'CONTEXT'


# 31 Notation for the object identifier type ----------------------------------

# 31.1
def p_ObjectIdentifierType (t):
    'ObjectIdentifierType : OBJECT IDENTIFIER'
    t[0] = ObjectIdentifierType()

# 31.3
def p_ObjectIdentifierValue (t):
    'ObjectIdentifierValue : LBRACE oid_comp_list RBRACE'
    t[0] = ObjectIdentifierValue (comp_list=t[2])

def p_oid_comp_list_1 (t):
    'oid_comp_list : oid_comp_list ObjIdComponents'
    t[0] = t[1] + [t[2]]

def p_oid_comp_list_2 (t):
    'oid_comp_list : ObjIdComponents'
    t[0] = [t[1]]

def p_ObjIdComponents (t):
    '''ObjIdComponents : NameForm
                       | NumberForm
                       | NameAndNumberForm'''
    t[0] = t[1]

def p_NameForm (t):
    '''NameForm : LCASE_IDENT
                | LCASE_IDENT_ASSIGNED'''
    t[0] = t[1]

def p_NumberForm (t):
    '''NumberForm : NUMBER'''
    #              | DefinedValue'''
    t[0] = t[1]

def p_NameAndNumberForm (t):
    '''NameAndNumberForm : LCASE_IDENT_ASSIGNED LPAREN NumberForm RPAREN
                         | LCASE_IDENT LPAREN NumberForm RPAREN'''
    t[0] = Node('name_and_number', ident = t[1], number = t[3])


# 32 Notation for the relative object identifier type -------------------------

# 32.1
def p_RelativeOIDType (t):
    'RelativeOIDType : RELATIVE_OID'
    t[0] = RelativeOIDType()


# 33 Notation for the embedded-pdv type ---------------------------------------

# 33.1
def p_EmbeddedPDVType (t):
    'EmbeddedPDVType : EMBEDDED PDV'
    t[0] = EmbeddedPDVType()


# 34 Notation for the external type -------------------------------------------

# 34.1
def p_ExternalType (t):
    'ExternalType : EXTERNAL'
    t[0] = ExternalType()


# 36 Notation for character string types --------------------------------------

# 36.1
def p_CharacterStringType (t):
    '''CharacterStringType : RestrictedCharacterStringType
                           | UnrestrictedCharacterStringType'''
    t[0] = t[1]


# 37 Definition of restricted character string types --------------------------

def p_RestrictedCharacterStringType_1 (t):
    'RestrictedCharacterStringType : BMPString'
    t[0] = BMPStringType ()

def p_RestrictedCharacterStringType_2 (t):
    'RestrictedCharacterStringType : GeneralString'
    t[0] = GeneralStringType ()

def p_RestrictedCharacterStringType_3 (t):
    'RestrictedCharacterStringType : GraphicString'
    t[0] = GraphicStringType ()

def p_RestrictedCharacterStringType_4 (t):
    'RestrictedCharacterStringType : IA5String'
    t[0] = IA5StringType ()

def p_RestrictedCharacterStringType_5 (t):
    'RestrictedCharacterStringType : ISO646String'
    t[0] = ISO646StringType ()

def p_RestrictedCharacterStringType_6 (t):
    'RestrictedCharacterStringType : NumericString'
    t[0] = NumericStringType ()

def p_RestrictedCharacterStringType_7 (t):
    'RestrictedCharacterStringType : PrintableString'
    t[0] = PrintableStringType ()

def p_RestrictedCharacterStringType_8 (t):
    'RestrictedCharacterStringType : TeletexString'
    t[0] = TeletexStringType ()

def p_RestrictedCharacterStringType_9 (t):
    'RestrictedCharacterStringType : T61String'
    t[0] = T61StringType ()

def p_RestrictedCharacterStringType_10 (t):
    'RestrictedCharacterStringType : UniversalString'
    t[0] = UniversalStringType ()

def p_RestrictedCharacterStringType_11 (t):
    'RestrictedCharacterStringType : UTF8String'
    t[0] = UTF8StringType ()

def p_RestrictedCharacterStringType_12 (t):
    'RestrictedCharacterStringType : VideotexString'
    t[0] = VideotexStringType ()

def p_RestrictedCharacterStringType_13 (t):
    'RestrictedCharacterStringType : VisibleString'
    t[0] = VisibleStringType ()
# 40 Definition of unrestricted character string types ------------------------ # 40.1 def p_UnrestrictedCharacterStringType (t): 'UnrestrictedCharacterStringType : CHARACTER STRING' t[0] = UnrestrictedCharacterStringType () # 41 Notation for types defined in clauses 42 to 44 --------------------------- # 42 Generalized time --------------------------------------------------------- def p_UsefulType_1 (t): 'UsefulType : GeneralizedTime' t[0] = GeneralizedTime() # 43 Universal time ----------------------------------------------------------- def p_UsefulType_2 (t): 'UsefulType : UTCTime' t[0] = UTCTime() # 44 The object descriptor type ----------------------------------------------- def p_UsefulType_3 (t): 'UsefulType : ObjectDescriptor' t[0] = ObjectDescriptor() # 45 Constrained types -------------------------------------------------------- # 45.1 def p_ConstrainedType_1 (t): 'ConstrainedType : Type Constraint' t[0] = t[1] t[0].AddConstraint(t[2]) def p_ConstrainedType_2 (t): 'ConstrainedType : TypeWithConstraint' t[0] = t[1] # 45.5 def p_TypeWithConstraint_1 (t): '''TypeWithConstraint : SET Constraint OF Type | SET SizeConstraint OF Type''' t[0] = SetOfType (val = t[4], constr = t[2]) def p_TypeWithConstraint_2 (t): '''TypeWithConstraint : SEQUENCE Constraint OF Type | SEQUENCE SizeConstraint OF Type''' t[0] = SequenceOfType (val = t[4], constr = t[2]) def p_TypeWithConstraint_3 (t): '''TypeWithConstraint : SET Constraint OF NamedType | SET SizeConstraint OF NamedType''' t[0] = SetOfType (val = t[4], constr = t[2]) def p_TypeWithConstraint_4 (t): '''TypeWithConstraint : SEQUENCE Constraint OF NamedType | SEQUENCE SizeConstraint OF NamedType''' t[0] = SequenceOfType (val = t[4], constr = t[2]) # 45.6 # 45.7 def p_Constraint (t): 'Constraint : LPAREN ConstraintSpec ExceptionSpec RPAREN' t[0] = t[2] def p_ConstraintSpec (t): '''ConstraintSpec : ElementSetSpecs | GeneralConstraint''' t[0] = t[1] # 46 Element set specification ------------------------------------------------ # 46.1 def p_ElementSetSpecs_1 (t): 'ElementSetSpecs : RootElementSetSpec' t[0] = t[1] def p_ElementSetSpecs_2 (t): 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS' t[0] = t[1] t[0].ext = True def p_ElementSetSpecs_3 (t): 'ElementSetSpecs : RootElementSetSpec COMMA ELLIPSIS COMMA AdditionalElementSetSpec' t[0] = t[1] t[0].ext = True def p_RootElementSetSpec (t): 'RootElementSetSpec : ElementSetSpec' t[0] = t[1] def p_AdditionalElementSetSpec (t): 'AdditionalElementSetSpec : ElementSetSpec' t[0] = t[1] def p_ElementSetSpec (t): 'ElementSetSpec : Unions' t[0] = t[1] def p_Unions_1 (t): 'Unions : Intersections' t[0] = t[1] def p_Unions_2 (t): 'Unions : UElems UnionMark Intersections' t[0] = Constraint(type = 'Union', subtype = [t[1], t[3]]) def p_UElems (t): 'UElems : Unions' t[0] = t[1] def p_Intersections_1 (t): 'Intersections : IntersectionElements' t[0] = t[1] def p_Intersections_2 (t): 'Intersections : IElems IntersectionMark IntersectionElements' t[0] = Constraint(type = 'Intersection', subtype = [t[1], t[3]]) def p_IElems (t): 'IElems : Intersections' t[0] = t[1] def p_IntersectionElements (t): 'IntersectionElements : Elements' t[0] = t[1] def p_UnionMark (t): '''UnionMark : BAR | UNION''' def p_IntersectionMark (t): '''IntersectionMark : CIRCUMFLEX | INTERSECTION''' # 46.5 def p_Elements_1 (t): 'Elements : SubtypeElements' t[0] = t[1] def p_Elements_2 (t): 'Elements : LPAREN ElementSetSpec RPAREN' t[0] = t[2] # 47 Subtype elements --------------------------------------------------------- # 47.1 General def 
p_SubtypeElements (t):
    '''SubtypeElements : SingleValue
                       | ContainedSubtype
                       | ValueRange
                       | PermittedAlphabet
                       | SizeConstraint
                       | TypeConstraint
                       | InnerTypeConstraints
                       | PatternConstraint'''
    t[0] = t[1]

# 47.2 Single value
# 47.2.1
def p_SingleValue (t):
    'SingleValue : Value'
    t[0] = Constraint(type = 'SingleValue', subtype = t[1])

# 47.3 Contained subtype
# 47.3.1
def p_ContainedSubtype (t):
    'ContainedSubtype : Includes Type'
    t[0] = Constraint(type = 'ContainedSubtype', subtype = t[2])

def p_Includes (t):
    '''Includes : INCLUDES
                | '''

# 47.4 Value range
# 47.4.1
def p_ValueRange (t):
    'ValueRange : LowerEndpoint RANGE UpperEndpoint'
    t[0] = Constraint(type = 'ValueRange', subtype = [t[1], t[3]])

# 47.4.3
def p_LowerEndpoint_1 (t):
    'LowerEndpoint : LowerEndValue'
    t[0] = t[1]

def p_LowerEndpoint_2 (t):
    'LowerEndpoint : LowerEndValue LT'
    t[0] = t[1]  # but not inclusive range

def p_UpperEndpoint_1 (t):
    'UpperEndpoint : UpperEndValue'
    t[0] = t[1]

def p_UpperEndpoint_2 (t):
    'UpperEndpoint : LT UpperEndValue'
    t[0] = t[1]  # but not inclusive range

# 47.4.4
def p_LowerEndValue (t):
    '''LowerEndValue : Value
                     | MIN'''
    t[0] = t[1]  # XXX

def p_UpperEndValue (t):
    '''UpperEndValue : Value
                     | MAX'''
    t[0] = t[1]

# 47.5 Size constraint
# 47.5.1
def p_SizeConstraint (t):
    'SizeConstraint : SIZE Constraint'
    t[0] = Constraint (type = 'Size', subtype = t[2])

# 47.6 Type constraint
# 47.6.1
def p_TypeConstraint (t):
    'TypeConstraint : Type'
    t[0] = Constraint (type = 'Type', subtype = t[1])

# 47.7 Permitted alphabet
# 47.7.1
def p_PermittedAlphabet (t):
    'PermittedAlphabet : FROM Constraint'
    t[0] = Constraint (type = 'From', subtype = t[2])

# 47.8 Inner subtyping
# 47.8.1
def p_InnerTypeConstraints (t):
    '''InnerTypeConstraints : WITH COMPONENT SingleTypeConstraint
                            | WITH COMPONENTS MultipleTypeConstraints'''
    pass  # ignore PER invisible constraint

# 47.8.3
def p_SingleTypeConstraint (t):
    'SingleTypeConstraint : Constraint'
    t[0] = t[1]

# 47.8.4
def p_MultipleTypeConstraints (t):
    '''MultipleTypeConstraints : FullSpecification
                               | PartialSpecification'''
    t[0] = t[1]

def p_FullSpecification (t):
    'FullSpecification : LBRACE TypeConstraints RBRACE'
    t[0] = t[2]

def p_PartialSpecification (t):
    'PartialSpecification : LBRACE ELLIPSIS COMMA TypeConstraints RBRACE'
    t[0] = t[4]

def p_TypeConstraints_1 (t):
    'TypeConstraints : named_constraint'
    t[0] = [t[1]]

def p_TypeConstraints_2 (t):
    'TypeConstraints : TypeConstraints COMMA named_constraint'
    t[0] = t[1] + [t[3]]

def p_named_constraint_1 (t):
    'named_constraint : identifier constraint'
    # PLY ignores return values from rule actions; assign to t[0] so the
    # named_constraint node actually reaches TypeConstraints.
    t[0] = Node ('named_constraint', ident = t[1], constr = t[2])

def p_named_constraint_2 (t):
    'named_constraint : constraint'
    t[0] = Node ('named_constraint', constr = t[1])

def p_constraint (t):
    'constraint : value_constraint presence_constraint'
    t[0] = Node ('constraint', value = t[1], presence = t[2])

def p_value_constraint_1 (t):
    'value_constraint : Constraint'
    t[0] = t[1]

def p_value_constraint_2 (t):
    'value_constraint : '
    pass

def p_presence_constraint_1 (t):
    '''presence_constraint : PRESENT
                           | ABSENT
                           | OPTIONAL'''
    t[0] = t[1]

def p_presence_constraint_2 (t):
    '''presence_constraint : '''
    pass

# 47.9 Pattern constraint
# 47.9.1
def p_PatternConstraint (t):
    'PatternConstraint : PATTERN Value'
    t[0] = Constraint (type = 'Pattern', subtype = t[2])

# 49 The exception identifier
# 49.4
def p_ExceptionSpec_1 (t):
    'ExceptionSpec : EXCLAMATION ExceptionIdentification'
    pass

def p_ExceptionSpec_2 (t):
    'ExceptionSpec : '
    pass

def p_ExceptionIdentification (t):
    '''ExceptionIdentification : SignedNumber
                               | DefinedValue
| Type COLON Value ''' pass # /*-----------------------------------------------------------------------*/ # /* Value Notation Productions */ # /*-----------------------------------------------------------------------*/ def p_binary_string (t): 'binary_string : BSTRING' t[0] = BStringValue(val = t[1]) def p_hex_string (t): 'hex_string : HSTRING' t[0] = HStringValue(val = t[1]) def p_char_string (t): 'char_string : QSTRING' t[0] = t[1] def p_number (t): 'number : NUMBER' t[0] = t[1] #--- ITU-T Recommendation X.208 ----------------------------------------------- # 27 Notation for the any type ------------------------------------------------ # 27.1 def p_AnyType (t): '''AnyType : ANY | ANY DEFINED BY identifier''' t[0] = AnyType() #--- ITU-T Recommendation X.681 ----------------------------------------------- # 7 ASN.1 lexical items ------------------------------------------------------- # 7.1 Information object class references def p_objectclassreference (t): 'objectclassreference : CLASS_IDENT' t[0] = Class_Ref(val=t[1]) # 7.2 Information object references def p_objectreference (t): 'objectreference : LCASE_IDENT' t[0] = t[1] # 7.3 Information object set references #def p_objectsetreference (t): # 'objectsetreference : UCASE_IDENT' # t[0] = t[1] # 7.4 Type field references # ucasefieldreference # 7.5 Value field references # lcasefieldreference # 7.6 Value set field references # ucasefieldreference # 7.7 Object field references # lcasefieldreference # 7.8 Object set field references # ucasefieldreference def p_ucasefieldreference (t): 'ucasefieldreference : AMPERSAND UCASE_IDENT' t[0] = '&' + t[2] def p_lcasefieldreference (t): 'lcasefieldreference : AMPERSAND LCASE_IDENT' t[0] = '&' + t[2] # 8 Referencing definitions # 8.1 def p_DefinedObjectClass (t): '''DefinedObjectClass : objectclassreference | UsefulObjectClassReference''' t[0] = t[1] global obj_class obj_class = t[0].val def p_DefinedObject (t): '''DefinedObject : objectreference''' t[0] = t[1] # 8.4 def p_UsefulObjectClassReference (t): '''UsefulObjectClassReference : TYPE_IDENTIFIER | ABSTRACT_SYNTAX''' t[0] = Class_Ref(val=t[1]) # 9 Information object class definition and assignment # 9.1 def p_ObjectClassAssignment (t): '''ObjectClassAssignment : CLASS_IDENT ASSIGNMENT ObjectClass | UCASE_IDENT ASSIGNMENT ObjectClass''' t[0] = t[3] t[0].SetName(t[1]) if isinstance(t[0], ObjectClassDefn): t[0].reg_types() # 9.2 def p_ObjectClass (t): '''ObjectClass : DefinedObjectClass | ObjectClassDefn | ParameterizedObjectClass ''' t[0] = t[1] # 9.3 def p_ObjectClassDefn (t): '''ObjectClassDefn : CLASS LBRACE FieldSpecs RBRACE | CLASS LBRACE FieldSpecs RBRACE WithSyntaxSpec''' t[0] = ObjectClassDefn(fields = t[3]) def p_FieldSpecs_1 (t): 'FieldSpecs : FieldSpec' t[0] = [t[1]] def p_FieldSpecs_2 (t): 'FieldSpecs : FieldSpecs COMMA FieldSpec' t[0] = t[1] + [t[3]] def p_WithSyntaxSpec (t): 'WithSyntaxSpec : WITH SYNTAX lbraceignore rbraceignore' t[0] = None # 9.4 def p_FieldSpec (t): '''FieldSpec : TypeFieldSpec | FixedTypeValueFieldSpec | VariableTypeValueFieldSpec | FixedTypeValueSetFieldSpec | ObjectFieldSpec | ObjectSetFieldSpec ''' t[0] = t[1] # 9.5 def p_TypeFieldSpec (t): '''TypeFieldSpec : ucasefieldreference | ucasefieldreference TypeOptionalitySpec ''' t[0] = TypeFieldSpec() t[0].SetName(t[1]) def p_TypeOptionalitySpec_1 (t): 'TypeOptionalitySpec ::= OPTIONAL' pass def p_TypeOptionalitySpec_2 (t): 'TypeOptionalitySpec ::= DEFAULT Type' pass # 9.6 def p_FixedTypeValueFieldSpec (t): '''FixedTypeValueFieldSpec : lcasefieldreference Type | 
lcasefieldreference Type UNIQUE | lcasefieldreference Type ValueOptionalitySpec | lcasefieldreference Type UNIQUE ValueOptionalitySpec ''' t[0] = FixedTypeValueFieldSpec(typ = t[2]) t[0].SetName(t[1]) def p_ValueOptionalitySpec_1 (t): 'ValueOptionalitySpec ::= OPTIONAL' pass def p_ValueOptionalitySpec_2 (t): 'ValueOptionalitySpec ::= DEFAULT Value' pass # 9.8 def p_VariableTypeValueFieldSpec (t): '''VariableTypeValueFieldSpec : lcasefieldreference FieldName | lcasefieldreference FieldName ValueOptionalitySpec ''' t[0] = VariableTypeValueFieldSpec() t[0].SetName(t[1]) # 9.9 def p_FixedTypeValueSetFieldSpec (t): '''FixedTypeValueSetFieldSpec : ucasefieldreference Type | ucasefieldreference Type ValueSetOptionalitySpec ''' t[0] = FixedTypeValueSetFieldSpec() t[0].SetName(t[1]) def p_ValueSetOptionalitySpec_1 (t): 'ValueSetOptionalitySpec ::= OPTIONAL' pass def p_ValueSetOptionalitySpec_2 (t): 'ValueSetOptionalitySpec ::= DEFAULT ValueSet' pass # 9.11 def p_ObjectFieldSpec (t): '''ObjectFieldSpec : lcasefieldreference DefinedObjectClass | lcasefieldreference DefinedObjectClass ObjectOptionalitySpec ''' t[0] = ObjectFieldSpec(cls=t[2]) t[0].SetName(t[1]) global obj_class obj_class = None def p_ObjectOptionalitySpec_1 (t): 'ObjectOptionalitySpec ::= OPTIONAL' pass def p_ObjectOptionalitySpec_2 (t): 'ObjectOptionalitySpec ::= DEFAULT Object' pass # 9.12 def p_ObjectSetFieldSpec (t): '''ObjectSetFieldSpec : ucasefieldreference DefinedObjectClass | ucasefieldreference DefinedObjectClass ObjectSetOptionalitySpec ''' t[0] = ObjectSetFieldSpec(cls=t[2]) t[0].SetName(t[1]) def p_ObjectSetOptionalitySpec_1 (t): 'ObjectSetOptionalitySpec ::= OPTIONAL' pass def p_ObjectSetOptionalitySpec_2 (t): 'ObjectSetOptionalitySpec ::= DEFAULT ObjectSet' pass # 9.13 def p_PrimitiveFieldName (t): '''PrimitiveFieldName : ucasefieldreference | lcasefieldreference ''' t[0] = t[1] # 9.13 def p_FieldName_1 (t): 'FieldName : PrimitiveFieldName' t[0] = t[1] def p_FieldName_2 (t): 'FieldName : FieldName DOT PrimitiveFieldName' t[0] = t[1] + '.' 
+ t[3] # 11 Information object definition and assignment # 11.1 def p_ObjectAssignment (t): 'ObjectAssignment : objectreference DefinedObjectClass ASSIGNMENT Object' t[0] = ObjectAssignment (ident = t[1], cls=t[2].val, val=t[4]) global obj_class obj_class = None # 11.3 def p_Object (t): '''Object : DefinedObject | ObjectDefn | ParameterizedObject''' t[0] = t[1] # 11.4 def p_ObjectDefn (t): 'ObjectDefn : lbraceobject bodyobject rbraceobject' t[0] = t[2] # {...} block of object definition def p_lbraceobject(t): 'lbraceobject : braceobjectbegin LBRACE' t[0] = t[1] def p_braceobjectbegin(t): 'braceobjectbegin : ' global lexer global obj_class if set_class_syntax(obj_class): state = 'INITIAL' else: lexer.level = 1 state = 'braceignore' lexer.push_state(state) def p_rbraceobject(t): 'rbraceobject : braceobjectend RBRACE' t[0] = t[2] def p_braceobjectend(t): 'braceobjectend : ' global lexer lexer.pop_state() set_class_syntax(None) def p_bodyobject_1 (t): 'bodyobject : ' t[0] = { } def p_bodyobject_2 (t): 'bodyobject : cls_syntax_list' t[0] = t[1] def p_cls_syntax_list_1 (t): 'cls_syntax_list : cls_syntax_list cls_syntax' t[0] = t[1] t[0].update(t[2]) def p_cls_syntax_list_2 (t): 'cls_syntax_list : cls_syntax' t[0] = t[1] # X.681 def p_cls_syntax_1 (t): 'cls_syntax : Type IDENTIFIED BY Value' t[0] = { get_class_fieled(' ') : t[1], get_class_fieled(' '.join((t[2], t[3]))) : t[4] } def p_cls_syntax_2 (t): 'cls_syntax : HAS PROPERTY Value' t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] } # X.880 def p_cls_syntax_3 (t): '''cls_syntax : ERRORS ObjectSet | LINKED ObjectSet | RETURN RESULT BooleanValue | SYNCHRONOUS BooleanValue | INVOKE PRIORITY Value | RESULT_PRIORITY Value | PRIORITY Value | ALWAYS RESPONDS BooleanValue | IDEMPOTENT BooleanValue ''' t[0] = { get_class_fieled(' '.join(t[1:-1])) : t[-1:][0] } def p_cls_syntax_4 (t): '''cls_syntax : ARGUMENT Type | RESULT Type | PARAMETER Type ''' t[0] = { get_class_fieled(t[1]) : t[2] } def p_cls_syntax_5 (t): 'cls_syntax : CODE Value' fld = get_class_fieled(t[1]); t[0] = { fld : t[2] } if isinstance(t[2], ChoiceValue): fldt = fld + '.' + t[2].choice t[0][fldt] = t[2] def p_cls_syntax_6 (t): '''cls_syntax : ARGUMENT Type OPTIONAL BooleanValue | RESULT Type OPTIONAL BooleanValue | PARAMETER Type OPTIONAL BooleanValue ''' t[0] = { get_class_fieled(t[1]) : t[2], get_class_fieled(' '.join((t[1], t[3]))) : t[4] } # 12 Information object set definition and assignment # 12.1 def p_ObjectSetAssignment (t): 'ObjectSetAssignment : UCASE_IDENT CLASS_IDENT ASSIGNMENT ObjectSet' t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[2], val=t[4]) # 12.3 def p_ObjectSet (t): 'ObjectSet : lbraceignore rbraceignore' t[0] = None # 14 Notation for the object class field type --------------------------------- # 14.1 def p_ObjectClassFieldType (t): 'ObjectClassFieldType : DefinedObjectClass DOT FieldName' t[0] = get_type_from_class(t[1], t[3]) # 14.6 def p_ObjectClassFieldValue (t): '''ObjectClassFieldValue : OpenTypeFieldVal''' t[0] = t[1] def p_OpenTypeFieldVal (t): '''OpenTypeFieldVal : Type COLON Value | NullType COLON NullValue''' t[0] = t[3] # 15 Information from objects ------------------------------------------------- # 15.1 def p_ValueFromObject (t): 'ValueFromObject : LCASE_IDENT DOT FieldName' t[0] = t[1] + '.' 
+ t[3] # Annex C - The instance-of type ---------------------------------------------- # C.2 def p_InstanceOfType (t): 'InstanceOfType : INSTANCE OF DefinedObjectClass' t[0] = InstanceOfType() # --- tables --- useful_object_class_types = { # Annex A 'TYPE-IDENTIFIER.&id' : lambda : ObjectIdentifierType(), 'TYPE-IDENTIFIER.&Type' : lambda : OpenType(), # Annex B 'ABSTRACT-SYNTAX.&id' : lambda : ObjectIdentifierType(), 'ABSTRACT-SYNTAX.&Type' : lambda : OpenType(), 'ABSTRACT-SYNTAX.&property' : lambda : BitStringType(), } object_class_types = { } object_class_typerefs = { } object_class_classrefs = { } # dummy types class _VariableTypeValueFieldSpec (AnyType): pass class _FixedTypeValueSetFieldSpec (AnyType): pass class_types_creator = { 'BooleanType' : lambda : BooleanType(), 'IntegerType' : lambda : IntegerType(), 'ObjectIdentifierType' : lambda : ObjectIdentifierType(), 'OpenType' : lambda : OpenType(), # dummy types '_VariableTypeValueFieldSpec' : lambda : _VariableTypeValueFieldSpec(), '_FixedTypeValueSetFieldSpec' : lambda : _FixedTypeValueSetFieldSpec(), } class_names = { } x681_syntaxes = { 'TYPE-IDENTIFIER' : { ' ' : '&Type', 'IDENTIFIED' : 'IDENTIFIED', #'BY' : 'BY', 'IDENTIFIED BY' : '&id', }, 'ABSTRACT-SYNTAX' : { ' ' : '&Type', 'IDENTIFIED' : 'IDENTIFIED', #'BY' : 'BY', 'IDENTIFIED BY' : '&id', 'HAS' : 'HAS', 'PROPERTY' : 'PROPERTY', 'HAS PROPERTY' : '&property', }, } class_syntaxes_enabled = { 'TYPE-IDENTIFIER' : True, 'ABSTRACT-SYNTAX' : True, } class_syntaxes = { 'TYPE-IDENTIFIER' : x681_syntaxes['TYPE-IDENTIFIER'], 'ABSTRACT-SYNTAX' : x681_syntaxes['ABSTRACT-SYNTAX'], } class_current_syntax = None def get_syntax_tokens(syntaxes): tokens = { } for s in (syntaxes): for k in (list(syntaxes[s].keys())): if k.find(' ') < 0: tokens[k] = k tokens[k] = tokens[k].replace('-', '_') return list(tokens.values()) tokens = tokens + get_syntax_tokens(x681_syntaxes) def set_class_syntax(syntax): global class_syntaxes_enabled global class_current_syntax #print "set_class_syntax", syntax, class_current_syntax if class_syntaxes_enabled.get(syntax, False): class_current_syntax = syntax return True else: class_current_syntax = None return False def is_class_syntax(name): global class_syntaxes global class_current_syntax #print "is_class_syntax", name, class_current_syntax if not class_current_syntax: return False return name in class_syntaxes[class_current_syntax] def get_class_fieled(name): if not class_current_syntax: return None return class_syntaxes[class_current_syntax][name] def is_class_ident(name): return name in class_names def add_class_ident(name): #print "add_class_ident", name class_names[name] = name def get_type_from_class(cls, fld): flds = fld.split('.') if (isinstance(cls, Class_Ref)): key = cls.val + '.' + flds[0] else: key = cls + '.' + flds[0] if key in object_class_classrefs: return get_type_from_class(object_class_classrefs[key], '.'.join(flds[1:])) if key in object_class_typerefs: return Type_Ref(val=object_class_typerefs[key]) creator = lambda : AnyType() creator = useful_object_class_types.get(key, creator) creator = object_class_types.get(key, creator) return creator() def set_type_to_class(cls, fld, pars): #print "set_type_to_class", cls, fld, pars key = cls + '.' 
+ fld
    typename = 'OpenType'
    if (len(pars) > 0):
        typename = pars[0]
    else:
        pars.append(typename)
    typeref = None
    if (len(pars) > 1):
        if (isinstance(pars[1], Class_Ref)):
            pars[1] = pars[1].val
        typeref = pars[1]
    msg = None
    if key in object_class_types:
        msg = object_class_types[key]().type
    if key in object_class_typerefs:
        msg = "TypeReference " + object_class_typerefs[key]
    if key in object_class_classrefs:
        msg = "ClassReference " + object_class_classrefs[key]
    if msg == ' '.join(pars):
        msg = None
    if msg:
        msg0 = "Cannot define CLASS field %s as '%s'\n" % (key, ' '.join(pars))
        msg1 = "Already defined as '%s'" % (msg)
        raise CompError(msg0 + msg1)
    if (typename == 'ClassReference'):
        if not typeref: return False
        object_class_classrefs[key] = typeref
        return True
    if (typename == 'TypeReference'):
        if not typeref: return False
        object_class_typerefs[key] = typeref
        return True
    creator = class_types_creator.get(typename)
    if creator:
        object_class_types[key] = creator
        return True
    else:
        return False

def import_class_from_module(mod, cls):
    add_class_ident(cls)
    mcls = "$%s$%s" % (mod, cls)
    # Keys have the form "<class>.<&field>"; copy each module-qualified
    # entry to the plain class name, keeping the field part (kk[1]).
    for k in list(object_class_classrefs.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            object_class_classrefs[cls + '.' + kk[1]] = object_class_classrefs[k]
    for k in list(object_class_typerefs.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            object_class_typerefs[cls + '.' + kk[1]] = object_class_typerefs[k]
    for k in list(object_class_types.keys()):
        kk = k.split('.', 1)
        if kk[0] == mcls:
            object_class_types[cls + '.' + kk[1]] = object_class_types[k]

#--- ITU-T Recommendation X.682 -----------------------------------------------

# 8 General constraint specification ------------------------------------------

# 8.1
def p_GeneralConstraint (t):
    '''GeneralConstraint : UserDefinedConstraint
                         | TableConstraint
                         | ContentsConstraint'''
    t[0] = t[1]

# 9 User-defined constraints --------------------------------------------------

# 9.1
def p_UserDefinedConstraint (t):
    'UserDefinedConstraint : CONSTRAINED BY LBRACE UserDefinedConstraintParameterList RBRACE'
    t[0] = Constraint(type = 'UserDefined', subtype = t[4])

def p_UserDefinedConstraintParameterList_1 (t):
    'UserDefinedConstraintParameterList : '
    t[0] = []

def p_UserDefinedConstraintParameterList_2 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameter'
    t[0] = [t[1]]

def p_UserDefinedConstraintParameterList_3 (t):
    'UserDefinedConstraintParameterList : UserDefinedConstraintParameterList COMMA UserDefinedConstraintParameter'
    t[0] = t[1] + [t[3]]

# 9.3
def p_UserDefinedConstraintParameter (t):
    'UserDefinedConstraintParameter : Type'
    t[0] = t[1]

# 10 Table constraints, including component relation constraints --------------

# 10.3
def p_TableConstraint (t):
    '''TableConstraint : SimpleTableConstraint
                       | ComponentRelationConstraint'''
    t[0] = Constraint(type = 'Table', subtype = t[1])

def p_SimpleTableConstraint (t):
    'SimpleTableConstraint : LBRACE UCASE_IDENT RBRACE'
    t[0] = t[2]

# 10.7
def p_ComponentRelationConstraint (t):
    'ComponentRelationConstraint : LBRACE UCASE_IDENT RBRACE LBRACE AtNotations RBRACE'
    t[0] = t[2] + str(t[5])

def p_AtNotations_1 (t):
    'AtNotations : AtNotation'
    t[0] = [t[1]]

def p_AtNotations_2 (t):
    'AtNotations : AtNotations COMMA AtNotation'
    t[0] = t[1] + [t[3]]

def p_AtNotation_1 (t):
    'AtNotation : AT ComponentIdList'
    t[0] = '@' + t[2]

def p_AtNotation_2 (t):
    'AtNotation : AT DOT Level ComponentIdList'
    t[0] = '@.' + t[3] + t[4]

def p_Level_1 (t):
    'Level : DOT Level'
    t[0] = '.'
+ t[2] def p_Level_2 (t): 'Level : ' t[0] = '' def p_ComponentIdList_1 (t): 'ComponentIdList : LCASE_IDENT' t[0] = t[1] def p_ComponentIdList_2 (t): 'ComponentIdList : ComponentIdList DOT LCASE_IDENT' t[0] = t[1] + '.' + t[3] # 11 Contents constraints ----------------------------------------------------- # 11.1 def p_ContentsConstraint (t): 'ContentsConstraint : CONTAINING type_ref' t[0] = Constraint(type = 'Contents', subtype = t[2]) #--- ITU-T Recommendation X.683 ----------------------------------------------- # 8 Parameterized assignments ------------------------------------------------- # 8.1 def p_ParameterizedAssignment (t): '''ParameterizedAssignment : ParameterizedTypeAssignment | ParameterizedObjectClassAssignment | ParameterizedObjectAssignment | ParameterizedObjectSetAssignment''' t[0] = t[1] # 8.2 def p_ParameterizedTypeAssignment (t): 'ParameterizedTypeAssignment : UCASE_IDENT ParameterList ASSIGNMENT Type' t[0] = t[4] t[0].SetName(t[1]) # t[0].SetName(t[1] + 'xxx') def p_ParameterizedObjectClassAssignment (t): '''ParameterizedObjectClassAssignment : CLASS_IDENT ParameterList ASSIGNMENT ObjectClass | UCASE_IDENT ParameterList ASSIGNMENT ObjectClass''' t[0] = t[4] t[0].SetName(t[1]) if isinstance(t[0], ObjectClassDefn): t[0].reg_types() def p_ParameterizedObjectAssignment (t): 'ParameterizedObjectAssignment : objectreference ParameterList DefinedObjectClass ASSIGNMENT Object' t[0] = ObjectAssignment (ident = t[1], cls=t[3].val, val=t[5]) global obj_class obj_class = None def p_ParameterizedObjectSetAssignment (t): 'ParameterizedObjectSetAssignment : UCASE_IDENT ParameterList DefinedObjectClass ASSIGNMENT ObjectSet' t[0] = Node('ObjectSetAssignment', name=t[1], cls=t[3].val, val=t[5]) # 8.3 def p_ParameterList (t): 'ParameterList : lbraceignore rbraceignore' #def p_ParameterList (t): # 'ParameterList : LBRACE Parameters RBRACE' # t[0] = t[2] #def p_Parameters_1 (t): # 'Parameters : Parameter' # t[0] = [t[1]] #def p_Parameters_2 (t): # 'Parameters : Parameters COMMA Parameter' # t[0] = t[1] + [t[3]] #def p_Parameter_1 (t): # 'Parameter : Type COLON Reference' # t[0] = [t[1], t[3]] #def p_Parameter_2 (t): # 'Parameter : Reference' # t[0] = t[1] # 9 Referencing parameterized definitions ------------------------------------- # 9.1 def p_ParameterizedReference (t): 'ParameterizedReference : Reference LBRACE RBRACE' t[0] = t[1] #t[0].val += 'xxx' # 9.2 def p_ParameterizedType (t): 'ParameterizedType : type_ref ActualParameterList' t[0] = t[1] #t[0].val += 'xxx' def p_ParameterizedObjectClass (t): 'ParameterizedObjectClass : DefinedObjectClass ActualParameterList' t[0] = t[1] #t[0].val += 'xxx' def p_ParameterizedObject (t): 'ParameterizedObject : DefinedObject ActualParameterList' t[0] = t[1] #t[0].val += 'xxx' # 9.5 def p_ActualParameterList (t): 'ActualParameterList : lbraceignore rbraceignore' #def p_ActualParameterList (t): # 'ActualParameterList : LBRACE ActualParameters RBRACE' # t[0] = t[2] #def p_ActualParameters_1 (t): # 'ActualParameters : ActualParameter' # t[0] = [t[1]] #def p_ActualParameters_2 (t): # 'ActualParameters : ActualParameters COMMA ActualParameter' # t[0] = t[1] + [t[3]] #def p_ActualParameter (t): # '''ActualParameter : Type # | Value''' # t[0] = t[1] #--- ITU-T Recommendation X.880 ----------------------------------------------- x880_classes = { 'OPERATION' : { '&ArgumentType' : [], '&argumentTypeOptional' : [ 'BooleanType' ], '&returnResult' : [ 'BooleanType' ], '&ResultType' : [], '&resultTypeOptional' : [ 'BooleanType' ], '&Errors' : [ 'ClassReference', 
'ERROR' ], '&Linked' : [ 'ClassReference', 'OPERATION' ], '&synchronous' : [ 'BooleanType' ], '&idempotent' : [ 'BooleanType' ], '&alwaysReturns' : [ 'BooleanType' ], '&InvokePriority' : [ '_FixedTypeValueSetFieldSpec' ], '&ResultPriority' : [ '_FixedTypeValueSetFieldSpec' ], '&operationCode' : [ 'TypeReference', 'Code' ], }, 'ERROR' : { '&ParameterType' : [], '&parameterTypeOptional' : [ 'BooleanType' ], '&ErrorPriority' : [ '_FixedTypeValueSetFieldSpec' ], '&errorCode' : [ 'TypeReference', 'Code' ], }, 'OPERATION-PACKAGE' : { '&Both' : [ 'ClassReference', 'OPERATION' ], '&Consumer' : [ 'ClassReference', 'OPERATION' ], '&Supplier' : [ 'ClassReference', 'OPERATION' ], '&id' : [ 'ObjectIdentifierType' ], }, 'CONNECTION-PACKAGE' : { '&bind' : [ 'ClassReference', 'OPERATION' ], '&unbind' : [ 'ClassReference', 'OPERATION' ], '&responderCanUnbind' : [ 'BooleanType' ], '&unbindCanFail' : [ 'BooleanType' ], '&id' : [ 'ObjectIdentifierType' ], }, 'CONTRACT' : { '&connection' : [ 'ClassReference', 'CONNECTION-PACKAGE' ], '&OperationsOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ], '&InitiatorConsumerOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ], '&InitiatorSupplierOf' : [ 'ClassReference', 'OPERATION-PACKAGE' ], '&id' : [ 'ObjectIdentifierType' ], }, 'ROS-OBJECT-CLASS' : { '&Is' : [ 'ClassReference', 'ROS-OBJECT-CLASS' ], '&Initiates' : [ 'ClassReference', 'CONTRACT' ], '&Responds' : [ 'ClassReference', 'CONTRACT' ], '&InitiatesAndResponds' : [ 'ClassReference', 'CONTRACT' ], '&id' : [ 'ObjectIdentifierType' ], }, } x880_syntaxes = { 'OPERATION' : { 'ARGUMENT' : '&ArgumentType', 'ARGUMENT OPTIONAL' : '&argumentTypeOptional', 'RESULT' : '&ResultType', 'RESULT OPTIONAL' : '&resultTypeOptional', 'RETURN' : 'RETURN', 'RETURN RESULT' : '&returnResult', 'ERRORS' : '&Errors', 'LINKED' : '&Linked', 'SYNCHRONOUS' : '&synchronous', 'IDEMPOTENT' : '&idempotent', 'ALWAYS' : 'ALWAYS', 'RESPONDS' : 'RESPONDS', 'ALWAYS RESPONDS' : '&alwaysReturns', 'INVOKE' : 'INVOKE', 'PRIORITY' : 'PRIORITY', 'INVOKE PRIORITY' : '&InvokePriority', 'RESULT-PRIORITY': '&ResultPriority', 'CODE' : '&operationCode', }, 'ERROR' : { 'PARAMETER' : '&ParameterType', 'PARAMETER OPTIONAL' : '&parameterTypeOptional', 'PRIORITY' : '&ErrorPriority', 'CODE' : '&errorCode', }, # 'OPERATION-PACKAGE' : { # }, # 'CONNECTION-PACKAGE' : { # }, # 'CONTRACT' : { # }, # 'ROS-OBJECT-CLASS' : { # }, } def x880_module_begin(): #print "x880_module_begin()" for name in list(x880_classes.keys()): add_class_ident(name) def x880_import(name): if name in x880_syntaxes: class_syntaxes_enabled[name] = True class_syntaxes[name] = x880_syntaxes[name] if name in x880_classes: add_class_ident(name) for f in (list(x880_classes[name].keys())): set_type_to_class(name, f, x880_classes[name][f]) tokens = tokens + get_syntax_tokens(x880_syntaxes) # {...} OID value #def p_lbrace_oid(t): # 'lbrace_oid : brace_oid_begin LBRACE' # t[0] = t[1] #def p_brace_oid_begin(t): # 'brace_oid_begin : ' # global in_oid # in_oid = True #def p_rbrace_oid(t): # 'rbrace_oid : brace_oid_end RBRACE' # t[0] = t[2] #def p_brace_oid_end(t): # 'brace_oid_end : ' # global in_oid # in_oid = False # {...} block to be ignored def p_lbraceignore(t): 'lbraceignore : braceignorebegin LBRACE' t[0] = t[1] def p_braceignorebegin(t): 'braceignorebegin : ' global lexer lexer.level = 1 lexer.push_state('braceignore') def p_rbraceignore(t): 'rbraceignore : braceignoreend RBRACE' t[0] = t[2] def p_braceignoreend(t): 'braceignoreend : ' global lexer lexer.pop_state() def p_error(t): global input_file raise 
ParseError(t, input_file) def p_pyquote (t): '''pyquote : PYQUOTE''' t[0] = PyQuote (val = t[1]) def testlex (s): lexer.input (s) while True: token = lexer.token () if not token: break print(token) def do_module (ast, defined_dict): assert (ast.type == 'Module') ctx = Ctx (defined_dict) print(ast.to_python (ctx)) print(ctx.output_assignments ()) print(ctx.output_pyquotes ()) def eth_do_module (ast, ectx): assert (ast.type == 'Module') if ectx.dbg('s'): print(ast.str_depth(0)) ast.to_eth(ectx) def testyacc(s, fn, defined_dict): ast = yacc.parse(s, debug=0) time_str = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) print("""#!/usr/bin/env python # Auto-generated from %s at %s from PyZ3950 import asn1""" % (fn, time_str)) for module in ast: eth_do_module (module, defined_dict) # Wireshark compiler def eth_usage(): print(""" asn2wrs [-h|?] [-d dbg] [-b] [-p proto] [-c cnf_file] [-e] input_file(s) ... -h|? : Usage -b : BER (default is PER) -u : Unaligned (default is aligned) -p proto : Protocol name (implies -S). Default is module-name from input_file (renamed by #.MODULE if present) -o name : Output files name core (default is <proto>) -O dir : Output directory for dissector -c cnf_file : Conformance file -I path : Path for conformance file includes -e : Create conformance file for exported types -E : Just create conformance file for exported types -S : Single output for multiple modules -s template : Single file output (template is input file without .c/.h extension) -k : Keep intermediate files though single file output is used -L : Suppress #line directive from .cnf file -D dir : Directory for input_file(s) (default: '.') -C : Add check for SIZE constraints -r prefix : Remove the prefix from type names input_file(s) : Input ASN.1 file(s) -d dbg : Debug output, dbg = [l][y][p][s][a][t][c][m][o] l - lex y - yacc p - parsing s - internal ASN.1 structure a - list of assignments t - tables c - conformance values m - list of compiled modules with dependency o - list of output files """) def eth_main(): global input_file global g_conform global lexer print("ASN.1 to Wireshark dissector compiler"); try: opts, args = getopt.getopt(sys.argv[1:], "h?d:D:buXp:FTo:O:c:I:eESs:kLCr:"); except getopt.GetoptError: eth_usage(); sys.exit(2) if len(args) < 1: eth_usage(); sys.exit(2) conform = EthCnf() conf_to_read = None output = EthOut() ectx = EthCtx(conform, output) ectx.encoding = 'per' ectx.proto_opt = None ectx.fld_opt = {} ectx.tag_opt = False ectx.outnm_opt = None ectx.aligned = True ectx.dbgopt = '' ectx.new = True ectx.expcnf = False ectx.justexpcnf = False ectx.merge_modules = False ectx.group_by_prot = False ectx.conform.last_group = 0 ectx.conform.suppress_line = False; ectx.output.outnm = None ectx.output.single_file = None ectx.constraints_check = False; for o, a in opts: if o in ("-h", "-?"): eth_usage(); sys.exit(2) if o in ("-c",): conf_to_read = relpath(a) if o in ("-I",): ectx.conform.include_path.append(relpath(a)) if o in ("-E",): ectx.expcnf = True ectx.justexpcnf = True if o in ("-D",): ectx.srcdir = relpath(a) if o in ("-C",): ectx.constraints_check = True if o in ("-L",): ectx.suppress_line = True if o in ("-X",): warnings.warn("Command line option -X is obsolete and can be removed") if o in ("-T",): warnings.warn("Command line option -T is obsolete and can be removed") if conf_to_read: ectx.conform.read(conf_to_read) for o, a in opts: if o in ("-h", "-?", "-c", "-I", "-E", "-D", "-C", "-X", "-T"): pass # already processed else: par = [] if a: par.append(a) 
ectx.conform.set_opt(o, par, "commandline", 0) (ld, yd, pd) = (0, 0, 0); if ectx.dbg('l'): ld = 1 if ectx.dbg('y'): yd = 1 if ectx.dbg('p'): pd = 2 lexer = lex.lex(debug=ld) parser = yacc.yacc(method='LALR', debug=yd, outputdir='.') parser.defaulted_states = {} g_conform = ectx.conform ast = [] for fn in args: input_file = fn lexer.lineno = 1 if (ectx.srcdir): fn = ectx.srcdir + '/' + fn # Read ASN.1 definition, trying one of the common encodings. data = open(fn, "rb").read() for encoding in ('utf-8', 'windows-1252'): try: data = data.decode(encoding) break except Exception: warnings.warn_explicit("Decoding %s as %s failed, trying next." % (fn, encoding), UserWarning, '', 0) # Py2 compat, name.translate in eth_output_hf_arr fails with unicode if not isinstance(data, str): data = data.encode('utf-8') ast.extend(yacc.parse(data, lexer=lexer, debug=pd)) ectx.eth_clean() if (ectx.merge_modules): # common output for all module ectx.eth_clean() for module in ast: eth_do_module(module, ectx) ectx.eth_prepare() ectx.eth_do_output() elif (ectx.groups()): # group by protocols/group groups = [] pr2gr = {} if (ectx.group_by_prot): # group by protocols for module in ast: prot = module.get_proto(ectx) if prot not in pr2gr: pr2gr[prot] = len(groups) groups.append([]) groups[pr2gr[prot]].append(module) else: # group by groups pass for gm in (groups): ectx.eth_clean() for module in gm: eth_do_module(module, ectx) ectx.eth_prepare() ectx.eth_do_output() else: # output for each module for module in ast: ectx.eth_clean() eth_do_module(module, ectx) ectx.eth_prepare() ectx.eth_do_output() if ectx.dbg('m'): ectx.dbg_modules() if ectx.dbg('c'): ectx.conform.dbg_print() if not ectx.justexpcnf: ectx.conform.unused_report() if ectx.dbg('o'): ectx.output.dbg_print() ectx.output.make_single_file(ectx.suppress_line) # Python compiler def main(): if sys.version_info[0] < 3: print("This requires Python 3") sys.exit(2) testfn = testyacc if len (sys.argv) == 1: while True: s = eval(input ('Query: ')) if len (s) == 0: break testfn (s, 'console', {}) else: defined_dict = {} for fn in sys.argv [1:]: f = open (fn, "r") testfn (f.read (), fn, defined_dict) f.close () lexer.lineno = 1 #--- BODY --------------------------------------------------------------------- if __name__ == '__main__': if (os.path.splitext(os.path.basename(sys.argv[0]))[0].lower() in ('asn2wrs', 'asn2eth')): eth_main() else: main() #------------------------------------------------------------------------------ # # Editor modelines - https://www.wireshark.org/tools/modelines.html # # c-basic-offset: 4; tab-width: 8; indent-tabs-mode: nil # vi: set shiftwidth=4 tabstop=8 expandtab: # :indentSize=4:tabSize=8:noTabs=true:
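# ---------------------------------------------------------------------------
# Standalone sketch (not part of the compiler above) of the input-decoding
# fallback used by eth_main(): read the ASN.1 source as bytes and try a list
# of likely encodings in order, keeping the first one that decodes cleanly.
# The name read_asn1_source is hypothetical and exists only for this example.

def read_asn1_source(path, encodings=('utf-8', 'windows-1252')):
    with open(path, 'rb') as f:
        raw = f.read()
    for enc in encodings:
        try:
            return raw.decode(enc)
        except UnicodeDecodeError:
            continue
    # Last resort: decode lossily rather than failing outright.
    return raw.decode(encodings[-1], errors='replace')

# A typical invocation, using only options documented in eth_usage() above
# (the protocol and file names are illustrative):
#
#   asn2wrs.py -b -p myproto -c packet-myproto.cnf -o myproto MyProto.asn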
Shell Script
wireshark/tools/bsd-setup.sh
#!/usr/bin/env sh # Setup development environment on BSD-like platforms. # # Tested on: FreeBSD, OpenBSD, NetBSD. # # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later # # We drag in tools that might not be needed by all users; it's easier # that way. # # We do not use Bash as the shell for this script, and use the POSIX # syntax for function definition rather than the # "function <name>() { ... }" syntax, as FreeBSD 13, at least, does # not have Bash, and its /bin/sh doesn't support the other syntax. # print_usage() { printf "\\nUtility to setup a bsd-based system for Wireshark Development.\\n" printf "The basic usage installs the needed software\\n\\n" printf "Usage: $0 [--install-optional] [...other options...]\\n" printf "\\t--install-optional: install optional software as well\\n" printf "\\t[other]: other options are passed as-is to pkg manager.\\n" } ADDITIONAL=0 OPTIONS= for arg; do case $arg in --help) print_usage exit 0 ;; --install-optional) ADDITIONAL=1 ;; *) OPTIONS="$OPTIONS $arg" ;; esac done # Check if the user is root if [ $(id -u) -ne 0 ] then echo "You must be root." exit 1 fi BASIC_LIST="\ cmake \ qt6 \ git \ pcre2 \ speexdsp" ADDITIONAL_LIST="\ gettext-tools \ snappy \ bcg729 \ libssh \ libmaxminddb \ libsmi \ brotli \ zstd \ lua52 \ " # Uncomment to add PNG compression utilities used by compress-pngs: # ADDITIONAL_LIST="$ADDITIONAL_LIST \ # advancecomp \ # optipng \ # pngcrush" # Guess which package manager we will use PM=`which pkgin 2> /dev/null || which pkg 2> /dev/null || which pkg_add 2> /dev/null` case $PM in */pkgin) PM_OPTIONS="install" PM_SEARCH="pkgin search" PM_MUST_GLOB=no ;; */pkg) PM_OPTIONS="install" PM_SEARCH="pkg search" PM_MUST_GLOB=yes ;; */pkg_add) PM_OPTIONS="" PM_SEARCH="pkg_info" PM_MUST_GLOB=no ;; esac echo "Using $PM ($PM_SEARCH)" # Adds package $2 to list variable $1 if the package is found add_package() { local list="$1" pkgname="$2" # fail if the package is not known if [ "$PM_MUST_GLOB" = yes ] then # # We need to do a glob search, with a "*" at the # end, so we only find packages that *begin* with # the name; otherwise, searching for pkg-config # could find packages that *don't* begin with # pkg-config, but have it later in the name # (FreeBSD 11 has one such package), so when # we then try to install it, that fails. Doing # an *exact* search fails, as that requires that # the package name include the version number. 
        #
        $PM_SEARCH -g "$pkgname*" > /dev/null 2>&1 || return 1
    else
        $PM_SEARCH "$pkgname" > /dev/null 2>&1 || return 1
    fi

    # package is found, append it to list
    eval "${list}=\"\${${list}} \${pkgname}\""
}

# pkg-config: NetBSD
# pkgconf: FreeBSD
add_package BASIC_LIST pkg-config ||
add_package BASIC_LIST pkgconf ||
echo "pkg-config is unavailable"

# c-ares: FreeBSD
# libcares: OpenBSD
add_package BASIC_LIST c-ares ||
add_package BASIC_LIST libcares ||
echo "c-ares is unavailable"

# rubygem-asciidoctor: FreeBSD
add_package ADDITIONAL_LIST rubygem-asciidoctor ||
echo "asciidoctor is unavailable"

# liblz4: FreeBSD
# lz4: NetBSD
add_package ADDITIONAL_LIST liblz4 ||
add_package ADDITIONAL_LIST lz4 ||
echo "lz4 is unavailable"

# nghttp2: NetBSD
add_package ADDITIONAL_LIST nghttp2 ||
echo "nghttp2 is unavailable"

# spandsp: NetBSD
add_package ADDITIONAL_LIST spandsp ||
echo "spandsp is unavailable"

# ninja: FreeBSD, OpenBSD
# ninja-build: NetBSD
add_package ADDITIONAL_LIST ninja-build ||
add_package ADDITIONAL_LIST ninja ||
echo "ninja is unavailable"

# libilbc: FreeBSD
add_package ADDITIONAL_LIST libilbc ||
echo "libilbc is unavailable"

# Add OS-specific required/optional packages
# Those not listed don't require additions.
case `uname` in
    FreeBSD | NetBSD)
        add_package ADDITIONAL_LIST libgcrypt ||
        echo "libgcrypt is unavailable"
        ;;
esac

ACTUAL_LIST=$BASIC_LIST

# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
    ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi

$PM $PM_OPTIONS $ACTUAL_LIST $OPTIONS
# Bail out if the package installation failed.
if [ $? -ne 0 ]
then
    exit 2
fi

if [ $ADDITIONAL -eq 0 ]
then
    # printf rather than "echo -e", which is not POSIX.
    printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n\n"
fi
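# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the setup logic above): add_package()
# appends to a list variable *named by* its first argument, using eval to
# perform the indirect assignment.  The minimal demo below shows the same
# technique; append_to and DEMO_LIST are names invented for this example.

append_to() {
    list="$1"
    pkgname="$2"
    # For list=DEMO_LIST this expands to: DEMO_LIST="${DEMO_LIST} ${pkgname}"
    eval "${list}=\"\${${list}} \${pkgname}\""
}

DEMO_LIST="cmake"
append_to DEMO_LIST git
append_to DEMO_LIST pcre2
echo "$DEMO_LIST"    # -> "cmake git pcre2"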
Perl
wireshark/tools/checkAPIs.pl
#!/usr/bin/env perl # # Copyright 2006, Jeff Morriss <jeff.morriss.ws[AT]gmail.com> # # A simple tool to check source code for function calls that should not # be called by Wireshark code and to perform certain other checks. # # Usage: # checkAPIs.pl [-M] [-g group1] [-g group2] ... # [-s summary-group1] [-s summary-group2] ... # [--nocheck-hf] # [--nocheck-value-string-array] # [--nocheck-shadow] # [--debug] # file1 file2 ... # # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later # use strict; use Encode; use English; use Getopt::Long; use Text::Balanced qw(extract_bracketed); my %APIs = ( # API groups. # Group name, e.g. 'prohibited' # '<name>' => { # 'count_errors' => 1, # 1 if these are errors, 0 if warnings # 'functions' => [ 'f1', 'f2', ...], # Function array # 'function-counts' => {'f1',0, 'f2',0, ...}, # Function Counts hash (initialized in the code) # } # # APIs that MUST NOT be used in Wireshark 'prohibited' => { 'count_errors' => 1, 'functions' => [ # Memory-unsafe APIs # Use something that won't overwrite the end of your buffer instead # of these. # # Microsoft provides lists of unsafe functions and their # recommended replacements in "Security Development Lifecycle # (SDL) Banned Function Calls" # https://docs.microsoft.com/en-us/previous-versions/bb288454(v=msdn.10) # and "Deprecated CRT Functions" # https://docs.microsoft.com/en-us/previous-versions/ms235384(v=vs.100) # 'atoi', # use wsutil/strtoi.h functions 'gets', 'sprintf', 'g_sprintf', 'vsprintf', 'g_vsprintf', 'strcpy', 'strncpy', 'strcat', 'strncat', 'cftime', 'ascftime', ### non-portable APIs # use glib (g_*) versions instead of these: 'ntohl', 'ntohs', 'htonl', 'htons', 'strdup', 'strndup', # Windows doesn't have this; use g_ascii_strtoull() instead 'strtoull', ### non-portable: fails on Windows Wireshark built with VC newer than VC6 # See https://gitlab.com/wireshark/wireshark/-/issues/6695#note_400659130 'g_fprintf', 'g_vfprintf', # use native snprintf() and vsnprintf() instead of these: 'g_snprintf', 'g_vsnprintf', ### non-ANSI C # use memset, memcpy, memcmp instead of these: 'bzero', 'bcopy', 'bcmp', # The MSDN page for ZeroMemory recommends SecureZeroMemory # instead. 'ZeroMemory', # use wmem_*, ep_*, or g_* functions instead of these: # (One thing to be aware of is that space allocated with malloc() # may not be freeable--at least on Windows--with g_free() and # vice-versa.) 'malloc', 'calloc', 'realloc', 'valloc', 'free', 'cfree', # Locale-unsafe APIs # These may have unexpected behaviors in some locales (e.g., # "I" isn't always the upper-case form of "i", and "i" isn't # always the lower-case form of "I"). Use the g_ascii_* version # instead. 'isalnum', 'isascii', 'isalpha', 'iscntrl', 'isdigit', 'islower', 'isgraph', 'isprint', 'ispunct', 'isspace', 'isupper', 'isxdigit', 'tolower', 'atof', 'strtod', 'strcasecmp', 'strncasecmp', # Deprecated in glib 2.68 in favor of g_memdup2 # We have our local implementation for older versions 'g_memdup', 'g_strcasecmp', 'g_strncasecmp', 'g_strup', 'g_strdown', 'g_string_up', 'g_string_down', 'strerror', # use g_strerror # Use the ws_* version of these: # (Necessary because on Windows we use UTF8 for throughout the code # so we must tweak that to UTF16 before operating on the file. Code # using these functions will work unless the file/path name contains # non-ASCII chars.) 
'open', 'rename', 'mkdir', 'stat', 'unlink', 'remove', 'fopen', 'freopen', 'fstat', 'lseek', # Misc 'tmpnam', # use mkstemp '_snwprintf' # use StringCchPrintf ] }, ### Soft-Deprecated functions that should not be used in new code but # have not been entirely removed from old code. These will become errors # once they've been removed from all existing code. 'soft-deprecated' => { 'count_errors' => 0, 'functions' => [ 'tvb_length_remaining', # replaced with tvb_captured_length_remaining # Locale-unsafe APIs # These may have unexpected behaviors in some locales (e.g., # "I" isn't always the upper-case form of "i", and "i" isn't # always the lower-case form of "I"). Use the g_ascii_* version # instead. 'toupper' ] }, # APIs that SHOULD NOT be used in Wireshark (any more) 'deprecated' => { 'count_errors' => 1, 'functions' => [ 'perror', # Use g_strerror() and report messages in whatever # fashion is appropriate for the code in question. 'ctime', # Use abs_time_secs_to_str() 'next_tvb_add_port', # Use next_tvb_add_uint() (and a matching change # of NTVB_PORT -> NTVB_UINT) ### Deprecated GLib/GObject functions/macros # (The list is based upon the GLib 2.30.2 & GObject 2.30.2 documentation; # An entry may be commented out if it is currently # being used in Wireshark and if the replacement functionality # is not available in all the GLib versions that Wireshark # currently supports. # Note: Wireshark currently (Jan 2012) requires GLib 2.14 or newer. # The Wireshark build currently (Jan 2012) defines G_DISABLE_DEPRECATED # so use of any of the following should cause the Wireshark build to fail and # therefore the tests for obsolete GLib function usage in checkAPIs should not be needed. 'G_ALLOC_AND_FREE', 'G_ALLOC_ONLY', 'g_allocator_free', # "use slice allocator" (avail since 2.10,2.14) 'g_allocator_new', # "use slice allocator" (avail since 2.10,2.14) 'g_async_queue_ref_unlocked', # g_async_queue_ref() (OK since 2.8) 'g_async_queue_unref_and_unlock', # g_async_queue_unref() (OK since 2.8) 'g_atomic_int_exchange_and_add', # since 2.30 'g_basename', 'g_blow_chunks', # "use slice allocator" (avail since 2.10,2.14) 'g_cache_value_foreach', # g_cache_key_foreach() 'g_chunk_free', # g_slice_free (avail since 2.10) 'g_chunk_new', # g_slice_new (avail since 2.10) 'g_chunk_new0', # g_slice_new0 (avail since 2.10) 'g_completion_add_items', # since 2.26 'g_completion_clear_items', # since 2.26 'g_completion_complete', # since 2.26 'g_completion_complete_utf8', # since 2.26 'g_completion_free', # since 2.26 'g_completion_new', # since 2.26 'g_completion_remove_items', # since 2.26 'g_completion_set_compare', # since 2.26 'G_CONST_RETURN', # since 2.26 'g_date_set_time', # g_date_set_time_t (avail since 2.10) 'g_dirname', 'g_format_size_for_display', # since 2.30: use g_format_size() 'G_GNUC_FUNCTION', 'G_GNUC_PRETTY_FUNCTION', 'g_hash_table_freeze', 'g_hash_table_thaw', 'G_HAVE_GINT64', 'g_io_channel_close', 'g_io_channel_read', 'g_io_channel_seek', 'g_io_channel_write', 'g_list_pop_allocator', # "does nothing since 2.10" 'g_list_push_allocator', # "does nothing since 2.10" 'g_main_destroy', 'g_main_is_running', 'g_main_iteration', 'g_main_new', 'g_main_pending', 'g_main_quit', 'g_main_run', 'g_main_set_poll_func', 'g_mapped_file_free', # [as of 2.22: use g_map_file_unref] 'g_mem_chunk_alloc', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_alloc0', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_clean', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_create', # "use slice allocator" 
(avail since 2.10) 'g_mem_chunk_destroy', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_free', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_info', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_new', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_print', # "use slice allocator" (avail since 2.10) 'g_mem_chunk_reset', # "use slice allocator" (avail since 2.10) 'g_node_pop_allocator', # "does nothing since 2.10" 'g_node_push_allocator', # "does nothing since 2.10" 'g_relation_count', # since 2.26 'g_relation_delete', # since 2.26 'g_relation_destroy', # since 2.26 'g_relation_exists', # since 2.26 'g_relation_index', # since 2.26 'g_relation_insert', # since 2.26 'g_relation_new', # since 2.26 'g_relation_print', # since 2.26 'g_relation_select', # since 2.26 'g_scanner_add_symbol', 'g_scanner_remove_symbol', 'g_scanner_foreach_symbol', 'g_scanner_freeze_symbol_table', 'g_scanner_thaw_symbol_table', 'g_slist_pop_allocator', # "does nothing since 2.10" 'g_slist_push_allocator', # "does nothing since 2.10" 'g_source_get_current_time', # since 2.28: use g_source_get_time() 'g_strcasecmp', # 'g_strdown', # 'g_string_down', # 'g_string_sprintf', # use g_string_printf() instead 'g_string_sprintfa', # use g_string_append_printf instead 'g_string_up', # 'g_strncasecmp', # 'g_strup', # 'g_tree_traverse', 'g_tuples_destroy', # since 2.26 'g_tuples_index', # since 2.26 'g_unicode_canonical_decomposition', # since 2.30: use g_unichar_fully_decompose() 'G_UNICODE_COMBINING_MARK', # since 2.30:use G_UNICODE_SPACING_MARK 'g_value_set_boxed_take_ownership', # GObject 'g_value_set_object_take_ownership', # GObject 'g_value_set_param_take_ownership', # GObject 'g_value_set_string_take_ownership', # Gobject 'G_WIN32_DLLMAIN_FOR_DLL_NAME', 'g_win32_get_package_installation_directory', 'g_win32_get_package_installation_subdirectory', 'qVariantFromValue' ] }, 'dissectors-prohibited' => { 'count_errors' => 1, 'functions' => [ # APIs that make the program exit. Dissectors shouldn't call these. 'abort', 'assert', 'assert_perror', 'exit', 'g_assert', 'g_error', ] }, 'dissectors-restricted' => { 'count_errors' => 0, 'functions' => [ # APIs that print to the terminal. Dissectors shouldn't call these. # FIXME: Explain what to use instead. 'printf', 'g_warning', ] }, ); my @apiGroups = qw(prohibited deprecated soft-deprecated); # Defines array of pairs function/variable which are excluded # from prefs_register_*_preference checks my @excludePrefsCheck = ( [ qw(prefs_register_password_preference), '(const char **)arg->pref_valptr' ], [ qw(prefs_register_string_preference), '(const char **)arg->pref_valptr' ], ); # Given a ref to a hash containing "functions" and "functions_count" entries: # Determine if any item of the list of APIs contained in the array referenced by "functions" # exists in the file. # For each API which appears in the file: # Push the API onto the provided list; # Add the number of times the API appears in the file to the total count # for the API (stored as the value of the API key in the hash referenced by "function_counts"). sub findAPIinFile($$$) { my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_; for my $api ( @{$groupHashRef->{functions}} ) { my $cnt = 0; # Match function calls, but ignore false positives from: # C++ method definition: int MyClass::open(...) # Method invocation: myClass->open(...); # Function declaration: int open(...); # Method invocation: QString().sprintf(...) while (${$fileContentsRef} =~ m/ \W (?<!::|->|\w\ ) (?<!\.) 
$api \W* \( /gx) { $cnt += 1; } if ($cnt > 0) { push @{$foundAPIsRef}, $api; $groupHashRef->{function_counts}->{$api} += 1; } } } # APIs which (generally) should not be called with an argument of tvb_get_ptr() my @TvbPtrAPIs = ( # Use NULL for the value_ptr instead of tvb_get_ptr() (only if the # given offset and length are equal) with these: 'proto_tree_add_bytes_format', 'proto_tree_add_bytes_format_value', 'proto_tree_add_ether', # Use the tvb_* version of these: # Use tvb_bytes_to_str[_punct] instead of: 'bytes_to_str', 'bytes_to_str_punct', 'SET_ADDRESS', 'SET_ADDRESS_HF', ); sub checkAPIsCalledWithTvbGetPtr($$$) { my ($APIs, $fileContentsRef, $foundAPIsRef) = @_; for my $api (@{$APIs}) { my @items; my $cnt = 0; @items = (${$fileContentsRef} =~ m/ ($api [^;]* ; ) /xsg); while (@items) { my ($item) = @items; shift @items; if ($item =~ / tvb_get_ptr /xos) { $cnt += 1; } } if ($cnt > 0) { push @{$foundAPIsRef}, $api; } } } # List of possible shadow variable (Majority coming from macOS..) my @ShadowVariable = ( 'index', 'time', 'strlen', 'system' ); sub check_shadow_variable($$$) { my ($groupHashRef, $fileContentsRef, $foundAPIsRef) = @_; for my $api ( @{$groupHashRef} ) { my $cnt = 0; while (${$fileContentsRef} =~ m/ \s $api \s*+ [^\(\w] /gx) { $cnt += 1; } if ($cnt > 0) { push @{$foundAPIsRef}, $api; } } } sub check_snprintf_plus_strlen($$) { my ($fileContentsRef, $filename) = @_; my @items; # This catches both snprintf() and g_snprint. # If we need to do more APIs, we can make this function look more like # checkAPIsCalledWithTvbGetPtr(). @items = (${$fileContentsRef} =~ m/ (snprintf [^;]* ; ) /xsg); while (@items) { my ($item) = @items; shift @items; if ($item =~ / strlen\s*\( /xos) { print STDERR "Warning: ".$filename." uses snprintf + strlen to assemble strings.\n"; last; } } } #### Regex for use when searching for value-string definitions my $StaticRegex = qr/ static \s+ /xs; my $ConstRegex = qr/ const \s+ /xs; my $Static_andor_ConstRegex = qr/ (?: $StaticRegex $ConstRegex | $StaticRegex | $ConstRegex) /xs; my $ValueStringVarnameRegex = qr/ (?:value|val64|string|range|bytes)_string /xs; my $ValueStringRegex = qr/ $Static_andor_ConstRegex ($ValueStringVarnameRegex) \ + [^;*#]+ = [^;]+ [{] .+? [}] \s*? ; /xs; my $EnumValRegex = qr/ $Static_andor_ConstRegex enum_val_t \ + [^;*]+ = [^;]+ [{] .+? [}] \s*? ; /xs; my $NewlineStringRegex = qr/ ["] [^"]* \\n [^"]* ["] /xs; sub check_value_string_arrays($$$) { my ($fileContentsRef, $filename, $debug_flag) = @_; my $cnt = 0; # Brute force check for value_string (and string_string or range_string) arrays # which are missing {0, NULL} as the final (terminating) array entry # Assumption: definition is of form (pseudo-Regex): # " (static const|static|const) (value|string|range)_string .+ = { .+ ;" # (possibly over multiple lines) while (${$fileContentsRef} =~ / ( $ValueStringRegex ) /xsog) { # XXX_string array definition found; check if NULL terminated my $vs = my $vsx = $1; my $type = $2; if ($debug_flag) { $vsx =~ / ( .+ $ValueStringVarnameRegex [^=]+ ) = /xo; printf STDERR "==> %-35.35s: %s\n", $filename, $1; printf STDERR "%s\n", $vs; } $vs =~ s{ \s } {}xg; # Check for expected trailer my $expectedTrailer; my $trailerHint; if ($type eq "string_string") { # XXX shouldn't we reject 0 since it is gchar*? 
$expectedTrailer = "(NULL|0), NULL"; $trailerHint = "NULL, NULL"; } elsif ($type eq "range_string") { $expectedTrailer = "0(x0+)?, 0(x0+)?, NULL"; $trailerHint = "0, 0, NULL"; } elsif ($type eq "bytes_string") { # XXX shouldn't we reject 0 since it is guint8*? $expectedTrailer = "(NULL|0), 0, NULL"; $trailerHint = "NULL, NULL"; } else { $expectedTrailer = "0(x?0+)?, NULL"; $trailerHint = "0, NULL"; } if ($vs !~ / [{] $expectedTrailer [}] ,? [}] ; $/x) { $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: {%s} is required as the last %s array entry: %s\n", $filename, $trailerHint, $type, $1; $cnt++; } if ($vs !~ / (static)? const $ValueStringVarnameRegex /xo) { $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1; $cnt++; } if ($vs =~ / $NewlineStringRegex /xo && $type ne "bytes_string") { $vsx =~ /( $ValueStringVarnameRegex [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: XXX_string contains a newline: %s\n", $filename, $1; $cnt++; } } # Brute force check for enum_val_t arrays which are missing {NULL, NULL, ...} # as the final (terminating) array entry # For now use the same option to turn this and value_string checking on and off. # (Is the option even necessary?) # Assumption: definition is of form (pseudo-Regex): # " (static const|static|const) enum_val_t .+ = { .+ ;" # (possibly over multiple lines) while (${$fileContentsRef} =~ / ( $EnumValRegex ) /xsog) { # enum_val_t array definition found; check if NULL terminated my $vs = my $vsx = $1; if ($debug_flag) { $vsx =~ / ( .+ enum_val_t [^=]+ ) = /xo; printf STDERR "==> %-35.35s: %s\n", $filename, $1; printf STDERR "%s\n", $vs; } $vs =~ s{ \s } {}xg; # README.developer says # "Don't put a comma after the last tuple of an initializer of an array" # However: since this usage is present in some number of cases, we'll allow for now if ($vs !~ / NULL, NULL, -?[0-9] [}] ,? [}] ; $/xo) { $vsx =~ /( enum_val_t [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: {NULL, NULL, ...} is required as the last enum_val_t array entry: %s\n", $filename, $1; $cnt++; } if ($vs !~ / (static)? const enum_val_t /xo) { $vsx =~ /( enum_val_t [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: Missing 'const': %s\n", $filename, $1; $cnt++; } if ($vs =~ / $NewlineStringRegex /xo) { $vsx =~ /( (?:value|string|range)_string [^=]+ ) = /xo; printf STDERR "Error: %-35.35s: enum_val_t contains a newline: %s\n", $filename, $1; $cnt++; } } return $cnt; } sub check_included_files($$) { my ($fileContentsRef, $filename) = @_; my @incFiles; @incFiles = (${$fileContentsRef} =~ m/\#include \s* ([<"].+[>"])/gox); # files in the ui/qt directory should include the ui class includes # by using #include <> # this ensures that Visual Studio picks up these files from the # build directory if we're compiling with cmake if ($filename =~ m#ui/qt/# ) { foreach (@incFiles) { if ( m#"ui_.*\.h"$# ) { # strip the quotes to get the base name # for the error message s/\"//g; print STDERR "$filename: ". "Please use #include <$_> ". 
"instead of #include \"$_\".\n"; } } } } sub check_proto_tree_add_XXX($$) { my ($fileContentsRef, $filename) = @_; my @items; my $errorCount = 0; @items = (${$fileContentsRef} =~ m/ (proto_tree_add_[_a-z0-9]+) \( ([^;]*) \) \s* ; /xsg); while (@items) { my ($func) = @items; shift @items; my ($args) = @items; shift @items; #Check to make sure tvb_get* isn't used to pass into a proto_tree_add_<datatype>, when #proto_tree_add_item could just be used instead if ($args =~ /,\s*tvb_get_/xos) { if (($func =~ m/^proto_tree_add_(time|bytes|ipxnet|ipv4|ipv6|ether|guid|oid|string|boolean|float|double|uint|uint64|int|int64|eui64|bitmask_list_value)$/) ) { print STDERR "Error: ".$filename." uses $func with tvb_get_*. Use proto_tree_add_item instead\n"; $errorCount++; # Print out the function args to make it easier # to find the offending code. But first make # it readable by eliminating extra white space. $args =~ s/\s+/ /g; print STDERR "\tArgs: " . $args . "\n"; } } # Remove anything inside parenthesis in the arguments so we # don't get false positives when someone calls # proto_tree_add_XXX(..., tvb_YYY(..., ENC_ZZZ)) # and allow there to be newlines inside $args =~ s/\(.*\)//sg; #Check for accidental usage of ENC_ parameter if ($args =~ /,\s*ENC_/xos) { if (!($func =~ /proto_tree_add_(time|item|bitmask|[a-z0-9]+_bits_format_value|bits_item|bits_ret_val|item_ret_int|item_ret_uint|bytes_item|checksum)/xos) ) { print STDERR "Error: ".$filename." uses $func with ENC_*.\n"; $errorCount++; # Print out the function args to make it easier # to find the offending code. But first make # it readable by eliminating extra white space. $args =~ s/\s+/ /g; print STDERR "\tArgs: " . $args . "\n"; } } } return $errorCount; } # Verify that all declared ett_ variables are registered. # Don't bother trying to check usage (for now)... sub check_ett_registration($$) { my ($fileContentsRef, $filename) = @_; my @ett_declarations; my @ett_address_uses; my %ett_uses; my @unUsedEtts; my $errorCount = 0; # A pattern to match ett variable names. Obviously this assumes that # they start with `ett_` my $EttVarName = qr{ (?: ett_[a-z0-9_]+ (?:\[[0-9]+\])? ) }xi; # Find all the ett_ variables declared in the file @ett_declarations = (${$fileContentsRef} =~ m{ ^ # assume declarations are on their own line (?:static\s+)? # some declarations aren't static g?int # could be int or gint \s+ ($EttVarName) # variable name \s*=\s* -1\s*; }xgiom); if (!@ett_declarations) { # Only complain if the file looks like a dissector #print STDERR "Found no etts in ".$filename."\n" if # (${$fileContentsRef} =~ m{proto_register_field_array}os); return; } #print "Found these etts in ".$filename.": ".join(' ', @ett_declarations)."\n\n"; # Find all the uses of the *addresses* of ett variables in the file. # (We assume if someone is using the address they're using it to # register the ett.) @ett_address_uses = (${$fileContentsRef} =~ m{ &\s*($EttVarName) }xgiom); if (!@ett_address_uses) { print STDERR "Found no ett address uses in ".$filename."\n"; # Don't treat this as an error. # It's more likely a problem with checkAPIs. return; } #print "Found these etts addresses used in ".$filename.": ".join(' ', @ett_address_uses)."\n\n"; # Convert to a hash for fast lookup $ett_uses{$_}++ for (@ett_address_uses); # Find which declared etts are not used. 
while (@ett_declarations) { my ($ett_var) = @ett_declarations; shift @ett_declarations; push(@unUsedEtts, $ett_var) if (not exists $ett_uses{$ett_var}); } if (@unUsedEtts) { print STDERR "Error: found these unused ett variables in ".$filename.": ".join(' ', @unUsedEtts)."\n"; $errorCount++; } return $errorCount; } # Given the file contents and a file name, check all of the hf entries for # various problems (such as those checked for in proto.c). sub check_hf_entries($$) { my ($fileContentsRef, $filename) = @_; my $errorCount = 0; my @items; my $hfRegex = qr{ \{ \s* &\s*([A-Z0-9_\[\]-]+) # &hf \s*,\s* }xis; @items = (${$fileContentsRef} =~ m{ $hfRegex # &hf \{\s* ("[A-Z0-9 '\./\(\)_:-]+") # name \s*,\s* (NULL|"[A-Z0-9_\.-]*") # abbrev \s*,\s* (FT_[A-Z0-9_]+) # field type \s*,\s* ([A-Z0-9x\|_\s]+) # display \s*,\s* ([^,]+?) # convert \s*,\s* ([A-Z0-9_]+) # bitmask \s*,\s* (NULL|"[A-Z0-9 '\./\(\)\?_:-]+") # blurb (NULL or a string) \s*,\s* HFILL # HFILL }xgios); #print "Found @items items\n"; while (@items) { ##my $errorCount_save = $errorCount; my ($hf, $name, $abbrev, $ft, $display, $convert, $bitmask, $blurb) = @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; shift @items; $display =~ s/\s+//g; $convert =~ s/\s+//g; # GET_VALS_EXTP is a macro in packet-mq.h for packet-mq.c and packet-mq-pcf.c $convert =~ s/\bGET_VALS_EXTP\(/VALS_EXT_PTR\(/; #print "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n"; if ($abbrev eq '""' || $abbrev eq "NULL") { print STDERR "Error: $hf does not have an abbreviation in $filename\n"; $errorCount++; } if ($abbrev =~ m/\.\.+/) { print STDERR "Error: the abbreviation for $hf ($abbrev) contains two or more sequential periods in $filename\n"; $errorCount++; } if ($name eq $abbrev) { print STDERR "Error: the abbreviation for $hf ($abbrev) matches the field name ($name) in $filename\n"; $errorCount++; } if (lc($name) eq lc($blurb)) { print STDERR "Error: the blurb for $hf ($blurb) matches the field name ($name) in $filename\n"; $errorCount++; } if ($name =~ m/"\s+/) { print STDERR "Error: the name for $hf ($name) has leading space in $filename\n"; $errorCount++; } if ($name =~ m/\s+"/) { print STDERR "Error: the name for $hf ($name) has trailing space in $filename\n"; $errorCount++; } if ($blurb =~ m/"\s+/) { print STDERR "Error: the blurb for $hf ($blurb) has leading space in $filename\n"; $errorCount++; } if ($blurb =~ m/\s+"/) { print STDERR "Error: the blurb for $hf ($blurb) has trailing space in $filename\n"; $errorCount++; } if ($abbrev =~ m/\s+/) { print STDERR "Error: the abbreviation for $hf ($abbrev) has white space in $filename\n"; $errorCount++; } if ("\"".$hf ."\"" eq $name) { print STDERR "Error: name is the hf_variable_name in field $name ($abbrev) in $filename\n"; $errorCount++; } if ("\"".$hf ."\"" eq $abbrev) { print STDERR "Error: abbreviation is the hf_variable_name in field $name ($abbrev) in $filename\n"; $errorCount++; } if ($ft ne "FT_BOOLEAN" && $convert =~ m/^TFS\(.*\)/) { print STDERR "Error: $hf uses a true/false string but is an $ft instead of FT_BOOLEAN in $filename\n"; $errorCount++; } if ($ft eq "FT_BOOLEAN" && $convert =~ m/^VALS\(.*\)/) { print STDERR "Error: $hf uses a value_string but is an FT_BOOLEAN in $filename\n"; $errorCount++; } if (($ft eq "FT_BOOLEAN") && ($bitmask !~ /^(0x)?0+$/) && ($display =~ /^BASE_/)) { print STDERR "Error: $hf: FT_BOOLEAN with a bitmask must specify a 'parent field width' for 'display' in 
$filename\n"; $errorCount++; } if (($ft eq "FT_BOOLEAN") && ($convert !~ m/^((0[xX]0?)?0$|NULL$|TFS)/)) { print STDERR "Error: $hf: FT_BOOLEAN with non-null 'convert' field missing TFS in $filename\n"; $errorCount++; } if ($convert =~ m/RVALS/ && $display !~ m/BASE_RANGE_STRING/) { print STDERR "Error: $hf uses RVALS but 'display' does not include BASE_RANGE_STRING in $filename\n"; $errorCount++; } if ($convert =~ m/VALS64/ && $display !~ m/BASE_VAL64_STRING/) { print STDERR "Error: $hf uses VALS64 but 'display' does not include BASE_VAL64_STRING in $filename\n"; $errorCount++; } if ($display =~ /BASE_EXT_STRING/ && $convert !~ /^(VALS_EXT_PTR\(|&)/) { print STDERR "Error: $hf: BASE_EXT_STRING should use VALS_EXT_PTR for 'strings' instead of '$convert' in $filename\n"; $errorCount++; } if ($ft =~ m/^FT_U?INT(8|16|24|32)$/ && $convert =~ m/^VALS64\(/) { print STDERR "Error: $hf: 32-bit field must use VALS instead of VALS64 in $filename\n"; $errorCount++; } if ($ft =~ m/^FT_U?INT(40|48|56|64)$/ && $convert =~ m/^VALS\(/) { print STDERR "Error: $hf: 64-bit field must use VALS64 instead of VALS in $filename\n"; $errorCount++; } if ($convert =~ m/^(VALS|VALS64|RVALS)\(&.*\)/) { print STDERR "Error: $hf is passing the address of a pointer to $1 in $filename\n"; $errorCount++; } if ($convert !~ m/^((0[xX]0?)?0$|NULL$|VALS|VALS64|VALS_EXT_PTR|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES)/ && $display !~ /BASE_CUSTOM/) { print STDERR "Error: non-null $hf 'convert' field missing 'VALS|VALS64|RVALS|TFS|CF_FUNC|FRAMENUM_TYPE|&|STRINGS_ENTERPRISES' in $filename ?\n"; $errorCount++; } ## Benign... ## if (($ft eq "FT_BOOLEAN") && ($bitmask =~ /^(0x)?0+$/) && ($display ne "BASE_NONE")) { ## print STDERR "Error: $abbrev: FT_BOOLEAN with no bitmask must use BASE_NONE for 'display' in $filename\n"; ## $errorCount++; ## } ##if ($errorCount != $errorCount_save) { ## print STDERR "name=$name, abbrev=$abbrev, ft=$ft, display=$display, convert=>$convert<, bitmask=$bitmask, blurb=$blurb\n"; ##} } return $errorCount; } sub check_pref_var_dupes($$) { my ($filecontentsref, $filename) = @_; my $errorcount = 0; # Avoid flagging the actual prototypes return 0 if $filename =~ /prefs\.[ch]$/; # remove macro lines my $filecontents = ${$filecontentsref}; $filecontents =~ s { ^\s*\#.*$} []xogm; # At what position is the variable in the prefs_register_*_preference() call? 
my %prefs_register_var_pos = ( static_text => undef, obsolete => undef, # ignore decode_as_range => -2, range => -2, filename => -2, # second to last enum => -3, # third to last # everything else is the last argument ); my @dupes; my %count; while ($filecontents =~ /prefs_register_(\w+?)_preference/gs) { my ($func) = "prefs_register_$1_preference"; my ($args) = extract_bracketed(substr($filecontents, $+[0]), '()'); $args = substr($args, 1, -1); # strip parens my $pos = $prefs_register_var_pos{$1}; next if exists $prefs_register_var_pos{$1} and not defined $pos; $pos //= -1; my $var = (split /\s*,\s*(?![^(]*\))/, $args)[$pos]; # only commas outside parens my $ignore = 0; for my $row (@excludePrefsCheck) { my ($rfunc, $rvar) = @$row; if (($rfunc eq $func) && ($rvar eq $var)) { $ignore = 1 } } if (!$ignore) { push @dupes, $var if $count{$var}++ == 1; } } if (@dupes) { print STDERR "$filename: error: found these preference variables used in more than one prefs_register_*_preference:\n\t".join(', ', @dupes)."\n"; $errorcount++; } return $errorcount; } # Check for forbidden control flow changes, see epan/exceptions.h sub check_try_catch($$) { my ($fileContentsRef, $filename) = @_; my $errorCount = 0; # Match TRY { ... } ENDTRY (with an optional '\' in case of a macro). my @items = (${$fileContentsRef} =~ m/ \bTRY\s*\{ (.+?) \}\s* \\? \s*ENDTRY\b /xsg); for my $block (@items) { if ($block =~ m/ \breturn\b /x) { print STDERR "Error: return is forbidden in TRY/CATCH in $filename\n"; $errorCount++; } my @gotoLabels = $block =~ m/ \bgoto\s+ (\w+) /xsg; my %seen = (); for my $gotoLabel (@gotoLabels) { if ($seen{$gotoLabel}) { next; } $seen{$gotoLabel} = 1; if ($block !~ /^ \s* $gotoLabel \s* :/xsgm) { print STDERR "Error: goto to label '$gotoLabel' outside TRY/CATCH is forbidden in $filename\n"; $errorCount++; } } } return $errorCount; } sub print_usage { print "Usage: checkAPIs.pl [-M] [-h] [-g group1[:count]] [-g group2] ... \n"; print " [-summary-group group1] [-summary-group group2] ... \n"; print " [--sourcedir=srcdir] \n"; print " [--nocheck-hf]\n"; print " [--nocheck-value-string-array] \n"; print " [--nocheck-shadow]\n"; print " [--debug]\n"; print " [--file=/path/to/file_list]\n"; print " file1 file2 ...\n"; print "\n"; print " -M: Generate output for -g in 'machine-readable' format\n"; print " -p: used by the git pre-commit hook\n"; print " -h: help, print usage message\n"; print " -g <group>: Check input files for use of APIs in <group>\n"; print " (in addition to the default groups)\n"; print " Maximum uses can be specified with <group>:<count>\n"; print " -summary-group <group>: Output summary (count) for each API in <group>\n"; print " (-g <group> also req'd)\n"; print " --nocheck-hf: Skip header field definition checks\n"; print " --nocheck-value-string-array: Skip value string array checks\n"; print " --nocheck-shadow: Skip shadow variable checks\n"; print " --debug: UNDOCUMENTED\n"; print "\n"; print " Default Groups[-g]: ", join (", ", sort @apiGroups), "\n"; print " Available Groups: ", join (", ", sort keys %APIs), "\n"; } # ------------- # action: remove '#if 0'd code from the input string # args codeRef, fileName # returns: codeRef # # Essentially: split the input into blocks of code or lines of #if/#if 0/etc. # Remove blocks that follow '#if 0' until '#else/#endif' is found. 
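#
# For example (illustrative), given the input
#     keep_1();
#     #if 0
#     dropped();
#     #endif
#     keep_2();
# the '#if 0'/'#endif' lines themselves are kept (preprocessor lines are
# always passed through) but dropped() is removed from the returned code.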
{ # block begin my $debug = 0; sub remove_if0_code { my ($codeRef, $fileName) = @_; # Preprocess output (ensure trailing LF and no leading WS before '#') $$codeRef =~ s/^\s*#/#/m; if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; } # Split into blocks of normal code or lines with conditionals. my $ifRegExp = qr/if 0|if|else|endif/; my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef); my ($if_lvl, $if0_lvl, $if0) = (0,0,0); my $lines = ''; for my $block (@blocks) { my $if; if ($block =~ /^#\s*($ifRegExp)/) { # #if/#if 0/#else/#endif processing $if = $1; if ($debug == 99) { print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block"); } if ($if eq 'if') { $if_lvl += 1; } elsif ($if eq 'if 0') { $if_lvl += 1; if ($if0_lvl == 0) { $if0_lvl = $if_lvl; $if0 = 1; # inside #if 0 } } elsif ($if eq 'else') { if ($if0_lvl == $if_lvl) { $if0 = 0; } } elsif ($if eq 'endif') { if ($if0_lvl == $if_lvl) { $if0 = 0; $if0_lvl = 0; } $if_lvl -= 1; if ($if_lvl < 0) { die "patsub: #if/#endif mismatch in $fileName" } } } if ($debug == 99) { print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n"); } # Keep preprocessor lines and blocks that are not enclosed in #if 0 if ($if or $if0 != 1) { $lines .= $block; } } $$codeRef = $lines; ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n"; return $codeRef; } } # block end # The below Regexp are based on those from: # https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811 # They are in the public domain. # 2. A regex which matches double-quoted strings. # ?s added so that strings containing a 'line continuation' # ( \ followed by a new-line) will match. my $DoubleQuotedStr = qr{ (?: ["] (?s: \\. | [^\"\\])* ["]) }x; # 3. A regex which matches single-quoted strings. my $SingleQuotedStr = qr{ (?: \' (?: \\. | [^\'\\])* [']) }x; # # MAIN # my $errorCount = 0; # The default list, which can be expanded. my @apiSummaryGroups = (); my $machine_readable_output = 0; # default: disabled my $check_hf = 1; # default: enabled my $check_value_string_array= 1; # default: enabled my $check_shadow = 1; # default: enabled my $debug_flag = 0; # default: disabled my $source_dir = ""; my $filenamelist = ""; my $help_flag = 0; my $pre_commit = 0; my $result = GetOptions( 'group=s' => \@apiGroups, 'summary-group=s' => \@apiSummaryGroups, 'Machine-readable' => \$machine_readable_output, 'check-hf!' => \$check_hf, 'check-value-string-array!' => \$check_value_string_array, 'check-shadow!' => \$check_shadow, 'sourcedir=s' => \$source_dir, 'debug' => \$debug_flag, 'pre-commit' => \$pre_commit, 'file=s' => \$filenamelist, 'help' => \$help_flag ); if (!$result || $help_flag) { print_usage(); exit(1); } # the pre-commit hook only calls checkAPIs one file at a time, so this # is safe to do globally (and easier) if ($pre_commit) { my $filename = $ARGV[0]; # if the filename is packet-*.c or packet-*.h, then we set the abort and termoutput groups. if ($filename =~ /\bpacket-[^\/\\]+\.[ch]$/) { push @apiGroups, "abort"; push @apiGroups, "termoutput"; } } # Add a 'function_count' anonymous hash to each of the 'apiGroup' entries in the %APIs hash. 
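# After the loop below, each group entry looks roughly like this
# (illustrative sketch; 'some_api' is a placeholder name):
#     $APIs{$group} = {
#         functions          => [ 'some_api', ... ],
#         function_counts    => { 'some_api' => undef, ... },
#         max_function_count => -1,   # 0 if the group's uses count as errors
#         cur_function_count => 0,
#         ...
#     };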
for my $apiGroup (keys %APIs) { my @functions = @{$APIs{$apiGroup}{functions}}; $APIs{$apiGroup}->{function_counts} = {}; @{$APIs{$apiGroup}->{function_counts}}{@functions} = (); # Add fcn names as keys to the anonymous hash $APIs{$apiGroup}->{max_function_count} = -1; if ($APIs{$apiGroup}->{count_errors}) { $APIs{$apiGroup}->{max_function_count} = 0; } $APIs{$apiGroup}->{cur_function_count} = 0; } my @filelist; push @filelist, @ARGV; if ("$filenamelist" ne "") { # We have a file containing a list of files to check (possibly in # addition to those on the command line). open(FC, $filenamelist) || die("Couldn't open $filenamelist"); while (<FC>) { # file names can be separated by ; push @filelist, split(';'); } close(FC); } die "no files to process" unless (scalar @filelist); # Read through the files; do various checks while ($_ = pop @filelist) { my $filename = $_; my $fileContents = ''; my @foundAPIs = (); my $line; if ($source_dir and ! -e $filename) { $filename = $source_dir . '/' . $filename; } if (! -e $filename) { warn "No such file: \"$filename\""; next; } # delete leading './' $filename =~ s{ ^ \. / } {}xo; unless (-f $filename) { print STDERR "Warning: $filename is not of type file - skipping.\n"; next; } # Read in the file (ouch, but it's easier that way) open(FC, $filename) || die("Couldn't open $filename"); $line = 1; while (<FC>) { $fileContents .= $_; eval { decode( 'UTF-8', $_, Encode::FB_CROAK ) }; if ($EVAL_ERROR) { print STDERR "Error: Found an invalid UTF-8 sequence on line " .$line. " of " .$filename."\n"; $errorCount++; } $line++; } close(FC); if (($fileContents =~ m{ \$Id .* \$ }xo)) { print STDERR "Warning: ".$filename." has an SVN Id tag. Please remove it!\n"; } if (($fileContents =~ m{ tab-width:\s*[0-7|9]+ | tabstop=[0-7|9]+ | tabSize=[0-7|9]+ }xo)) { # To quote Icf0831717de10fc615971fa1cf75af2f1ea2d03d : # HT tab stops are set every 8 spaces on UN*X; UN*X tools that treat an HT character # as tabbing to 4-space tab stops, or that even are configurable but *default* to # 4-space tab stops (I'm looking at *you*, Xcode!) are broken. tab-width: 4, # tabstop=4, and tabSize=4 are errors if you ever expect anybody to look at your file # with a UN*X tool, and every text file will probably be looked at by a UN*X tool at # some point, so Don't Do That. # # Can I get an "amen!"? print STDERR "Error: Found modelines with tabstops set to something other than 8 in " .$filename."\n"; $errorCount++; } # Remove C/C++ comments # The below pattern is modified (to keep newlines at the end of C++-style comments) from that at: # https://perldoc.perl.org/perlfaq6.html#How-do-I-use-a-regular-expression-to-strip-C-style-comments-from-a-file? $fileContents =~ s#/\*[^*]*\*+([^/*][^*]*\*+)*/|//([^\\]|[^\n][\n]?)*?\n|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^/"'\\]*)#defined $3 ? $3 : "\n"#gse; # optionally check the hf entries (including those under #if 0) if ($check_hf) { $errorCount += check_hf_entries(\$fileContents, $filename); } if ($fileContents =~ m{ %ll }xo) { # use PRI[dux...]N instead of ll print STDERR "Error: Found %ll in " .$filename."\n"; $errorCount++; } if ($fileContents =~ m{ %hh }xo) { # %hh is C99 and Windows doesn't like it: # http://connect.microsoft.com/VisualStudio/feedback/details/416843/sscanf-cannot-not-handle-hhd-format # Need to use temporary variables instead. 
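        # e.g. (illustrative C): instead of
        #     sscanf(buf, "%hhu", &val8);
        # scan into a wider temporary and assign:
        #     unsigned int tmp; sscanf(buf, "%u", &tmp); val8 = (guint8)tmp;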
print STDERR "Error: Found %hh in " .$filename."\n"; $errorCount++; } # check for files that we should not include directly # this must be done before quoted strings (#include "file.h") are removed check_included_files(\$fileContents, $filename); # Check for value_string and enum_val_t errors: NULL termination, # const-nes, and newlines within strings if ($check_value_string_array) { $errorCount += check_value_string_arrays(\$fileContents, $filename, $debug_flag); } # Remove all the quoted strings $fileContents =~ s{ $DoubleQuotedStr | $SingleQuotedStr } []xog; $errorCount += check_pref_var_dupes(\$fileContents, $filename); # Remove all blank lines $fileContents =~ s{ ^ \s* $ } []xog; # Remove all '#if 0'd' code remove_if0_code(\$fileContents, $filename); $errorCount += check_ett_registration(\$fileContents, $filename); #checkAPIsCalledWithTvbGetPtr(\@TvbPtrAPIs, \$fileContents, \@foundAPIs); #if (@foundAPIs) { # print STDERR "Found APIs with embedded tvb_get_ptr() calls in ".$filename." : ".join(',', @foundAPIs)."\n" #} if ($check_shadow) { check_shadow_variable(\@ShadowVariable, \$fileContents, \@foundAPIs); if (@foundAPIs) { print STDERR "Warning: Found shadow variable(s) in ".$filename." : ".join(',', @foundAPIs)."\n" } } check_snprintf_plus_strlen(\$fileContents, $filename); $errorCount += check_proto_tree_add_XXX(\$fileContents, $filename); $errorCount += check_try_catch(\$fileContents, $filename); # Check and count APIs for my $groupArg (@apiGroups) { my $pfx = "Warning"; @foundAPIs = (); my @groupParts = split(/:/, $groupArg); my $apiGroup = $groupParts[0]; my $curFuncCount = 0; if (scalar @groupParts > 1) { $APIs{$apiGroup}->{max_function_count} = $groupParts[1]; } findAPIinFile($APIs{$apiGroup}, \$fileContents, \@foundAPIs); for my $api (keys %{$APIs{$apiGroup}->{function_counts}} ) { $curFuncCount += $APIs{$apiGroup}{function_counts}{$api}; } # If we have a max function count and we've exceeded it, treat it # as an error. if (!$APIs{$apiGroup}->{count_errors} && $APIs{$apiGroup}->{max_function_count} >= 0) { if ($curFuncCount > $APIs{$apiGroup}->{max_function_count}) { print STDERR $pfx . ": " . $apiGroup . " exceeds maximum function count: " . $APIs{$apiGroup}->{max_function_count} . "\n"; $APIs{$apiGroup}->{count_errors} = 1; } } if ($curFuncCount <= $APIs{$apiGroup}->{max_function_count}) { next; } if ($APIs{$apiGroup}->{count_errors}) { # the use of "prohibited" APIs is an error, increment the error count $errorCount += @foundAPIs; $pfx = "Error"; } if (@foundAPIs && ! $machine_readable_output) { print STDERR $pfx . ": Found " . $apiGroup . " APIs in ".$filename.": ".join(',', @foundAPIs)."\n"; } if (@foundAPIs && $machine_readable_output) { for my $api (@foundAPIs) { printf STDERR "%-8.8s %-20.20s %-30.30s %-45.45s\n", $pfx, $apiGroup, $filename, $api; } } } } # Summary: Print Use Counts of each API in each requested summary group if (scalar @apiSummaryGroups > 0) { my $fileline = join(", ", @ARGV); printf "\nSummary for " . substr($fileline, 0, 65) . "…\n"; for my $apiGroup (@apiSummaryGroups) { printf "\nUse counts for %s (maximum allowed total is %d)\n", $apiGroup, $APIs{$apiGroup}->{max_function_count}; for my $api (sort {"\L$a" cmp "\L$b"} (keys %{$APIs{$apiGroup}->{function_counts}} )) { if ($APIs{$apiGroup}{function_counts}{$api} < 1) { next; } printf "%5d %-40.40s\n", $APIs{$apiGroup}{function_counts}{$api}, $api; } } } exit($errorCount > 120 ? 
120 : $errorCount); # # Editor modelines - https://www.wireshark.org/tools/modelines.html # # Local variables: # c-basic-offset: 8 # tab-width: 8 # indent-tabs-mode: nil # End: # # vi: set shiftwidth=8 tabstop=8 expandtab: # :indentSize=8:tabSize=8:noTabs=true: #
Perl
wireshark/tools/checkfiltername.pl
#!/usr/bin/perl
my $debug = 0;
# 0: off
# 1: specific debug
# 2: full debug

#
# verify that display filter names correspond with the PROTABBREV
# of the dissector. Enforces the dissector to have a source
# filename of format packet-PROTABBREV.c
#
# Usage: checkfiltername.pl <file or files>
#
# Copyright 2011 Michael Mann (see AUTHORS file)
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Example:
# ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkfiltername.pl packet-3com-xns.c
# packet-3com-xns.c (2 (of 2) fields)
# 102 3comxns.type doesn't match PROTOABBREV of 3com-xns
# 106 3comxns.type doesn't match PROTOABBREV of 3com-xns
#
# or checkfiltername.pl packet-*.c, which will check all the dissector files.
#
#

use warnings;
use strict;
use Getopt::Long;

my @elements;
my @elements_dup;
my @protocols;
my %filters;
my %expert_filters;
my @acceptedprefixes = ("dcerpc-");
my @asn1automatedfilelist;
my @dcerpcautomatedfilelist;
my @idl2wrsautomatedfilelist;
my @filemanipulationfilelist;
my @prefixfilelist;
my @nofieldfilelist;
my %unique;
my @uniquefilelist;
my @noregprotocolfilelist;
my @periodinfilternamefilelist;
my $showlinenoFlag = '';
my $showautomatedFlag = '';
my $state = "";
# "s_unknown",
# "s_start",
# "s_in_hf_register_info",
# "s_hf_register_info_entry",
# "s_header_field_info_entry",
# "s_header_field_info_entry_start",
# "s_header_field_info_entry_name",
# "s_header_field_info_entry_abbrev",
# "s_header_field_info_entry_abbrev_end",
# "s_start_expert",
# "s_in_ei_register_info",
# "s_ei_register_info_entry",
# "s_ei_register_info_entry_start",
# "s_ei_register_info_entry_abbrev_end",
# "s_nofields"
my $restofline;
my $filecount = 0;
my $currfile = "";
my $protabbrev = "";
my $protabbrev_index;
my $PFNAME_value = "";
my $linenumber = 1;
my $totalerrorcount = 0;
my $errorfilecount = 0;
my $onefield = 0;
my $nofields = 0;
my $noperiod = 0;
my $noregprotocol = 1;
my $automated = 0;
my $more_tokens;
my $showall = 0;
my $comment = 0;

sub checkprotoabbrev {
    my $abbrev = "";
    my $abbrevpos;
    my $proto_abbrevpos1;
    my $proto_abbrevpos2;
    my $afterabbrev = "";
    my $check_dup_abbrev = "";
    my $modprotabbrev = "";
    my $errorline = 0;
    my $prefix;

    if (($automated == 0) || ($showall == 1)) {
        $abbrevpos = index($_[0], ".");
        if ($abbrevpos == -1) {
            $abbrev = $_[0];
        } else {
            $abbrev = substr($_[0], 0, $abbrevpos);
            $afterabbrev = substr($_[0], $abbrevpos+1, length($_[0])-$abbrevpos);
            $check_dup_abbrev = $afterabbrev;
            $afterabbrev = substr($afterabbrev, 0, length($abbrev));
        }

        if ($abbrev ne $protabbrev) {
            $errorline = 1;
            #check if there is a supported protocol that matches the abbrev.
            #This may be a case of filename != PROTOABBREV
            foreach (@protocols) {
                if ($abbrev eq $_) {
                    $errorline = 0;
                } elsif (index($_, ".") != -1) {
                    #compare from start of string for each period found
                    $proto_abbrevpos1 = 0;
                    while ((($proto_abbrevpos2 = index($_, ".", $proto_abbrevpos1)) != -1) &&
                           ($errorline == 1)) {
                        if ($abbrev eq substr($_, 0, $proto_abbrevpos2)) {
                            $errorline = 0;
                        }
                        $proto_abbrevpos1 = $proto_abbrevpos2+1;
                    }
                }
            }
        }

        # find any underscores that preface or follow a period
        if (((index($_[0], "._") >= 0) || (index($_[0], "_.") >= 0)) &&
            #ASN.1 dissectors can intentionally generate this field name, so don't fault the dissector
            (index($_[0], "_untag_item_element") < 0)) {
            if ($showlinenoFlag) {
                push(@elements, "$_[1] $_[0] contains an unnecessary \'_\'\n");
            } else {
                push(@elements, "$_[0] contains an unnecessary \'_\'\n");
            }
        }

        if (($errorline == 1) && ($showall == 0)) {
            #try some "accepted" variations of PROTOABBREV
            #replace '-' with '_'
            $modprotabbrev = $protabbrev;
            $modprotabbrev =~ s/-/_/g;
            if ($abbrev eq $modprotabbrev) {
                $errorline = 0;
            }

            #remove '-'
            if ($errorline == 1) {
                $modprotabbrev = $protabbrev;
                $modprotabbrev =~ s/-//g;
                if ($abbrev eq $modprotabbrev) {
                    $errorline = 0;
                }
            }

            #remove '_'
            if ($errorline == 1) {
                $modprotabbrev = $protabbrev;
                $modprotabbrev =~ s/_//g;
                if ($abbrev eq $modprotabbrev) {
                    $errorline = 0;
                }
            }

            if ($errorline == 1) {
                #remove any "accepted" prefix to see if there is still a problem
                foreach (@acceptedprefixes) {
                    if ($protabbrev =~ /^$_/) {
                        $modprotabbrev = substr($protabbrev, length($_));
                        if ($abbrev eq $modprotabbrev) {
                            push(@prefixfilelist, "$currfile\n");
                            $errorline = 0;
                        }
                    }
                }
            } else {
                push(@filemanipulationfilelist, "$currfile\n");
            }

            #now check the acceptable "fields from a different protocol"
            if ($errorline == 1) {
                if (is_from_other_protocol_allowed($_[0], $currfile) == 1) {
                    $errorline = 0;
                }
            }

            #now check the acceptable "fields that include a version number"
            if ($errorline == 1) {
                if (is_protocol_version_allowed($_[0], $currfile) == 1) {
                    $errorline = 0;
                }
            }
        }

        if ($errorline == 1) {
            $debug>1 && print "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n";
            if ($showlinenoFlag) {
                push(@elements, "$_[1] $_[0] doesn't match PROTOABBREV of $protabbrev\n");
            } else {
                push(@elements, "$_[0] doesn't match PROTOABBREV of $protabbrev\n");
            }
        }

        if (($abbrev ne "") && (lc($abbrev) eq lc($afterabbrev))) {
            # Allow ASN.1 generated files to duplicate part of proto name
            if ((!(grep {$currfile eq $_ } @asn1automatedfilelist)) &&
                # Check allowed list
                (is_proto_dup_allowed($abbrev, $check_dup_abbrev) == 0)) {
                if ($showlinenoFlag) {
                    push(@elements_dup, "$_[1] $_[0] duplicates PROTOABBREV of $abbrev\n");
                } else {
                    push(@elements_dup, "$_[0] duplicates PROTOABBREV of $abbrev\n");
                }
            }
        }
    }
}

sub printprevfile {
    my $totalfields = keys(%filters);
    my $count_ele;
    my $count_dup;
    my $total_count;

    foreach (sort keys %filters) {
        checkprotoabbrev ($filters{$_}, $_);
    }

    foreach (sort keys %expert_filters) {
        checkprotoabbrev ($expert_filters{$_}, $_);
    }

    $count_ele = @elements;
    $count_dup = @elements_dup;
    $total_count = $count_ele+$count_dup;

    if ($noregprotocol == 1) {
        #if no protocol is registered, only worry about duplicates
        if ($currfile ne "") {
            push(@noregprotocolfilelist, "$currfile\n");
        }

        if ($count_dup > 0) {
            $errorfilecount++;
            $totalerrorcount += $count_dup;
        }

        if (($showall == 1) || ($count_dup > 0)) {
            print "\n\n$currfile - NO PROTOCOL REGISTERED\n";
            if ($showall == 1) {
                #everything is included, so count all errors
                $totalerrorcount += $count_ele;
                if (($count_ele > 0) &&
($count_dup == 0)) { $errorfilecount++; } foreach (@elements) { print $_; } } foreach (@elements_dup) { print $_; } } } else { if ($total_count > 0) { $errorfilecount++; $totalerrorcount += $total_count; } if (($automated == 0) || ($showall == 1)) { if ($total_count > 0) { if ($automated == 1) { if ($showall == 1) { print "\n\n$currfile - AUTOMATED ($total_count (of $totalfields) fields)\n"; } } else { print "\n\n$currfile ($total_count (of $totalfields) fields)\n"; } foreach (@elements) { print $_; } foreach (@elements_dup) { print $_; } } if ((($nofields) || ($totalfields == 0)) && ($currfile ne "")) { if ($showall == 1) { print "\n\n$currfile - NO FIELDS\n"; } push(@nofieldfilelist, "$currfile\n"); } } } } #-------------------------------------------------------------------- # This is a list of dissectors that intentionally have filter names # where the second segment duplicates (at least partially) the name # of the first. The most common case is in ASN.1 dissectors, but # those can be dealt with by looking at the first few lines of the # dissector. This list has been vetted and justification will need # to be provided to add to it. Acknowledge these dissectors aren't # a problem for the pre-commit script #-------------------------------------------------------------------- sub is_proto_dup_allowed { if (($_[0] eq "amf") && (index($_[1], "amf0") >= 0)) {return 1;} if (($_[0] eq "amf") && (index($_[1], "amf3") >= 0)) {return 1;} if (($_[0] eq "amqp") && (index($_[1], "amqp") >= 0)) {return 1;} if (($_[0] eq "bat") && (index($_[1], "batman") >= 0)) {return 1;} if (($_[0] eq "browser") && (index($_[1], "browser_") >= 0)) {return 1;} if (($_[0] eq "data") && (index($_[1], "data") >= 0)) {return 1;} if (($_[0] eq "dlsw") && (index($_[1], "dlsw_version") >= 0)) {return 1;} if (($_[0] eq "dns") && (index($_[1], "dnskey") >= 0)) {return 1;} if (($_[0] eq "ecmp") && (index($_[1], "ecmp_") >= 0)) {return 1;} if (($_[0] eq "exported_pdu") && (index($_[1], "exported_pdu") >= 0)) {return 1;} if (($_[0] eq "fc") && (index($_[1], "fctl") >= 0)) {return 1;} if (($_[0] eq "fcs") && (index($_[1], "fcsmask") >= 0)) {return 1;} if (($_[0] eq "fmp") && (index($_[1], "fmp") >= 0)) {return 1;} if (($_[0] eq "fr") && (index($_[1], "frame_relay") >= 0)) {return 1;} if (($_[0] eq "lustre") && (index($_[1], "lustre_") >= 0)) {return 1;} if (($_[0] eq "mac") && (index($_[1], "macd") >= 0)) {return 1;} if (($_[0] eq "mac") && (index($_[1], "macis") >= 0)) {return 1;} if (($_[0] eq "mih") && (index($_[1], "mihf") >= 0)) {return 1;} if (($_[0] eq "mih") && (index($_[1], "mihcap") >= 0)) {return 1;} if (($_[0] eq "ncp") && (index($_[1], "ncp") >= 0)) {return 1;} if (($_[0] eq "nfs") && (index($_[1], "nfs") >= 0)) {return 1;} if (($_[0] eq "oxid") && (index($_[1], "oxid") >= 0)) {return 1;} if (($_[0] eq "rquota") && (index($_[1], "rquota") >= 0)) {return 1;} if (($_[0] eq "pfcp") && (index($_[1], "pfcp") >= 0)) {return 1;} if (($_[0] eq "sm") && (index($_[1], "sm_") >= 0)) {return 1;} if (($_[0] eq "smpp") && (index($_[1], "smppplus") >= 0)) {return 1;} if (($_[0] eq "spray") && (index($_[1], "sprayarr") >= 0)) {return 1;} if (($_[0] eq "stat") && (index($_[1], "stat_") >= 0)) {return 1;} if (($_[0] eq "stat") && (index($_[1], "state") >= 0)) {return 1;} if (($_[0] eq "tds") && (index($_[1], "tds_") >= 0)) {return 1;} if (($_[0] eq "time") && (index($_[1], "time") >= 0)) {return 1;} if (($_[0] eq "tn3270") && (index($_[1], "tn3270e") >= 0)) {return 1;} if (($_[0] eq "usb") && (index($_[1], "usb") >= 0)) {return 1;} 
if (($_[0] eq "xml") && (index($_[1], "xml") >= 0)) {return 1;} return 0; } #-------------------------------------------------------------------- # This is a list of dissectors that intentionally have filter names # shared with other dissectors. This list has been vetted and # justification will need to be provided to add to it. # Acknowledge these dissectors aren't a problem for the pre-commit script #-------------------------------------------------------------------- sub is_from_other_protocol_allowed { my $proto_filename; my $dir_index = rindex($_[1], "\\"); #handle directory names on all platforms if ($dir_index < 0) { $dir_index = rindex($_[1], "/"); } if ($dir_index < 0) { $proto_filename = $_[1]; } else { $proto_filename = substr($_[1], $dir_index+1); } # XXX - may be faster to hash this (note 1-many relationship)? if (($proto_filename eq "packet-atalk.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-awdl.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-bpdu.c") && (index($_[0], "mstp") >= 0)) {return 1;} if (($proto_filename eq "packet-bssap.c") && (index($_[0], "bsap") >= 0)) {return 1;} if (($proto_filename eq "packet-caneth.c") && (index($_[0], "can") >= 0)) {return 1;} if (($proto_filename eq "packet-cimetrics.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "cip") >= 0)) {return 1;} if (($proto_filename eq "packet-cipsafety.c") && (index($_[0], "enip") >= 0)) {return 1;} if (($proto_filename eq "packet-dcerpc-netlogon.c") && (index($_[0], "ntlmssp") >= 0)) {return 1;} if (($proto_filename eq "packet-dcom-oxid.c") && (index($_[0], "dcom") >= 0)) {return 1;} if (($proto_filename eq "packet-dvb-data-mpe.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;} if (($proto_filename eq "packet-dvb-ipdc.c") && (index($_[0], "ipdc") >= 0)) {return 1;} if (($proto_filename eq "packet-enip.c") && (index($_[0], "cip") >= 0)) {return 1;} if (($proto_filename eq "packet-extreme.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-fmp_notify.c") && (index($_[0], "fmp") >= 0)) {return 1;} if (($proto_filename eq "packet-foundry.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-glusterfs.c") && (index($_[0], "gluster") >= 0)) {return 1;} if (($proto_filename eq "packet-h248_annex_e.c") && (index($_[0], "h248") >= 0)) {return 1;} if (($proto_filename eq "packet-h248_q1950.c") && (index($_[0], "h248") >= 0)) {return 1;} if (($proto_filename eq "packet-ieee1722.c") && (index($_[0], "can") >= 0)) {return 1;} if (($proto_filename eq "packet-ieee80211.c") && (index($_[0], "eapol") >= 0)) {return 1;} if (($proto_filename eq "packet-ieee80211-radio.c") && (index($_[0], "wlan") >= 0)) {return 1;} if (($proto_filename eq "packet-ieee80211-wlancap.c") && (index($_[0], "wlan") >= 0)) {return 1;} if (($proto_filename eq "packet-ieee802154.c") && (index($_[0], "wpan") >= 0)) {return 1;} if (($proto_filename eq "packet-isup.c") && (index($_[0], "ansi_isup") >= 0)) {return 1;} if (($proto_filename eq "packet-isup.c") && (index($_[0], "bat_ase") >= 0)) {return 1;} if (($proto_filename eq "packet-isup.c") && (index($_[0], "nsap") >= 0)) {return 1;} if (($proto_filename eq "packet-isup.c") && (index($_[0], "x213") >= 0)) {return 1;} if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_ddp") >= 0)) {return 1;} if (($proto_filename eq "packet-iwarp-ddp-rdmap.c") && (index($_[0], "iwarp_rdma") >= 0)) {return 1;} if 
(($proto_filename eq "packet-k12.c") && (index($_[0], "aal2") >= 0)) {return 1;} if (($proto_filename eq "packet-k12.c") && (index($_[0], "atm") >= 0)) {return 1;} if (($proto_filename eq "packet-m3ua.c") && (index($_[0], "mtp3") >= 0)) {return 1;} if (($proto_filename eq "packet-mle.c") && (index($_[0], "wpan") >= 0)) {return 1;} if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "mpeg_sect") >= 0)) {return 1;} if (($proto_filename eq "packet-mpeg-dsmcc.c") && (index($_[0], "etv.dsmcc") >= 0)) {return 1;} if (($proto_filename eq "packet-mpeg1.c") && (index($_[0], "rtp.payload_mpeg_") >= 0)) {return 1;} if (($proto_filename eq "packet-mysql.c") && (index($_[0], "mariadb") >= 0)) {return 1;} if (($proto_filename eq "packet-ndps.c") && (index($_[0], "spx.ndps_") >= 0)) {return 1;} if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "atm") >= 0)) {return 1;} if (($proto_filename eq "packet-pw-atm.c") && (index($_[0], "pw") >= 0)) {return 1;} if (($proto_filename eq "packet-scsi.c") && (index($_[0], "scsi_sbc") >= 0)) {return 1;} if (($proto_filename eq "packet-sndcp-xid.c") && (index($_[0], "llcgprs") >= 0)) {return 1;} if (($proto_filename eq "packet-wlccp.c") && (index($_[0], "llc") >= 0)) {return 1;} if (($proto_filename eq "packet-wps.c") && (index($_[0], "eap") >= 0)) {return 1;} if (($proto_filename eq "packet-wsp.c") && (index($_[0], "wap") >= 0)) {return 1;} if (($proto_filename eq "packet-xot.c") && (index($_[0], "x25") >= 0)) {return 1;} if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_hvac") >= 0)) {return 1;} if (($proto_filename eq "packet-zbee-zcl-misc.c") && (index($_[0], "zbee_zcl_ias") >= 0)) {return 1;} #Understand why, but I think it could be prefixed with "dissector" #prefix (which isn't necessarily "protocol") if (($proto_filename eq "packet-rtcp.c") && (index($_[0], "srtcp") >= 0)) {return 1;} if (($proto_filename eq "packet-rtp.c") && (index($_[0], "srtp") >= 0)) {return 1;} if (($proto_filename eq "packet-dcom-cba-acco.c") && (index($_[0], "cba") >= 0)) {return 1;} if (($proto_filename eq "packet-dcom-cba.c") && (index($_[0], "cba") >= 0)) {return 1;} #XXX - HACK to get around nested "s in field name if (($proto_filename eq "packet-gsm_sim.c") && (index($_[0], "e\\") >= 0)) {return 1;} return 0; } #-------------------------------------------------------------------- # This is a list of dissectors that use their (protocol) version number # as part of the first display filter segment, which checkfiltername # usually complains about. Manually allow them so that they can pass # pre-commit script #-------------------------------------------------------------------- sub is_protocol_version_allowed { my $proto_filename; my $dir_index = rindex($_[1], "\\"); #handle directory names on all platforms if ($dir_index < 0) { $dir_index = rindex($_[1], "/"); } if ($dir_index < 0) { $proto_filename = $_[1]; } else { $proto_filename = substr($_[1], $dir_index+1); } # XXX - may be faster to hash this? 
if (($proto_filename eq "packet-ehs.c") && (index($_[0], "ehs2") >= 0)) {return 1;} if (($proto_filename eq "packet-hsrp.c") && (index($_[0], "hsrp2") >= 0)) {return 1;} if (($proto_filename eq "packet-ipv6.c") && (index($_[0], "ip") >= 0)) {return 1;} if (($proto_filename eq "packet-openflow_v1.c") && (index($_[0], "openflow") >= 0)) {return 1;} if (($proto_filename eq "packet-rtnet.c") && (index($_[0], "tdma-v1") >= 0)) {return 1;} if (($proto_filename eq "packet-scsi-osd.c") && (index($_[0], "scsi_osd2") >= 0)) {return 1;} if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_5") >= 0)) {return 1;} if (($proto_filename eq "packet-sflow.c") && (index($_[0], "sflow_245") >= 0)) {return 1;} if (($proto_filename eq "packet-tipc.c") && (index($_[0], "tipcv2") >= 0)) {return 1;} if (($proto_filename eq "packet-bluetooth.c") && (index($_[0], "llc.bluetooth_pid") >= 0)) {return 1;} return 0; } # --------------------------------------------------------------------- # # MAIN # GetOptions( 'showlineno' => \$showlinenoFlag, 'showautomated' => \$showautomatedFlag, ); while (<>) { if ($currfile !~ /$ARGV/) { &printprevfile(); # New file - reset array and state $filecount++; $currfile = $ARGV; #determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c or (dirs)/file-PROTABBREV.c $protabbrev_index = rindex($currfile, "packet-"); if ($protabbrev_index == -1) { $protabbrev_index = rindex($currfile, "file-"); if ($protabbrev_index == -1) { #ignore "non-dissector" files next; } $protabbrev = substr($currfile, $protabbrev_index+length("file-")); $protabbrev_index = rindex($protabbrev, "."); if ($protabbrev_index == -1) { print "$currfile doesn't fit format of file-PROTABBREV.c\n"; next; } } else { $protabbrev = substr($currfile, $protabbrev_index+length("packet-")); $protabbrev_index = rindex($protabbrev, "."); if ($protabbrev_index == -1) { print "$currfile doesn't fit format of packet-PROTABBREV.c\n"; next; } } $protabbrev = substr($protabbrev, 0, $protabbrev_index); $PFNAME_value = ""; $noregprotocol = 1; $automated = 0; $nofields = 0; $onefield = 0; $noperiod = 0; $linenumber = 1; %filters = ( ); %expert_filters = ( ); @protocols = ( ); @elements = ( ); @elements_dup = ( ); $state = "s_unknown"; } if (($automated == 0) && ($showautomatedFlag eq "")) { #DCERPC automated files if ($_ =~ "DO NOT EDIT") { push(@dcerpcautomatedfilelist, "$currfile\n"); $automated = 1; next; } #ASN.1 automated files elsif ($_ =~ "Generated automatically by the ASN.1 to Wireshark dissector compiler") { push(@asn1automatedfilelist, "$currfile\n"); $automated = 1; next; } #idl2wrs automated files elsif ($_ =~ "Autogenerated from idl2wrs") { push(@idl2wrsautomatedfilelist, "$currfile\n"); $automated = 1; next; } } # opening then closing comment if (/(.*?)\/\*.*\*\/(.*)/) { $comment = 0; $_ = "$1$2"; # closing then opening comment } elsif (/.*?\*\/(.*?)\/\*/) { $comment = 1; $_ = "$1"; # opening comment } elsif (/(.*?)\/\*/) { $comment = 1; $_ = "$1"; # closing comment } elsif (/\*\/(.*?)/) { $comment = 0; $_ = "$1"; } elsif ($comment == 1) { $linenumber++; next; } # unhandled: more than one complete comment per line chomp; #proto_register_protocol state machine $restofline = $_; $more_tokens = 1; #PFNAME is a popular #define for the proto filter name, so use it for testing if ($restofline =~ /#define\s*PFNAME\s*\"([^\"]*)\"/) { $PFNAME_value = $1; $debug>1 && print "PFNAME: '$1'\n"; } until ($more_tokens == 0) { if (($restofline =~ /proto_register_protocol\s*\((.*)/) || ($restofline =~ 
/proto_register_protocol_in_name_only\s*\((.*)/)) { $noregprotocol = 0; $restofline = $1; $state = "s_proto_start"; } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) { $restofline = $3; $state = "s_proto_long_name"; $debug>1 && print "proto long name: '$2'\n"; } elsif (($state eq "s_proto_start") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) { $restofline = $4; $state = "s_proto_long_name"; $debug>1 && print "proto long name: '$2'\n"; } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*\"([^\"]*)\"\s*,)\s*(.*)/)) { $restofline = $3; $state = "s_proto_short_name"; $debug>1 && print "proto short name: '$2'\n"; } elsif (($state eq "s_proto_long_name") && ($restofline =~ /^(\s*(([\w\d])+)\s*,)\s*(.*)/)) { $restofline = $4; $state = "s_proto_short_name"; $debug>1 && print "proto short name: '$2'\n"; } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*PFNAME\s*(.*)/)) { $more_tokens = 0; $state = "s_proto_filter_name"; if ((index($PFNAME_value, ".") != -1) && ($noperiod == 0)) { push(@periodinfilternamefilelist, "$currfile\n"); $noperiod = 1; } push(@protocols, $PFNAME_value); $debug>1 && print "proto filter name: '$PFNAME_value'\n"; } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*\"([^\"]*)\"\s*(.*)/)) { $more_tokens = 0; $state = "s_proto_filter_name"; if ((index($1, ".") != -1) && ($noperiod == 0)) { push(@periodinfilternamefilelist, "$currfile\n"); $noperiod = 1; } push(@protocols, $1); $debug>1 && print "proto filter name: '$1'\n"; } elsif (($state eq "s_proto_short_name") && ($restofline =~ /\s*(([\w\d])+)\s*(.*)/)) { $more_tokens = 0; $state = "s_proto_filter_name"; $debug>1 && print "proto filter name: '$1'\n"; } else { $more_tokens = 0; } } #retrieving display filters state machine $restofline = $_; $more_tokens = 1; until ($more_tokens == 0) { if ($restofline =~ /\s*static\s*hf_register_info\s*(\w+)\[\](.*)/) { $restofline = $2; $state = "s_start"; $debug>1 && print "$linenumber $state\n"; } elsif ($restofline =~ /\s*static\s*ei_register_info\s*(\w+)\[\](.*)/) { $restofline = $2; $state = "s_start_expert"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_start") && ($restofline =~ /\W+{(.*)/)) { $restofline = $1; $state = "s_in_hf_register_info"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\W+{(.*)/)) { $restofline = $1; $state = "s_hf_register_info_entry"; $debug>1 && print "$linenumber $state\n"; $onefield = 1; } elsif (($state eq "s_in_hf_register_info") && ($restofline =~ /\s*};(.*)/)) { $restofline = $1; if ($onefield == 0) { $debug && print "$linenumber NO FIELDS!!!\n"; $nofields = 1; $state = "s_nofields"; $more_tokens = 0; } else { $state = "s_unknown"; } } elsif (($state eq "s_hf_register_info_entry") && ($restofline =~ /\s*&\s*(hf_\w*(\[w*\])?)\s*,?(.*)/)) { $restofline = $3; $debug>1 && print "$linenumber hf_register_info_entry: $1\n"; $state = "s_header_field_info_entry"; } elsif (($state eq "s_header_field_info_entry") && ($restofline =~ /\s*{(.*)/)) { $restofline = $1; $state = "s_header_field_info_entry_start"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_header_field_info_entry_start") && ($restofline =~ /((\"([^\"]*)\")|(\w+))\s*,(.*)/)) { $restofline = $5; $debug>1 && print "$linenumber header_field_info_entry_name: $1\n"; $state = "s_header_field_info_entry_name"; } elsif (($state eq "s_header_field_info_entry_name") && ($restofline =~ /\"([^\"]*)\"\s*,?(.*)/)) { $restofline = $2; 
$debug>1 && print "$linenumber header_field_info_entry_abbrev: $1\n"; $state = "s_header_field_info_entry_abbrev"; $filters{$linenumber} = $1; } elsif (($state eq "s_header_field_info_entry_abbrev") && ($restofline =~ /[^}]*}(.*)/)) { $restofline = $1; $state = "s_header_field_info_entry_abbrev_end"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_header_field_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) { $restofline = $1; $state = "s_in_hf_register_info"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_start_expert") && ($restofline =~ /\W+{(.*)/)) { $restofline = $1; $state = "s_in_ei_register_info"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\W+{(.*)/)) { $restofline = $1; $state = "s_ei_register_info_entry"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_in_ei_register_info") && ($restofline =~ /\s*};(.*)/)) { $restofline = $1; $state = "s_unknown"; } elsif (($state eq "s_ei_register_info_entry") && ($restofline =~ /\s*{(.*)/)) { $restofline = $1; $state = "s_ei_register_info_entry_start"; $debug>1 && print "$linenumber $state\n"; } elsif (($state eq "s_ei_register_info_entry_start") && ($restofline =~ /\"([^\"]*)\"\s*,(.*)/)) { $restofline = $2; $debug>1 && print "$linenumber ei_register_info_entry_abbrev: $1\n"; $expert_filters{$linenumber} = $1; $state = "s_ei_register_info_entry_abbrev_end"; } elsif (($state eq "s_ei_register_info_entry_abbrev_end") && ($restofline =~ /[^}]*}(.*)/)) { $restofline = $1; $state = "s_in_ei_register_info"; $debug>1 && print "$linenumber $state\n"; } else { $more_tokens = 0; } } $linenumber++; } &printprevfile(); if ($totalerrorcount > 0) { print "\n\nTOTAL ERRORS: $totalerrorcount"; if ($filecount > 1) { print " ($errorfilecount files)\n"; print "NO FIELDS: " . scalar(@nofieldfilelist) . "\n"; print "AUTOMATED: " . (scalar(@asn1automatedfilelist) + scalar(@dcerpcautomatedfilelist) + scalar(@idl2wrsautomatedfilelist)) . "\n"; print "NO PROTOCOL: " . scalar(@noregprotocolfilelist) . "\n"; print "\nASN.1 AUTOMATED FILE LIST\n"; foreach (@asn1automatedfilelist) { print $_; } print "\nDCE/RPC AUTOMATED FILE LIST\n"; foreach (@dcerpcautomatedfilelist) { print $_; } print "\nIDL2WRS AUTOMATED FILE LIST\n"; foreach (@idl2wrsautomatedfilelist) { print $_; } print "\n\"FILE MANIPULATION\" FILE LIST\n"; @uniquefilelist = grep{ not $unique{$_}++} @filemanipulationfilelist; foreach (@uniquefilelist) { print $_; } print "\nREMOVE PREFIX FILE LIST\n"; @uniquefilelist = grep{ not $unique{$_}++} @prefixfilelist; foreach (@uniquefilelist) { print $_; } print "\nNO PROTOCOL REGISTERED FILE LIST\n"; foreach (@noregprotocolfilelist) { print $_; } print "\nNO FIELDS FILE LIST\n"; foreach (@nofieldfilelist) { print $_; } print "\nPERIOD IN PROTO FILTER NAME FILE LIST\n"; foreach (@periodinfilternamefilelist) { print $_; } } else { print "\n"; } exit(1); # exit 1 if ERROR } __END__
Perl
wireshark/tools/checkhf.pl
#!/usr/bin/env perl
#
# Copyright 2013, William Meier (See AUTHORS file)
#
# Validate hf_... and ei_... usage for a dissector file;
#
# Usage: checkhf.pl [--debug=?] <file or files>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#

## Note: This program is a re-implementation of the
##   original checkhf.pl written and (C) by Joerg Mayer.
##   The overall objective of the new implementation was to reduce
##   the number of false positives which occurred with the
##   original checkhf.pl
##
## This program can be used to scan original .c source files or source
##   files which have been passed through a C pre-processor.
##   Operating on pre-processed source files is optimal; there should be
##   minimal false positives.
##   If the .c input is an original source file there may very well be
##   false positives/negatives due to the fact that the hf_... variables etc.
##   may be created via macros.
##
## ----- (The following is extracted from the original checkhf.pl with thanks to Joerg) -------
## Example:
## ~/work/wireshark/trunk/epan/dissectors> ../../tools/checkhf.pl packet-afs.c
## Unused entry: packet-afs.c, hf_afs_ubik_voteend
## Unused entry: packet-afs.c, hf_afs_ubik_errcode
## Unused entry: packet-afs.c, hf_afs_ubik_votetype
## ERROR: NO ARRAY: packet-afs.c, hf_afs_fs_ipaddr
##
## or checkhf.pl packet-*.c, which will check all the dissector files.
##
## NOTE: This tool currently generates false positives!
##
## The "NO ARRAY" messages - if accurate - point to an error that will
## cause (t|wire)shark to report a DISSECTOR_BUG when a packet containing
## this particular element is being dissected.
##
## The "Unused entry" message indicates the opposite: We define an entry but
## never use it (e.g., in a proto_...add... function).
## ------------------------------------------------------------------------------------

# ------------------------------------------------------------------------------------
# Main
#
# Logic:
# 1. Clean the input: remove blank lines, comments, quoted strings and code under '#if 0'.
# 2. hf_defs:
#        Find (and remove from input) list of hf_... variable
#        definitions ('static? g?int hf_... ;')
# 3. hf_array_entries:
#        Find (and remove from input) list of hf_... variables
#        referenced in the hf[] entries;
# 4. hf_usage:
#        From the remaining input, extract list of all strings of form hf_...
#        (which may include strings which are not actually valid
#        hf_... variable references).
# 5. Checks:
#        If entries in hf_defs not in hf_usage then "unused" (for static hf_defs only)
#        If entries in hf_defs not in hf_array_entries then "ERROR: NO ARRAY";

use strict;
use warnings;

use Getopt::Long;

my $help_flag = '';
my $debug = 0; # default: off; 1=cmt; 2=#if0; 3=hf_defs; 4=hf_array_entries; 5=hfusage (See code)

my $sts = GetOptions(
    'debug=i' => \$debug,
    'help|?'
=> \$help_flag ); if (!$sts || $help_flag || !$ARGV[0]) { usage(); } my $error = 0; while (my $filename = $ARGV[0]) { shift; my ($file_contents); my (%hf_defs, %hf_static_defs, %hf_array_entries, %hf_usage); my ($unused_href, $no_array_href); my (%ei_defs, %ei_static_defs, %ei_array_entries, %ei_usage); my ($unused_ei, $no_array_ei); read_file(\$filename, \$file_contents); remove_comments (\$file_contents, $filename); remove_blank_lines (\$file_contents, $filename); $file_contents =~ s/^\s+//m; # Remove leading spaces remove_quoted_strings(\$file_contents, $filename); remove_if0_code (\$file_contents, $filename); find_remove_hf_defs (\$file_contents, $filename, \%hf_defs); find_remove_hf_array_entries (\$file_contents, $filename, \%hf_array_entries); find_remove_proto_get_id_hf_assignments(\$file_contents, $filename, \%hf_array_entries); find_hf_usage (\$file_contents, $filename, \%hf_usage); find_remove_ei_defs (\$file_contents, $filename, \%ei_defs); find_remove_ei_array_entries (\$file_contents, $filename, \%ei_array_entries); find_ei_usage (\$file_contents, $filename, \%ei_usage); # Tests (See above) # 1. Are all the static hf_defs and ei_defs entries in hf_usage and ei_usage? # if not: "Unused entry:" # # create a hash containing entries just for the static definitions @hf_static_defs{grep {$hf_defs{$_} == 0} keys %hf_defs} = (); # All values in the new hash will be undef @ei_static_defs{grep {$ei_defs{$_} == 0} keys %ei_defs} = (); # All values in the new hash will be undef $unused_href = diff_hash(\%hf_static_defs, \%hf_usage); remove_hf_pid_from_unused_if_add_oui_call(\$file_contents, $filename, $unused_href); $unused_ei = diff_hash(\%ei_static_defs, \%ei_usage); print_list("Unused href entry: $filename: ", $unused_href); print_list("Unused ei entry: $filename: ", $unused_ei); # 2. Are all the hf_defs and ei_ entries (static and global) in [hf|ei]_array_entries ? # (Note: if a static hf_def or ei is "unused", don't check for same in [hf|ei]_array_entries) # if not: "ERROR: NO ARRAY" ## Checking for missing global defs currently gives false positives ## So: only check static defs for now. ## $no_array_href = diff_hash(\%hf_defs, \%hf_array_entries); $no_array_href = diff_hash(\%hf_static_defs, \%hf_array_entries); $no_array_href = diff_hash($no_array_href, $unused_href); # Remove "unused" hf_... from no_array list $no_array_ei = diff_hash(\%ei_static_defs, \%ei_array_entries); $no_array_ei = diff_hash($no_array_ei, $unused_ei); # Remove "unused" ei_... from no_array list print_list("ERROR: NO ARRAY: $filename: ", $no_array_href); print_list("ERROR: NO ARRAY: $filename: ", $no_array_ei); if ((keys %{$no_array_href}) != 0) { $error += 1; } if ((keys %{$no_array_ei}) != 0) { $error += 1; } } exit (($error == 0) ? 0 : 1); # exit 1 if ERROR # --------------------------------------------------------------------- # sub usage { print "Usage: $0 [--debug=n] Filename [...]\n"; exit(1); } # --------------------------------------------------------------------- # action: read contents of a file to specified string # arg: filename_ref, file_contents_ref sub read_file { my ($filename_ref, $file_contents_ref) = @_; die "No such file: \"${$filename_ref}\"\n" if (! -e ${$filename_ref}); # delete leading './' ${$filename_ref} =~ s{ ^ [.] 
/ } {}xmso;

    # Read in the file (ouch, but it's easier that way)
    open(my $fci, "<:crlf", ${$filename_ref}) || die("Couldn't open ${$filename_ref}");

    ${$file_contents_ref} = do { local( $/ ) ; <$fci> } ;

    close($fci);

    return;
}

# ---------------------------------------------------------------------
# action:  Create a hash containing entries in 'a' that are not in 'b'
# arg:     a_href, b_href
# returns: pointer to hash
sub diff_hash {
    my ($a_href, $b_href) = @_;

    my %diffs;
    @diffs{grep {! exists $b_href->{$_}} keys %{$a_href}} = (); # All values in the new hash will be undef

    return \%diffs;
}

# ---------------------------------------------------------------------
# action: print a list
# arg:    hdr, list_href
sub print_list {
    my ($hdr, $list_href) = @_;

    print map {"$hdr$_\n"} sort keys %{$list_href};

    return;
}

# ------------
# action: remove blank lines from input string
# arg:    code_ref, filename
sub remove_blank_lines {
    my ($code_ref, $filename) = @_;

    ${$code_ref} =~ s{ ^ \s* \n ? } {}xmsog;

    return;
}

sub get_quoted_str_regex {
    # A regex which matches double-quoted strings.
    #    's' modifier added so that strings containing a 'line continuation'
    #    ( \ followed by a new-line) will match.
    my $double_quoted_str = qr{ (?: ["] (?: \\. | [^\"\\\n])* ["]) }xmso;

    # A regex which matches single-quoted strings.
    my $single_quoted_str = qr{ (?: ['] (?: \\. | [^\'\\\n])* [']) }xmso;

    return qr{ $double_quoted_str | $single_quoted_str }xmso;
}

# ------------
# action: remove comments from input string
# arg:    code_ref, filename
sub remove_comments {
    my ($code_ref, $filename) = @_;

    # The below Regexp is based on one from:
    #     https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811
    # It is in the public domain.
    # A complicated regex which matches C-style comments.
    my $c_comment_regex = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xmso;

    ${$code_ref} =~ s{ $c_comment_regex } {}xmsog;

    # Remove single-line C++-style comments. Be careful not to break up strings
    # like "coap://", so match double quoted strings, single quoted characters,
    # division operator and other characters before the actual "//" comment.
    my $quoted_str = get_quoted_str_regex();
    my $cpp_comment_regex = qr{ ^((?: $quoted_str | /(?!/) | [^'"/\n] )*) // .*$ }xm;
    ${$code_ref} =~ s{ $cpp_comment_regex } { $1 }xmg;

    ($debug == 1) && print "==> After Remove Comments: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# ------------
# action: remove quoted strings from input string
# arg:    code_ref, filename
sub remove_quoted_strings {
    my ($code_ref, $filename) = @_;

    my $quoted_str = get_quoted_str_regex();
    ${$code_ref} =~ s{ $quoted_str } {}xmsog;

    ($debug == 1) && print "==> After Remove quoted strings: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# -------------
# action:  remove '#if 0'd code from the input string
# args     codeRef, fileName
# returns: codeRef
#
# Essentially: split the input into blocks of code or lines of #if/#if 0/etc.
#              Remove blocks that follow '#if 0' until '#else/#endif' is found.

{ # block begin

    sub remove_if0_code {
        my ($codeRef, $fileName) = @_;

        # Preprocess output (ensure trailing LF and no leading WS before '#')
        $$codeRef =~ s/^\s*#/#/m;
        if ($$codeRef !~ /\n$/) { $$codeRef .= "\n"; }

        # Split into blocks of normal code or lines with conditionals.
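        # Illustrative: the capturing split below keeps the directive lines as
        # their own blocks, so "a();\n#if 0\nb();\n#endif\nc();\n" becomes
        # ("a();\n", "#if 0\n", "b();\n", "#endif\n", "c();\n") and only
        # "b();\n" is dropped by the state machine that follows.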
my $ifRegExp = qr/if 0|if|else|endif/; my @blocks = split(/^(#\s*(?:$ifRegExp).*\n)/m, $$codeRef); my ($if_lvl, $if0_lvl, $if0) = (0,0,0); my $lines = ''; for my $block (@blocks) { my $if; if ($block =~ /^#\s*($ifRegExp)/) { # #if/#if 0/#else/#endif processing $if = $1; if ($debug == 99) { print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl [$if] - $block"); } if ($if eq 'if') { $if_lvl += 1; } elsif ($if eq 'if 0') { $if_lvl += 1; if ($if0_lvl == 0) { $if0_lvl = $if_lvl; $if0 = 1; # inside #if 0 } } elsif ($if eq 'else') { if ($if0_lvl == $if_lvl) { $if0 = 0; } } elsif ($if eq 'endif') { if ($if0_lvl == $if_lvl) { $if0 = 0; $if0_lvl = 0; } $if_lvl -= 1; if ($if_lvl < 0) { die "patsub: #if/#endif mismatch in $fileName" } } } if ($debug == 99) { print(STDERR "if0=$if0 if0_lvl=$if0_lvl lvl=$if_lvl\n"); } # Keep preprocessor lines and blocks that are not enclosed in #if 0 if ($if or $if0 != 1) { $lines .= $block; } } $$codeRef = $lines; ($debug == 2) && print "==> After Remove if0: code: [$fileName]\n$$codeRef\n===<\n"; return $codeRef; } } # block end # --------------------------------------------------------------------- # action: Add to hash an entry for each # 'static? g?int hf_...' definition (including array names) # in the input string. # The entry value will be 0 for 'static' definitions and 1 for 'global' definitions; # Remove each definition found from the input string. # args: code_ref, filename, hf_defs_href # returns: ref to the hash sub find_remove_hf_defs { my ($code_ref, $filename, $hf_defs_href) = @_; # Build pattern to match any of the following # static? g?int hf_foo = -1; # static? g?int hf_foo[xxx]; # static? g?int hf_foo[xxx] = { # p1: 'static? g?int hf_foo' my $p1_regex = qr{ ^ \s* (static \s+)? g?int \s+ (hf_[a-zA-Z0-9_]+) # hf_.. }xmso; # p2a: ' = -1;' my $p2a_regex = qr{ \s* = \s* (?: - \s* 1 ) \s* ; }xmso; # p2b: '[xxx];' or '[xxx] = {' my $p2b_regex = qr/ \s* \[ [^\]]+ \] \s* (?: = \s* [{] | ; ) /xmso; my $hf_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso; while (${$code_ref} =~ m{ $hf_def_regex }xmsog) { #print ">%s< >$2<\n", (defined $1) ? $1 ; ""; $hf_defs_href->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined. } ($debug == 3) && debug_print_hash("VD: $filename", $hf_defs_href); # VariableDefinition # remove all ${$code_ref} =~ s{ $hf_def_regex } {}xmsog; ($debug == 3) && print "==> After remove hf_defs: code: [$filename]\n${$code_ref}\n===<\n"; return; } # --------------------------------------------------------------------- # action: Add to hash an entry (hf_...) for each hf[] entry. # Remove each hf[] entries found from the input string. # args: code_ref, filename, hf_array_entries_href sub find_remove_hf_array_entries { my ($code_ref, $filename, $hf_array_entries_href) = @_; # hf[] entry regex (to extract an hf_index_name and associated field type) my $hf_array_entry_regex = qr / [{] \s* & \s* ( [a-zA-Z0-9_]+ ) # &hf (?: \s* [[] [^]]+ []] # optional array ref ) ? \s* , \s* [{] [^}]+ , \s* (FT_[a-zA-Z0-9_]+) # field type \s* , [^}]+ , \s* (?: HFILL | HF_REF_TYPE_NONE ) [^}]* } [\s,]* [}] /xmso; # find all the hf[] entries (searching ${$code_ref}). 
    while (${$code_ref} =~ m{ $hf_array_entry_regex }xmsog) {
        ($debug == 98) && print "+++ $1 $2\n";
        $hf_array_entries_href->{$1} = undef;
    }

    ($debug == 4) && debug_print_hash("AE: $filename", $hf_array_entries_href); # ArrayEntry

    # now remove all
    ${$code_ref} =~ s{ $hf_array_entry_regex } {}xmsog;
    ($debug == 4) && print "==> After remove hf_array_entries: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# ---------------------------------------------------------------------
# action:  Add to hash an entry (hf_...) for each hf_... var
#          found in statements of the form:
#            'hf_... = proto_registrar_get_id_byname ...'
#            'hf_... = proto_get_id_by_filtername ...'
#          Remove each such statement found from the input string.
# args:    code_ref, filename, hf_array_entries_href

sub find_remove_proto_get_id_hf_assignments {
    my ($code_ref, $filename, $hf_array_entries_href) = @_;

    my $_regex = qr{
                       ( hf_ [a-zA-Z0-9_]+ )
                       \s* = \s*
                       (?:
                           proto_registrar_get_id_byname
                       |
                           proto_get_id_by_filter_name
                       )
               }xmso;

    my @hfvars = ${$code_ref} =~ m{ $_regex }xmsog;

    if (@hfvars == 0) {
        return;
    }

    # found:
    # Sanity check: hf_vars shouldn't already be in hf_array_entries
    if (defined @$hf_array_entries_href{@hfvars}) {
        printf "? one or more of [@hfvars] initialized via proto_registrar_get_by_name() also in hf[] ??\n";
    }

    # Now: add to hf_array_entries
    @$hf_array_entries_href{@hfvars} = ();

    ($debug == 4) && debug_print_hash("PR: $filename", $hf_array_entries_href);

    # remove from input (so not considered as 'usage')
    ${$code_ref} =~ s{ $_regex } {}xmsog;

    ($debug == 4) && print "==> After remove proto_registrar_by_name: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# ---------------------------------------------------------------------
# action: Add to hash all hf_... strings remaining in input string.
# args:   code_ref, filename, hf_usage_href
# return: ref to hf_usage hash
#
# The hash will include *all* strings of form hf_...
#  which are in the input string (even strings which
#  aren't actually vars).
#  We don't care since we'll be checking only
#  known valid vars against these strings.

sub find_hf_usage {
    my ($code_ref, $filename, $hf_usage_href) = @_;

    my $hf_usage_regex = qr{
                               \b ( hf_[a-zA-Z0-9_]+ )      # hf_...
                       }xmso;

    while (${$code_ref} =~ m{ $hf_usage_regex }xmsog) {
        #print "$1\n";
        $hf_usage_href->{$1} += 1;
    }

    ($debug == 5) && debug_print_hash("VU: $filename", $hf_usage_href); # VariableUsage

    return;
}

# ---------------------------------------------------------------------
# action: Remove from 'unused' hash an instance of a variable named hf_..._pid
#          if the source has a call to llc_add_oui() or ieee802a_add_oui().
#          (This is rather a bit of a hack).
# args:   code_ref, filename, unused_href

sub remove_hf_pid_from_unused_if_add_oui_call {
    my ($code_ref, $filename, $unused_href) = @_;

    if ((keys %{$unused_href}) == 0) {
        return;
    }

    my @hfvars = grep { m/ ^ hf_ [a-zA-Z0-9_]+ _pid $ /xmso} keys %{$unused_href};

    if ((@hfvars == 0) || (@hfvars > 1)) {
        return;                 # if multiple unused hf_..._pid
    }

    if (${$code_ref} !~ m{ llc_add_oui | ieee802a_add_oui }xmso) {
        return;
    }

    # hf_...pid unused var && a call to ..._add_oui(); delete entry from unused
    # XXX: maybe hf_..._pid should really be added to hfUsed ?
    delete @$unused_href{@hfvars};

    return;
}

# ---------------------------------------------------------------------
# action: Add to hash an entry for each
#          'static? expert_field ei_...' definition (including array names)
#          in the input string.
#          The entry value will be 0 for 'static' definitions and 1 for 'global' definitions;
#          Remove each definition found from the input string.
# args:    code_ref, filename, ei_defs_eiref
# returns: ref to the hash

sub find_remove_ei_defs {
    my ($code_ref, $filename, $ei_defs_eiref) = @_;

    # Build pattern to match any of the following
    #     static? expert_field ei_foo = -1;
    #     static? expert_field ei_foo[xxx];
    #     static? expert_field ei_foo[xxx] = {

    # p1: 'static? expert_field ei_foo'
    my $p1_regex = qr{
                         ^
                         (static \s+)?
                         expert_field
                         \s+
                         (ei_[a-zA-Z0-9_]+)          # ei_..
                 }xmso;

    # p2a: ' = EI_INIT;'
    my $p2a_regex = qr{
                          \s* = \s*
                          (?:
                              EI_INIT
                          )
                          \s* ;
                  }xmso;

    # p2b: '[xxx];' or '[xxx] = {'
    my $p2b_regex = qr/
                          \s* \[ [^\]]+ \] \s*
                          (?:
                              = \s* [{]
                          |
                              ;
                          )
                  /xmso;

    my $ei_def_regex = qr{ $p1_regex (?: $p2a_regex | $p2b_regex ) }xmso;

    while (${$code_ref} =~ m{ $ei_def_regex }xmsog) {
        #print ">%s< >$2<\n", (defined $1) ? $1 : "";
        $ei_defs_eiref->{$2} = (defined $1) ? 0 : 1; # 'static' if $1 is defined.
    }
    ($debug == 3) && debug_print_hash("VD: $filename", $ei_defs_eiref); # VariableDefinition

    # remove all
    ${$code_ref} =~ s{ $ei_def_regex } {}xmsog;
    ($debug == 3) && print "==> After remove ei_defs: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# ---------------------------------------------------------------------
# action: Add to hash an entry (ei_...) for each ei[] entry.
#         Remove each ei[] entry found from the input string.
# args:   code_ref, filename, ei_array_entries_href

sub find_remove_ei_array_entries {
    my ($code_ref, $filename, $ei_array_entries_eiref) = @_;

    # ei[] entry regex (to extract an ei_index_name and associated field type)
    my $ei_array_entry_regex = qr /
                                      {
                                      \s* & \s*
                                      ( [a-zA-Z0-9_]+ )          # &ei
                                      (?:
                                          \s* [ [^]]+ ]          # optional array ref
                                      ) ?
                                      \s* , \s*
                                      {
                                      # \s* "[^"]+"              # (filter string has been removed already)
                                      \s* , \s*
                                      PI_[A-Z0-9_]+              # event group
                                      \s* , \s*
                                      PI_[A-Z0-9_]+              # event severity
                                      \s* ,
                                      [^,]*                      # description string (already removed) or NULL
                                      ,
                                      \s* EXPFILL \s* }
                                      \s* }
                              /xs;

    # find all the ei[] entries (searching ${$code_ref}).
    while (${$code_ref} =~ m{ $ei_array_entry_regex }xsg) {
        ($debug == 98) && print "+++ $1\n";
        $ei_array_entries_eiref->{$1} = undef;
    }

    ($debug == 4) && debug_print_hash("AE: $filename", $ei_array_entries_eiref); # ArrayEntry

    # now remove all
    ${$code_ref} =~ s{ $ei_array_entry_regex } {}xmsog;
    ($debug == 4) && print "==> After remove ei_array_entries: code: [$filename]\n${$code_ref}\n===<\n";

    return;
}

# ---------------------------------------------------------------------
# action: Add to hash all ei_... strings remaining in input string.
# args:   code_ref, filename, ei_usage_eiref
# return: ref to ei_usage hash
#
# The hash will include *all* strings of form ei_...
#  which are in the input string (even strings which
#  aren't actually vars).
#  We don't care since we'll be checking only
#  known valid vars against these strings.

sub find_ei_usage {
    my ($code_ref, $filename, $ei_usage_eiref) = @_;

    my $ei_usage_regex = qr{
                               \b ( ei_[a-zA-Z0-9_]+ )      # ei_...
                       }xmso;

    while (${$code_ref} =~ m{ $ei_usage_regex }xmsog) {
        #print "$1\n";
        $ei_usage_eiref->{$1} += 1;
    }

    ($debug == 5) && debug_print_hash("VU: $filename", $ei_usage_eiref); # VariableUsage

    return;
}

# ---------------------------------------------------------------------
sub debug_print_hash {
    my ($title, $href) = @_;

    ##print "==> $title\n";
    for my $k (sort keys %{$href}) {
        my $h = defined($href->{$k}) ? $href->{$k} : "undef";
        printf "%-40.40s %5.5s %s\n", $title, $h, $k;
    }
}
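The core of the script above is the set difference computed by diff_hash: definitions minus usages gives the "unused" list, and definitions minus array entries gives the "NO ARRAY" list. Below is a minimal Python sketch of that same check; the hf_defs/hf_uses sample data is invented for illustration and not taken from any real dissector.

# Sketch only: hf_defs/hf_uses below are made-up sample data.
def diff_keys(a, b):
    # Mirror diff_hash: keep the keys of 'a' that do not appear in 'b'.
    return {k: None for k in a if k not in b}

hf_defs = {'hf_foo_flag': 0, 'hf_foo_len': 0}   # 0 == static definition
hf_uses = {'hf_foo_len': 3}                     # usage counts found in the code

unused = diff_keys(hf_defs, hf_uses)
for name in sorted(unused):
    print('Unused entry:', name)                # prints: Unused entry: hf_foo_flag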
Python
wireshark/tools/checklicenses.py
#!/usr/bin/env python3 # Copyright (c) 2013 The Chromium Authors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # """Makes sure that all files contain proper licensing information.""" import optparse import os.path import subprocess import sys def PrintUsage(): print("""Usage: python checklicenses.py [--root <root>] [tocheck] --root Specifies the repository root. This defaults to ".." relative to the script file. This will be correct given the normal location of the script in "<root>/tools". --ignore-suppressions Ignores path-specific allowed license. Useful when trying to remove a suppression/allowed entry. --list-allowed Print a list of allowed licenses and exit. tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. Examples: python checklicenses.py python checklicenses.py --root ~/chromium/src third_party""") ALLOWED_LICENSES = [ 'BSD (1 clause)', 'BSD (2 clause)', 'BSD (2 clause) GPL (v2 or later)', 'BSD (3 clause)', 'GPL (v2 or later)', 'GPL (v3 or later) (with Bison parser exception)', 'ISC', 'ISC GPL (v2 or later)', 'LGPL (v2 or later)', 'LGPL (v2.1 or later)', 'MIT/X11 (BSD like)', 'Public domain', 'Public domain GPL (v2 or later)', 'Public domain MIT/X11 (BSD like)', 'zlib/libpng', 'zlib/libpng GPL (v2 or later)', ] PATH_SPECIFIC_ALLOWED_LICENSES = { 'caputils/airpcap.h': [ 'BSD-3-Clause', ], 'wsutil/strnatcmp.c': [ 'Zlib', ], 'wsutil/strnatcmp.h': [ 'Zlib', ], 'resources/protocols/dtds': [ 'UNKNOWN', ], 'resources/protocols/diameter/dictionary.dtd': [ 'UNKNOWN', ], 'resources/protocols/wimaxasncp/dictionary.dtd': [ 'UNKNOWN', ], 'doc/': [ 'UNKNOWN', ], 'docbook/custom_layer_chm.xsl': [ 'UNKNOWN', ], 'docbook/custom_layer_single_html.xsl': [ 'UNKNOWN', ], 'docbook/ws.css' : [ 'UNKNOWN' ], 'fix': [ 'UNKNOWN', ], 'wsutil/g711.c': [ 'UNKNOWN', ], 'packaging/macosx': [ 'UNKNOWN', ], 'epan/except.c': [ 'UNKNOWN', ], 'epan/except.h': [ 'UNKNOWN', ], # Generated header files by lex/lemon/whatever 'epan/dtd_grammar.h': [ 'UNKNOWN', ], 'epan/dfilter/grammar.h': [ 'UNKNOWN', ], 'epan/dfilter/grammar.c': [ 'UNKNOWN', ], 'epan/dissectors/packet-ieee80211-radiotap-iter.': [ # Using ISC license only 'ISC GPL (v2)' ], # Mentions BSD-3-clause twice due to embedding of code: 'epan/dissectors/packet-communityid.c': [ 'BSD (3 clause) BSD (3 clause)', ], 'plugins/mate/mate_grammar.h': [ 'UNKNOWN', ], 'vcs_version.h': [ 'UNKNOWN', ], # Special IDL license that appears to be compatible as far as I (not a # lawyer) can tell. See # https://www.wireshark.org/lists/wireshark-dev/201310/msg00234.html 'epan/dissectors/pidl/idl_types.h': [ 'UNKNOWN', ], # The following tools are under incompatible licenses (mostly GPLv3 or # GPLv3+), but this is OK since they are not actually linked into Wireshark 'tools/pidl': [ 'UNKNOWN', ], 'tools/lemon': [ 'UNKNOWN', ], 'tools/licensecheck.pl': [ 'GPL (v2)' ], '.gitlab/': [ 'UNKNOWN', ], 'wsutil/safe-math.h': [ # Public domain (CC0) 'UNKNOWN', ], } def check_licenses(options, args): if options.list_allowed: print('\n'.join(ALLOWED_LICENSES)) sys.exit(0) # Figure out which directory we have to check. if len(args) == 0: # No directory to check specified, use the repository root. start_dir = options.base_directory elif len(args) == 1: # Directory specified. Start here. It's supposed to be relative to the # base directory. start_dir = os.path.abspath(os.path.join(options.base_directory, args[0])) else: # More than one argument, we don't handle this. 
PrintUsage() return 1 print("Using base directory: %s" % options.base_directory) print("Checking: %s" % start_dir) print("") licensecheck_path = os.path.abspath(os.path.join(options.base_directory, 'tools', 'licensecheck.pl')) licensecheck = subprocess.Popen([licensecheck_path, '-l', '150', '-r', start_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = licensecheck.communicate() stdout = stdout.decode('utf-8') stderr = stderr.decode('utf-8') if options.verbose: print('----------- licensecheck stdout -----------') print(stdout) print('--------- end licensecheck stdout ---------') if licensecheck.returncode != 0 or stderr: print('----------- licensecheck stderr -----------') print(stderr) print('--------- end licensecheck stderr ---------') print("\nFAILED\n") return 1 success = True exit_status = 0 for line in stdout.splitlines(): filename, license = line.split(':', 1) filename = os.path.relpath(filename.strip(), options.base_directory) # All files in the build output directory are generated one way or another. # There's no need to check them. if os.path.dirname(filename).startswith('build'): continue # For now we're just interested in the license. license = license.replace('*No copyright*', '').strip() # Skip generated files. if 'GENERATED FILE' in license: continue # Support files which provide a choice between licenses. if any(item in ALLOWED_LICENSES for item in license.split(';')): continue if not options.ignore_suppressions: found_path_specific = False for prefix in PATH_SPECIFIC_ALLOWED_LICENSES: if (filename.startswith(prefix) and license in PATH_SPECIFIC_ALLOWED_LICENSES[prefix]): found_path_specific = True break if found_path_specific: continue reason = "License '%s' for '%s' is not allowed." % (license, filename) success = False print(reason) exit_status = 1 if success: print("\nSUCCESS\n") return 0 else: print("\nFAILED\n") return exit_status def main(): default_root = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) option_parser = optparse.OptionParser() option_parser.add_option('--root', default=default_root, dest='base_directory', help='Specifies the repository root. This defaults ' 'to "../.." relative to the script file, which ' 'will normally be the repository root.') option_parser.add_option('-v', '--verbose', action='store_true', default=False, help='Print debug logging') option_parser.add_option('--list-allowed', action='store_true', default=False, help='Print a list of allowed licenses and exit.') option_parser.add_option('--ignore-suppressions', action='store_true', default=False, help='Ignore path-specific allowed license.') options, args = option_parser.parse_args() return check_licenses(options, args) if '__main__' == __name__: sys.exit(main())
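The per-line handling in check_licenses() boils down to splitting each licensecheck output line on the first ':' and testing the result against the allowed set. Here is a small self-contained sketch of just that step; the sample line and the two allowed licenses are invented for illustration, not real licensecheck output.

# Sketch only: the line below imitates licensecheck output.
ALLOWED = {'ISC', 'MIT/X11 (BSD like)'}

line = '/repo/epan/example.c: ISC'
filename, license = line.split(':', 1)
license = license.replace('*No copyright*', '').strip()

# A file passes if any of its ';'-separated licenses is in the allowed set.
if any(item in ALLOWED for item in license.split(';')):
    print('OK:', filename)
else:
    print("License '%s' for '%s' is not allowed." % (license, filename))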
Python
wireshark/tools/check_dissector.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later import sys import os import signal import argparse # Run battery of tests on one or more dissectors. # For text colouring/highlighting. class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' ADDED = '\033[45m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' # Try to exit soon after Ctrl-C is pressed. should_exit = False def signal_handler(sig, frame): global should_exit should_exit = True print('You pressed Ctrl+C - exiting') signal.signal(signal.SIGINT, signal_handler) # Command-line args parser = argparse.ArgumentParser(description="Run gamut of tests on dissector(s)") parser.add_argument('--file', action='append', help='specify individual dissector file to test') parser.add_argument('--file-list', action='store', help='file with list of dissectors') parser.add_argument('--build-folder', action='store', help='build folder') args = parser.parse_args() if not args.file and not args.file_list: print('Need to specify --file or --file-list') exit(1) # TODO: verify build-folder if set. # Get list of files to check. dissectors = [] # Individually-selected files if args.file: for f in args.file: if not os.path.isfile(f): print('Chosen file', f, 'does not exist.') exit(1) else: dissectors.append(f) # List of dissectors stored in a file if args.file_list: if not os.path.isfile(args.file_list): print('Dissector-list file', args.file_list, 'does not exist.') exit(1) else: with open(args.file_list, 'r') as f: contents = f.read().splitlines() for f in contents: if not os.path.isfile(f): print('Chosen file', f, 'does not exist.') exit(1) else: dissectors.append(f) # Tools that should be run on selected files. # Boolean arg is for whether build-dir is needed in order to run it. # 3rd is Windows support. tools = [ ('tools/delete_includes.py --folder .', True, True), ('tools/check_spelling.py', False, True), ('tools/check_tfs.py', False, True), ('tools/check_typed_item_calls.py --all-checks', False, True), ('tools/check_static.py', True, False), ('tools/check_dissector_urls.py', False, True), ('tools/check_val_to_str.py', False, True), ('tools/cppcheck/cppcheck.sh', False, True), ('tools/checkhf.pl', False, True), ('tools/checkAPIs.pl', False, True), ('tools/fix-encoding-args.pl', False, True), ('tools/checkfiltername.pl', False, True) ] def run_check(tool, dissectors, python): # Create command-line with all dissectors included command = '' # Don't trust shebang on windows. if sys.platform.startswith('win'): if python: command += 'python.exe ' else: command += 'perl.exe ' command += tool[0] if tool[1]: command += ' --build-folder ' + args.build_folder for d in dissectors: # Add this dissector file to command-line args command += ((' --file' if python else '') + ' ' + d) # Run it print(bcolors.BOLD + command + bcolors.ENDC) os.system(command) # Run all checks on all of my dissectors. for tool in tools: if should_exit: exit(1) if ((not sys.platform.startswith('win') or tool[2]) and # Supported on this platform? (not tool[1] or (tool[1] and args.build_folder))): # Have --build-folder if needed? # Run it. run_check(tool, dissectors, tool[0].find('.py') != -1)
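Each entry in the tools table is a (command, needs-build-folder, runs-on-Windows) tuple, and run_check() expands one tuple plus the dissector list into a shell command. A rough standalone sketch of that expansion for a single invocation; the dissector path and build folder here are hypothetical.

# Sketch only: paths are made up.
import sys

tool = ('tools/check_spelling.py', False, True)   # (command, needs build folder, Windows OK)
dissectors = ['epan/dissectors/packet-foo.c']
build_folder = '../wireshark-build'

command = 'python.exe ' if sys.platform.startswith('win') else ''
command += tool[0]
if tool[1]:
    command += ' --build-folder ' + build_folder
for d in dissectors:
    command += ' --file ' + d
print(command)   # e.g. "tools/check_spelling.py --file epan/dissectors/packet-foo.c"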
Python
wireshark/tools/check_dissector_urls.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later import argparse import aiohttp import asyncio import os import re import shutil import signal import subprocess # This utility scans the dissector code for URLs, then attempts to # fetch the links. The results are shown in stdout, but also, at # the end of the run, written to files: # - URLs that couldn't be loaded are written to failures.txt # - working URLs are written to successes.txt # - any previous failures.txt is also copied to failures_last_run.txt # # N.B. preferred form of RFC link is e.g., https://tools.ietf.org/html/rfc4349 # TODO: # - option to write back to dissector file when there is a failure? # - optionally parse previous/recent successes.txt and avoid fetching them again? # - make sure URLs are really within comments in code? # - use urllib.parse or similar to better check URLs? # - improve regex to allow '+' in URL (like confluence uses) # Try to exit soon after Ctrl-C is pressed. should_exit = False def signal_handler(sig, frame): global should_exit should_exit = True print('You pressed Ctrl+C - exiting') try: tasks = asyncio.all_tasks() except (RuntimeError): # we haven't yet started the async link checking, we can exit directly exit(1) # ignore further SIGINTs while we're cancelling the running tasks signal.signal(signal.SIGINT, signal.SIG_IGN) for t in tasks: t.cancel() signal.signal(signal.SIGINT, signal_handler) class FailedLookup: def __init__(self): # Fake values that will be queried (for a requests.get() return value) self.status = 0 self.headers = {} self.headers['content-type'] = '<NONE>' def __str__(self): s = ('FailedLookup: status=' + str(self.status) + ' content-type=' + self.headers['content-type']) return s # Dictionary from url -> result cached_lookups = {} class Link(object): def __init__(self, file, line_number, url): self.file = file self.line_number = line_number self.url = url self.tested = False self.r = None self.success = False def __str__(self): epan_idx = self.file.find('epan') if epan_idx == -1: filename = self.file else: filename = self.file[epan_idx:] s = ('SUCCESS ' if self.success else 'FAILED ') + \ filename + ':' + str(self.line_number) + ' ' + self.url if True: # self.r: if self.r.status: s += " status-code=" + str(self.r.status) if 'content-type' in self.r.headers: s += (' content-type="' + self.r.headers['content-type'] + '"') else: s += ' <No response Received>' return s def validate(self): global cached_lookups global should_exit if should_exit: return self.tested = True if self.url in cached_lookups: self.r = cached_lookups[self.url] else: self.r = FailedLookup() if self.r.status < 200 or self.r.status >= 300: self.success = False else: self.success = True if (args.verbose or not self.success) and not should_exit: print(self) links = [] files = [] all_urls = set() def find_links_in_file(filename): with open(filename, 'r', encoding="utf8") as f: for line_number, line in enumerate(f, start=1): # TODO: not matching # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol urls = re.findall( r'https?://(?:[a-zA-Z0-9./_?&=-]+|%[0-9a-fA-F]{2})+', line) for url in urls: # Lop off any trailing chars that are not part of it url = url.rstrip(").',") # A url must have a period somewhere if '.' not in url: continue global links, all_urls links.append(Link(filename, line_number, url)) all_urls.add(url) # Scan the given folder for links to test. 
def find_links_in_folder(folder): # Look at files in sorted order, to give some idea of how far through it # is. for filename in sorted(os.listdir(folder)): if filename.endswith('.c'): global links find_links_in_file(os.path.join(folder, filename)) async def populate_cache(sem, session, url): global cached_lookups if should_exit: return async with sem: try: async with session.get(url) as r: cached_lookups[url] = r if args.verbose: print('checking ', url, ': success', sep='') except (asyncio.CancelledError, ValueError, ConnectionError, Exception): cached_lookups[url] = FailedLookup() if args.verbose: print('checking ', url, ': failed', sep='') async def check_all_links(links): sem = asyncio.Semaphore(50) timeout = aiohttp.ClientTimeout(total=25) connector = aiohttp.TCPConnector(limit=30) headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'} async with aiohttp.ClientSession(connector=connector, headers=headers, timeout=timeout) as session: tasks = [populate_cache(sem, session, u) for u in all_urls] try: await asyncio.gather(*tasks) except (asyncio.CancelledError): await session.close() for l in links: l.validate() ################################################################# # Main logic. # command-line args. Controls which dissector files should be scanned. # If no args given, will just scan epan/dissectors folder. parser = argparse.ArgumentParser(description='Check URL links in dissectors') parser.add_argument('--file', action='append', help='specify individual dissector file to test') parser.add_argument('--commits', action='store', help='last N commits to check') parser.add_argument('--open', action='store_true', help='check open files') parser.add_argument('--verbose', action='store_true', help='when enabled, show more output') args = parser.parse_args() def is_dissector_file(filename): p = re.compile(r'epan/dissectors/packet-.*\.c') return p.match(filename) # Get files from wherever command-line args indicate. if args.file: # Add specified file(s) for f in args.file: if not f.startswith('epan'): f = os.path.join('epan', 'dissectors', f) if not os.path.isfile(f): print('Chosen file', f, 'does not exist.') exit(1) else: files.append(f) find_links_in_file(f) elif args.commits: # Get files affected by specified number of commits. command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] # Fetch links from files (dissectors files only) files = list(filter(is_dissector_file, files)) for f in files: find_links_in_file(f) elif args.open: # Unstaged changes. command = ['git', 'diff', '--name-only'] files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] files = list(filter(is_dissector_file, files)) # Staged changes. command = ['git', 'diff', '--staged', '--name-only'] files_staged = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] files_staged = list(filter(is_dissector_file, files_staged)) for f in files: find_links_in_file(f) for f in files_staged: if f not in files: find_links_in_file(f) files.append(f) else: # Find links from dissector folder. find_links_in_folder(os.path.join(os.path.dirname( __file__), '..', 'epan', 'dissectors')) # If scanning a subset of files, list them here. 
print('Examining:') if args.file or args.commits or args.open: if files: print(' '.join(files), '\n') else: print('No files to check.\n') else: print('All dissector modules\n') asyncio.run(check_all_links(links)) # Write failures to a file. Back up any previous first though. if os.path.exists('failures.txt'): shutil.copyfile('failures.txt', 'failures_last_run.txt') with open('failures.txt', 'w') as f_f: for l in links: if l.tested and not l.success: f_f.write(str(l) + '\n') # And successes with open('successes.txt', 'w') as f_s: for l in links: if l.tested and l.success: f_s.write(str(l) + '\n') # Count and show overall stats. passed, failed = 0, 0 for l in links: if l.tested: if l.success: passed += 1 else: failed += 1 print('--------------------------------------------------------------------------------------------------') print(len(links), 'links checked: ', passed, 'passed,', failed, 'failed')
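The fetching above relies on a semaphore-bounded fan-out: every unique URL gets one task, the semaphore caps concurrency, and failures are recorded rather than raised. A stripped-down, standalone version of that pattern follows; the URL list is only an example, and aiohttp must be installed.

import asyncio
import aiohttp

async def fetch_status(sem, session, url, results):
    async with sem:                    # cap the number of in-flight requests
        try:
            async with session.get(url) as r:
                results[url] = r.status
        except Exception:
            results[url] = None        # record any failure instead of raising

async def check(urls, limit=50):
    results = {}
    sem = asyncio.Semaphore(limit)
    timeout = aiohttp.ClientTimeout(total=25)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        await asyncio.gather(*(fetch_status(sem, session, u, results) for u in urls))
    return results

print(asyncio.run(check(['https://tools.ietf.org/html/rfc4349'])))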
Python
wireshark/tools/check_help_urls.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later ''' Go through all user guide help URLs listed in the program and confirm these are present in the User's Guide source files. ''' from re import search from glob import glob from sys import exit found = {} with open("ui/help_url.c") as f: for line in f: if url := search(r"user_guide_url\(\"(.*).html\"\);", line): chapter = url.group(1) found[chapter] = False adoc_files = glob("docbook/wsug_src/*.adoc") for adoc_file in adoc_files: with open(adoc_file) as f: for line in f: if tag := search(r"^\[\#(.*)]", line): chapter = tag.group(1) if chapter in found: found[chapter] = True missing = False for chapter in found: if not found[chapter]: if not missing: print("The following chapters are missing in the User's Guide:") missing = True print(chapter) if missing: exit(-1)
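Both sides of the cross-reference are single regexes: one pulls the chapter name out of user_guide_url() calls in help_url.c, the other pulls anchors out of the .adoc sources. A quick demonstration against made-up sample lines (not taken from the real source files):

from re import search

c_line = 'url = user_guide_url("ChapterCapture.html");'
adoc_line = '[#ChapterCapture]'

if url := search(r'user_guide_url\("(.*).html"\);', c_line):
    print('help URL chapter:', url.group(1))    # -> ChapterCapture
if tag := search(r'^\[#(.*)]', adoc_line):
    print('adoc anchor:', tag.group(1))         # -> ChapterCapture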
Python
wireshark/tools/check_spelling.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later import os import sys import re import subprocess import argparse import signal from collections import Counter # Looks for spelling errors among strings found in source or documentation files. # N.B. To run this script, you should install pyspellchecker (not spellchecker) using pip. # TODO: check structured doxygen comments? # For text colouring/highlighting. class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' ADDED = '\033[45m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' # Try to exit soon after Ctrl-C is pressed. should_exit = False def signal_handler(sig, frame): global should_exit should_exit = True print('You pressed Ctrl+C - exiting') signal.signal(signal.SIGINT, signal_handler) # Create spellchecker, and augment with some Wireshark words. from spellchecker import SpellChecker # Set up our dict with words from text file. spell = SpellChecker() spell.word_frequency.load_text_file('./tools/wireshark_words.txt') # Track words that were not found. missing_words = [] # Split camelCase string into separate words. def camelCaseSplit(identifier): matches = re.finditer(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier) return [m.group(0) for m in matches] # A File object contains all of the strings to be checked for a given file. class File: def __init__(self, file): self.file = file self.values = [] filename, extension = os.path.splitext(file) self.code_file = extension in {'.c', '.cpp'} with open(file, 'r', encoding="utf8") as f: contents = f.read() if self.code_file: # Remove comments so as not to trip up RE. contents = removeComments(contents) # Find protocol name and add to dict. # N.B. doesn't work when a variable is used instead of a literal for the protocol name... matches = re.finditer(r'proto_register_protocol\s*\([\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\",[\n\r\s]*\"(.*)\"', contents) for m in matches: protocol = m.group(3) # Add to dict. spell.word_frequency.load_words([protocol]) spell.known([protocol]) print('Protocol is: ' + bcolors.BOLD + protocol + bcolors.ENDC) # Add a string found in this file. def add(self, value): self.values.append(value.encode('utf-8') if sys.platform.startswith('win') else value) # Whole word is not recognised, but is it 2 words concatenated (without camelcase) ? def checkMultiWords(self, word): if len(word) < 6: return False # Don't consider if mixed cases. if not (word.islower() or word.isupper()): # But make an exception if only the fist letter is uppercase.. if not word == (word[0].upper() + word[1:]): return False # Try splitting into 2 words recognised at various points. # Allow 3-letter words. length = len(word) for idx in range(3, length-3): word1 = word[0:idx] word2 = word[idx:] if not spell.unknown([word1, word2]): return True return self.checkMultiWordsRecursive(word) # If word before 'id' is recognised, accept word. 
def wordBeforeId(self, word): if word.lower().endswith('id'): if not spell.unknown([word[0:len(word)-2]]): return True else: return False def checkMultiWordsRecursive(self, word): length = len(word) #print('word=', word) if length < 4: return False for idx in range(4, length+1): w = word[0:idx] if not spell.unknown([w]): if idx == len(word): return True else: if self.checkMultiWordsRecursive(word[idx:]): return True return False def numberPlusUnits(self, word): m = re.search(r'^([0-9]+)([a-zA-Z]+)$', word) if m: if m.group(2).lower() in { "bit", "bits", "gb", "kbps", "gig", "mb", "th", "mhz", "v", "hz", "k", "mbps", "m", "g", "ms", "nd", "nds", "rd", "kb", "kbit", "ghz", "khz", "km", "ms", "usec", "sec", "gbe", "ns", "ksps", "qam", "mm" }: return True return False # Check the spelling of all the words we have found def spellCheck(self): num_values = len(self.values) for value_index,v in enumerate(self.values): if should_exit: exit(1) v = str(v) # Ignore includes. if v.endswith('.h'): continue # Store original (as want to include for context in error report). original = str(v) # Replace most punctuation with spaces, and eliminate common format specifiers. v = v.replace('.', ' ') v = v.replace(',', ' ') v = v.replace('`', ' ') v = v.replace(':', ' ') v = v.replace(';', ' ') v = v.replace('"', ' ') v = v.replace('\\', ' ') v = v.replace('+', ' ') v = v.replace('|', ' ') v = v.replace('(', ' ') v = v.replace(')', ' ') v = v.replace('[', ' ') v = v.replace(']', ' ') v = v.replace('{', ' ') v = v.replace('}', ' ') v = v.replace('<', ' ') v = v.replace('>', ' ') v = v.replace('_', ' ') v = v.replace('-', ' ') v = v.replace('/', ' ') v = v.replace('!', ' ') v = v.replace('?', ' ') v = v.replace('=', ' ') v = v.replace('*', ' ') v = v.replace('%', ' ') v = v.replace('#', ' ') v = v.replace('&', ' ') v = v.replace('@', ' ') v = v.replace('$', ' ') v = v.replace("'", ' ') v = v.replace('"', ' ') v = v.replace('%u', '') v = v.replace('%d', '') v = v.replace('%s', '') # Split into words. value_words = v.split() # Further split up any camelCase words. words = [] for w in value_words: words += camelCaseSplit(w) # Check each word within this string in turn. for word in words: # Strip trailing digits from word. word = word.rstrip('1234567890') # Quote marks found in some of the docs... word = word.replace('“', '') word = word.replace('”', '') if self.numberPlusUnits(word): continue if len(word) > 4 and spell.unknown([word]) and not self.checkMultiWords(word) and not self.wordBeforeId(word): print(self.file, value_index, '/', num_values, '"' + original + '"', bcolors.FAIL + word + bcolors.ENDC, ' -> ', '?') # TODO: this can be interesting, but takes too long! # bcolors.OKGREEN + spell.correction(word) + bcolors.ENDC global missing_words missing_words.append(word) def removeWhitespaceControl(code_string): code_string = code_string.replace('\\n', ' ') code_string = code_string.replace('\\r', ' ') code_string = code_string.replace('\\t', ' ') return code_string # Remove any contractions from the given string. 
def removeContractions(code_string): contractions = [ "wireshark’s", "don’t", "let’s", "isn’t", "won’t", "user’s", "hasn’t", "you’re", "o’clock", "you’ll", "you’d", "developer’s", "doesn’t", "what’s", "let’s", "haven’t", "can’t", "you’ve", "shouldn’t", "didn’t", "wouldn’t", "aren’t", "there’s", "packet’s", "couldn’t", "world’s", "needn’t", "graph’s", "table’s", "parent’s", "entity’s", "server’s", "node’s", "querier’s", "sender’s", "receiver’s", "computer’s", "frame’s", "vendor’s", "system’s", "we’ll", "asciidoctor’s", "protocol’s", "microsoft’s", "wasn’t" ] for c in contractions: code_string = code_string.replace(c, "") code_string = code_string.replace(c.capitalize(), "") code_string = code_string.replace(c.replace('’', "'"), "") code_string = code_string.replace(c.capitalize().replace('’', "'"), "") return code_string def removeComments(code_string): code_string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "" , code_string) # C-style comment # Avoid matching // where it is allowed, e.g., https://www... or file:///... code_string = re.sub(re.compile(r"(?<!:)(?<!/)(?<!\")(?<!\"\s\s)(?<!file:/)//.*?\n" ) ,"" , code_string) # C++-style comment return code_string def removeSingleQuotes(code_string): code_string = code_string.replace('\\\\', " ") # Separate at \\ code_string = code_string.replace('\"\\\\\"', "") code_string = code_string.replace("\\\"", " ") code_string = code_string.replace("'\"'", "") code_string = code_string.replace('…', ' ') return code_string def removeHexSpecifiers(code_string): # Find all hex numbers looking = True while looking: m = re.search(r'(0x[0-9a-fA-F]*)', code_string) if m: code_string = code_string.replace(m.group(0), "") else: looking = False return code_string # Create a File object that knows about all of the strings in the given file. def findStrings(filename): with open(filename, 'r', encoding="utf8") as f: contents = f.read() # Remove comments & embedded quotes so as not to trip up RE. contents = removeContractions(contents) contents = removeWhitespaceControl(contents) contents = removeSingleQuotes(contents) contents = removeHexSpecifiers(contents) # Create file object. file = File(filename) # What we check depends upon file type. if file.code_file: contents = removeComments(contents) # Code so only checking strings. matches = re.finditer(r'\"([^\"]*)\"', contents) for m in matches: file.add(m.group(1)) else: # A documentation file, so examine all words. for w in contents.split(): file.add(w) return file # Test for whether the given file was automatically generated. def isGeneratedFile(filename): # Check file exists - e.g. may have been deleted in a recent commit. if not os.path.exists(filename): return False if not filename.endswith('.c'): return False # This file is generated, but notice is further in than want to check for all files if filename.endswith('pci-ids.c') or filename.endswith('services-data.c') or filename.endswith('manuf-data.c'): return True # Open file f_read = open(os.path.join(filename), 'r', encoding="utf8") for line_no,line in enumerate(f_read): # The comment to say that its generated is near the top, so give up once # get a few lines down. 
if line_no > 10: f_read.close() return False if (line.find('Generated automatically') != -1 or line.find('Autogenerated from') != -1 or line.find('is autogenerated') != -1 or line.find('automatically generated by Pidl') != -1 or line.find('Created by: The Qt Meta Object Compiler') != -1 or line.find('This file was generated') != -1 or line.find('This filter was automatically generated') != -1 or line.find('This file is auto generated, do not edit!') != -1 or line.find('this file is automatically generated') != -1): f_read.close() return True # OK, looks like a hand-written file! f_read.close() return False def isAppropriateFile(filename): file, extension = os.path.splitext(filename) if filename.find('CMake') != -1: return False return extension in { '.adoc', '.c', '.cpp', '.pod', '.nsi', '.txt'} or file.endswith('README') def findFilesInFolder(folder, recursive=True): files_to_check = [] if recursive: for root, subfolders, files in os.walk(folder): for f in files: if should_exit: return f = os.path.join(root, f) if isAppropriateFile(f) and not isGeneratedFile(f): files_to_check.append(f) else: for f in sorted(os.listdir(folder)): f = os.path.join(folder, f) if isAppropriateFile(f) and not isGeneratedFile(f): files_to_check.append(f) return files_to_check # Check the given file. def checkFile(filename): # Check file exists - e.g. may have been deleted in a recent commit. if not os.path.exists(filename): print(filename, 'does not exist!') return file = findStrings(filename) file.spellCheck() ################################################################# # Main logic. # command-line args. Controls which files should be checked. # If no args given, will just scan epan/dissectors folder. parser = argparse.ArgumentParser(description='Check spellings in specified files') parser.add_argument('--file', action='append', help='specify individual file to test') parser.add_argument('--folder', action='store', default='', help='specify folder to test') parser.add_argument('--no-recurse', action='store_true', default='', help='do not recurse inside chosen folder') parser.add_argument('--commits', action='store', help='last N commits to check') parser.add_argument('--open', action='store_true', help='check open files') args = parser.parse_args() # Get files from wherever command-line args indicate. files = [] if args.file: # Add specified file(s) for f in args.file: if not os.path.isfile(f): print('Chosen file', f, 'does not exist.') exit(1) else: files.append(f) elif args.commits: # Get files affected by specified number of commits. command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits] files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] # Filter files files = list(filter(lambda f : os.path.exists(f) and isAppropriateFile(f) and not isGeneratedFile(f), files)) elif args.open: # Unstaged changes. command = ['git', 'diff', '--name-only'] files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] # Filter files. files = list(filter(lambda f : isAppropriateFile(f) and not isGeneratedFile(f), files)) # Staged changes. command = ['git', 'diff', '--staged', '--name-only'] files_staged = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()] # Filter files. 
files_staged = list(filter(lambda f : isAppropriateFile(f) and not isGeneratedFile(f), files_staged)) for f in files_staged: if not f in files: files.append(f) else: # By default, scan dissectors directory folder = os.path.join('epan', 'dissectors') # But overwrite with any folder entry. if args.folder: folder = args.folder if not os.path.isdir(folder): print('Folder', folder, 'not found!') exit(1) # Find files from folder. print('Looking for files in', folder) files = findFilesInFolder(folder, not args.no_recurse) # If scanning a subset of files, list them here. print('Examining:') if args.file or args.folder or args.commits or args.open: if files: print(' '.join(files), '\n') else: print('No files to check.\n') else: print('All dissector modules\n') # Now check the chosen files. for f in files: # Check this file. checkFile(f) # But get out if control-C has been pressed. if should_exit: exit(1) # Show the most commonly not-recognised words. print('') counter = Counter(missing_words).most_common(100) if len(counter) > 0: for c in counter: print(c[0], ':', c[1]) # Show error count. print('\n' + bcolors.BOLD + str(len(missing_words)) + ' issues found' + bcolors.ENDC + '\n')
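Much of the script's hit rate comes from camelCaseSplit(), which breaks identifiers on lower-to-upper transitions while keeping acronym runs together. A standalone copy of that helper, exercised on a couple of invented identifiers:

import re

def camelCaseSplit(identifier):
    # Split before an uppercase letter that follows a lowercase one, or before
    # the capital that starts a new word after an acronym run (the 'P' of 'Payload'
    # in 'RTPPayloadType').
    matches = re.finditer(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]

print(camelCaseSplit('maxPacketLength'))   # ['max', 'Packet', 'Length']
print(camelCaseSplit('RTPPayloadType'))    # ['RTP', 'Payload', 'Type']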
Python
wireshark/tools/check_static.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later import os import re import subprocess import argparse import signal # Look for dissector symbols that could/should be static. # This will not run on Windows, unless/until we check the platform # and use (I think) dumpbin.exe # Try to exit soon after Ctrl-C is pressed. should_exit = False def signal_handler(sig, frame): global should_exit should_exit = True print('You pressed Ctrl+C - exiting') signal.signal(signal.SIGINT, signal_handler) # Allow this as a default build folder name... build_folder = os.getcwd() + '-build' # Record which symbols are referred to (by a set of files). class CalledSymbols: def __init__(self): self.referred = set() def addCalls(self, file): # Make sure that file is built. last_dir = os.path.split(os.path.dirname(file))[-1] if file.find('ui/cli') != -1: # A tshark target-only file object_file = os.path.join(build_folder, 'CMakeFiles', ('tshark' + '.dir'), file + '.o') elif file.find('ui/qt') != -1: object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', ('qtui' + '.dir'), os.path.basename(file) + '.o') else: if file.endswith('dissectors.c'): object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', 'dissector-registration' + '.dir', os.path.basename(file) + '.o') else: object_file = os.path.join(build_folder, os.path.dirname(file), 'CMakeFiles', last_dir + '.dir', os.path.basename(file) + '.o') if not os.path.exists(object_file): #print('Warning -', object_file, 'does not exist') return command = ['nm', object_file] for f in subprocess.check_output(command).splitlines(): l = str(f)[2:-1] # Lines might or might not have an address before letter and symbol. p1 = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') p2 = re.compile(r'[ ]* ([a-zA-Z]) (.*)') m = p1.match(l) if not m: m = p2.match(l) if m: letter = m.group(1) function_name = m.group(2) # Only interested in undefined references to symbols. if letter == 'U': self.referred.add(function_name) # Record which symbols are defined in a single file. class DefinedSymbols: def __init__(self, file): self.filename = file self.global_dict = {} self.header_file_contents = None # Make sure that file is built. object_file = os.path.join(build_folder, 'epan', 'dissectors', 'CMakeFiles', 'dissectors.dir', os.path.basename(file) + '.o') if not os.path.exists(object_file): #print('Warning -', object_file, 'does not exist') return header_file= file.replace('.c', '.h') try: f = open(header_file, 'r') self.header_file_contents = f.read() except IOError: pass command = ['nm', object_file] for f in subprocess.check_output(command).splitlines(): # Line consists of whitespace, [address], letter, symbolName l = str(f)[2:-1] p = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)') m = p.match(l) if m: letter = m.group(1) function_name = m.group(2) # globally-defined symbols. Would be 't' or 'd' if already static. if letter in 'TD': self.add(function_name, l) def add(self, letter, function_name): self.global_dict[letter] = function_name def mentionedInHeaders(self, symbol): if self.header_file_contents: if self.header_file_contents.find(symbol) != -1: return True # Also check some of the 'common' header files that don't match the dissector file name. # TODO: could cache the contents of these files, but it's not that slow. 
common_mismatched_headers = [ os.path.join('epan', 'dissectors', 'packet-ncp-int.h'), os.path.join('epan', 'dissectors', 'packet-mq.h'), os.path.join('epan', 'dissectors', 'packet-ip.h'), os.path.join('epan', 'dissectors', 'packet-gsm_a_common.h'), os.path.join('epan', 'dissectors', 'packet-epl.h'), os.path.join('epan', 'dissectors', 'packet-bluetooth.h'), os.path.join('epan', 'dissectors', 'packet-dcerpc.h'), os.path.join('epan', 'ip_opts.h'), os.path.join('epan', 'eap.h')] for hf in common_mismatched_headers: try: f = open(hf) contents = f.read() if contents.find(symbol) != -1: return True except EnvironmentError: pass return False def check(self, called_symbols): global issues_found for f in self.global_dict: if not f in called_symbols: mentioned_in_header = self.mentionedInHeaders(f) fun = self.global_dict[f] print(self.filename, '(' + fun + ')', 'is not referred to so could be static?', '(in header)' if mentioned_in_header else '') issues_found += 1 # Helper functions. def isDissectorFile(filename): p = re.compile(r'(packet|file)-.*\.c') return p.match(filename) # Test for whether the given dissector file was automatically generated. def isGeneratedFile(filename): # Check file exists - e.g. may have been deleted in a recent commit. if not os.path.exists(filename): return False if not filename.endswith('.c'): return False # Open file f_read = open(os.path.join(filename), 'r') lines_tested = 0 for line in f_read: # The comment to say that its generated is near the top, so give up once # get a few lines down. if lines_tested > 10: f_read.close() return False if (line.find('Generated automatically') != -1 or line.find('Autogenerated from') != -1 or line.find('is autogenerated') != -1 or line.find('automatically generated by Pidl') != -1 or line.find('Created by: The Qt Meta Object Compiler') != -1 or line.find('This file was generated') != -1 or line.find('This filter was automatically generated') != -1): f_read.close() return True lines_tested = lines_tested + 1 # OK, looks like a hand-written file! f_read.close() return False def findDissectorFilesInFolder(folder, include_generated): # Look at files in sorted order, to give some idea of how far through is. tmp_files = [] for f in sorted(os.listdir(folder)): if should_exit: return if isDissectorFile(f): if include_generated or not isGeneratedFile(os.path.join('epan', 'dissectors', f)): filename = os.path.join(folder, f) tmp_files.append(filename) return tmp_files def findFilesInFolder(folder): # Look at files in sorted order, to give some idea of how far through is. tmp_files = [] for f in sorted(os.listdir(folder)): if should_exit: return if f.endswith('.c') or f.endswith('.cpp'): filename = os.path.join(folder, f) tmp_files.append(filename) return tmp_files def is_dissector_file(filename): p = re.compile(r'.*packet-.*\.c') return p.match(filename) issues_found = 0 ################################################################# # Main logic. # command-line args. Controls which dissector files should be checked. # If no args given, will just scan epan/dissectors folder. 
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--build-folder', action='store', default='',
                    help='build folder', required=False)
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')

args = parser.parse_args()

# Get files from wherever command-line args indicate.
files = []

if args.build_folder:
    build_folder = args.build_folder

if args.file:
    # Add specified file(s)
    for f in args.file:
        if not f.startswith('epan'):
            f = os.path.join('epan', 'dissectors', f)
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            files.append(f)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Will examine dissector files only
    files = list(filter(lambda f : is_dissector_file(f), files))
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only']
    files = [f.decode('utf-8')
             for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files = list(filter(lambda f : is_dissector_file(f), files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only']
    files_staged = [f.decode('utf-8')
                    for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
    # Merge staged files into the overall list, avoiding duplicates.
    for f in files_staged:
        if not f in files:
            files.append(f)
else:
    # Find all dissector files from folder.
    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'),
                                       include_generated=False)


# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')

if not os.path.isdir(build_folder):
    print('Build directory not valid', build_folder, '- please set with --build-folder')
    exit(1)


# Get the set of called functions and referred-to data.
called = CalledSymbols()
for d in findDissectorFilesInFolder(os.path.join('epan', 'dissectors'), include_generated=True):
    called.addCalls(d)
called.addCalls(os.path.join('epan', 'dissectors', 'dissectors.c'))
# Also check calls from GUI code
for d in findFilesInFolder('ui'):
    called.addCalls(d)
for d in findFilesInFolder(os.path.join('ui', 'qt')):
    called.addCalls(d)
# These are from tshark..
for d in findFilesInFolder(os.path.join('ui', 'cli')):
    called.addCalls(d)


# Now check identified files.
for f in files:
    if should_exit:
        exit(1)
    DefinedSymbols(f).check(called.referred)

# Show summary.
print(issues_found, 'issues found')
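Both symbol classes above parse nm output with the same pair of regexes, since lines may or may not carry an address before the type letter and the symbol name. A minimal sketch of that parsing against two fabricated nm lines ('T' marks a global definition, 'U' an undefined reference, i.e. a call); real input comes from running nm on an object file.

import re

# Fabricated sample lines, for illustration only.
sample_nm_output = [
    '0000000000001180 T dissect_foo',
    '                 U proto_tree_add_item',
]

p1 = re.compile(r'[0-9a-f]* ([a-zA-Z]) (.*)')   # with a leading address
p2 = re.compile(r'[ ]* ([a-zA-Z]) (.*)')        # without a leading address
for l in sample_nm_output:
    m = p1.match(l) or p2.match(l)
    if m:
        print('letter=%s symbol=%s' % (m.group(1), m.group(2)))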
Python
wireshark/tools/check_tfs.py
#!/usr/bin/env python3
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import re
import subprocess
import argparse
import signal

# This utility scans for tfs items, and works out if standard ones
# could have been used instead (from epan/tfs.c)
# Can also check for value_string where common tfs could be used instead.

# TODO:
# - check how many of the definitions in epan/tfs.c are used in other dissectors
#   - although even if unused, might be in external dissectors?
# - consider merging Item class with check_typed_item_calls.py ?


# Try to exit soon after Ctrl-C is pressed.
should_exit = False

def signal_handler(sig, frame):
    global should_exit
    should_exit = True
    print('You pressed Ctrl+C - exiting')

signal.signal(signal.SIGINT, signal_handler)


# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        return False

    # Open file
    f_read = open(os.path.join(filename), 'r')
    lines_tested = 0
    for line in f_read:
        # The comment to say that it's generated is near the top, so give up once
        # get a few lines down.
        if lines_tested > 10:
            f_read.close()
            return False
        if (line.find('Generated automatically') != -1 or
            line.find('Generated Automatically') != -1 or
            line.find('Autogenerated from') != -1 or
            line.find('is autogenerated') != -1 or
            line.find('automatically generated by Pidl') != -1 or
            line.find('Created by: The Qt Meta Object Compiler') != -1 or
            line.find('This file was generated') != -1 or
            line.find('This filter was automatically generated') != -1 or
            line.find('This file is auto generated, do not edit!') != -1 or
            line.find('This file is auto generated') != -1):

            f_read.close()
            return True
        lines_tested = lines_tested + 1

    # OK, looks like a hand-written file!
    f_read.close()
    return False


# Keep track of custom entries that might appear in multiple dissectors,
# so we can consider adding them to tfs.c
custom_tfs_entries = {}
def AddCustomEntry(val1, val2, file):
    global custom_tfs_entries
    if (val1, val2) in custom_tfs_entries:
        custom_tfs_entries[(val1, val2)].append(file)
    else:
        custom_tfs_entries[(val1, val2)] = [file]


class TFS:
    def __init__(self, file, name, val1, val2):
        self.file = file
        self.name = name
        self.val1 = val1
        self.val2 = val2

        # Should not be empty
        if not len(val1) or not len(val2):
            print(file, name, 'has an empty field', self)
        #else:
            # Strange if one begins with capital but other doesn't?
            #if val1[0].isalpha() and val2[0].isalpha():
            #    if val1[0].isupper() != val2[0].isupper():
            #        print(file, name, 'one starts lowercase and the other upper', self)

        # Leading or trailing space should not be needed.
        # (N.B. val1 is the true string, val2 the false string)
        if val1.startswith(' ') or val1.endswith(' '):
            print('N.B.: file=' + self.file + ' ' + self.name + ' - true val begins or ends with space \"' + self.val1 + '\"')
        if val2.startswith(' ') or val2.endswith(' '):
            print('N.B.: file=' + self.file + ' ' + self.name + ' - false val begins or ends with space \"' + self.val2 + '\"')

        # Should really not be identical...
        if val1.lower() == val2.lower():
            print(file, name, 'true and false strings are the same', self)

        # Shouldn't both be negation (with exception..)
        if (file != os.path.join('epan', 'dissectors', 'packet-smb.c') and
            (val1.lower().find('not ') != -1) and (val2.lower().find('not ') != -1)):
            print(file, name, self, 'both strings contain not')

        # Not expecting full-stops inside strings..
        if val1.find('.') != -1 or val2.find('.') != -1:
            print(file, name, 'Period found in string..', self)

    def __str__(self):
        return '{' + '"' + self.val1 + '", "' + self.val2 + '"}'


class ValueString:
    def __init__(self, file, name, vals):
        self.file = file
        self.name = name
        self.raw_vals = vals
        self.parsed_vals = {}
        self.looks_like_tfs = True

        no_lines = self.raw_vals.count('{')
        if no_lines != 3:
            self.looks_like_tfs = False
            return

        # Now parse out each entry in the value_string
        matches = re.finditer(r'\{([\"a-zA-Z\s\d\,]*)\}', self.raw_vals)
        for m in matches:
            entry = m[1]
            # Check each entry looks like part of a TFS entry.
            match = re.match(r'\s*([01])\,\s*\"([a-zA-Z\d\s]*\s*)\"', entry)
            if match:
                if match[1] == '1':
                    self.parsed_vals[True] = match[2]
                else:
                    self.parsed_vals[False] = match[2]

                # Now have both entries
                if len(self.parsed_vals) == 2:
                    break
            else:
                self.looks_like_tfs = False
                break

    def __str__(self):
        return '{' + '"' + self.raw_vals + '"}'


field_widths = {
    'FT_BOOLEAN' : 64,   # TODO: Width depends upon 'display' field
    'FT_CHAR'    : 8,
    'FT_UINT8'   : 8,
    'FT_INT8'    : 8,
    'FT_UINT16'  : 16,
    'FT_INT16'   : 16,
    'FT_UINT24'  : 24,
    'FT_INT24'   : 24,
    'FT_UINT32'  : 32,
    'FT_INT32'   : 32,
    'FT_UINT40'  : 40,
    'FT_INT40'   : 40,
    'FT_UINT48'  : 48,
    'FT_INT48'   : 48,
    'FT_UINT56'  : 56,
    'FT_INT56'   : 56,
    'FT_UINT64'  : 64,
    'FT_INT64'   : 64
}

# Simplified version of class that is in check_typed_item_calls.py
class Item:

    previousItem = None

    def __init__(self, filename, hf, filter, label, item_type, type_modifier, strings, macros,
                 mask=None, check_mask=False):
        self.filename = filename
        self.hf = hf
        self.filter = filter
        self.label = label
        self.strings = strings
        self.mask = mask

        # N.B. Not setting mask by looking up macros.
        self.item_type = item_type
        self.type_modifier = type_modifier

        self.set_mask_value(macros)

        self.bits_set = 0
        for n in range(0, self.get_field_width_in_bits()):
            if self.check_bit(self.mask_value, n):
                self.bits_set += 1

    # Return true if bit position n is set in value.
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    def __str__(self):
        return 'Item ({0} "{1}" {2} type={3}:{4} strings={5} mask={6})'.format(self.filename, self.label, self.filter,
                                                                               self.item_type, self.type_modifier, self.strings, self.mask)

    def set_mask_value(self, macros):
        try:
            self.mask_read = True

            # Substitute mask if found as a macro..
            if self.mask in macros:
                self.mask = macros[self.mask]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask):
                self.mask_read = False
                self.mask_value = 0
                return

            # Read according to the appropriate base.
            if self.mask.startswith('0x'):
                self.mask_value = int(self.mask, 16)
            elif self.mask.startswith('0'):
                self.mask_value = int(self.mask, 8)
            else:
                self.mask_value = int(self.mask, 10)
        except:
            self.mask_read = False
            self.mask_value = 0

    def get_field_width_in_bits(self):
        if self.item_type == 'FT_BOOLEAN':
            if self.type_modifier == 'NULL':
                return 8  # i.e. 1 byte
            elif self.type_modifier == 'BASE_NONE':
                return 8
            elif self.type_modifier == 'SEP_DOT':   # from proto.h, only meant for FT_BYTES
                return 64
            else:
                try:
                    # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble.
                    return int((int(self.type_modifier) + 3)/4)*4
                except Exception:
                    #print('oops', self)
                    return 0
        else:
            if self.item_type in field_widths:
                # Lookup fixed width for this type
                return field_widths[self.item_type]
            else:
                #print('returning 0 for', self)
                return 0


def removeComments(code_string):
    code_string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", code_string)  # C-style comment
    code_string = re.sub(re.compile(r"//.*?\n"), "", code_string)               # C++-style comment
    return code_string


# Look for true_false_string items in a dissector file.
def findTFS(filename):
    tfs_found = {}

    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()
        # Example: const true_false_string tfs_true_false = { "True", "False" };

        # Remove comments so as not to trip up RE.
        contents = removeComments(contents)

        matches = re.finditer(r'\sconst\s*true_false_string\s*([a-zA-Z0-9_]*)\s*=\s*{\s*\"([a-zA-Z_0-9/:! ]*)\"\s*,\s*\"([a-zA-Z_0-9/:! ]*)\"', contents)
        for m in matches:
            name = m.group(1)
            val1 = m.group(2)
            val2 = m.group(3)

            # Store this entry.
            tfs_found[name] = TFS(filename, name, val1, val2)

    return tfs_found


# Look for value_string entries in a dissector file.
def findValueStrings(filename):
    vals_found = {}

    #static const value_string radio_type_vals[] =
    #{
    #    { 0, "FDD"},
    #    { 1, "TDD"},
    #    { 0, NULL }
    #};

    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()

        # Remove comments so as not to trip up RE.
        contents = removeComments(contents)

        matches = re.finditer(r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9\s\"]*)\};', contents)
        for m in matches:
            name = m.group(1)
            vals = m.group(2)
            vals_found[name] = ValueString(filename, name, vals)

    return vals_found


# Look for hf items (i.e. full item to be registered) in a dissector file.
def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
    items = {}
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()
        # Remove comments so as not to trip up RE.
        contents = removeComments(contents)

        # N.B. re extends all the way to HFILL to avoid greedy matching
        matches = re.finditer(
            r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL',
            contents)
        for m in matches:
            # Store this item.
            hf = m.group(1)
            items[hf] = Item(filename, hf, filter=m.group(3), label=m.group(2), item_type=m.group(4),
                             type_modifier=m.group(5),
                             strings=m.group(6),
                             macros=macros,
                             mask=m.group(7))
    return items


def find_macros(filename):
    macros = {}
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()
        # Remove comments so as not to trip up RE.
        contents = removeComments(contents)

        matches = re.finditer(r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents)
        for m in matches:
            # Store this mapping.
            macros[m.group(1)] = m.group(2)
    return macros


def is_dissector_file(filename):
    p = re.compile(r'.*packet-.*\.c')
    return p.match(filename)


def findDissectorFilesInFolder(folder):
    # Look at files in sorted order, to give some idea of how far through we are.
    files = []
    for f in sorted(os.listdir(folder)):
        if should_exit:
            # Stop scanning, but still return a list so callers can iterate it.
            break
        if is_dissector_file(f):
            filename = os.path.join(folder, f)
            files.append(filename)
    return files


warnings_found = 0
errors_found = 0


# Check the given dissector file.
def checkFile(filename, common_tfs, look_for_common=False, check_value_strings=False):
    global warnings_found
    global errors_found
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        print(filename, 'does not exist!')
        return

    # Find items.
    file_tfs = findTFS(filename)

    # See if any of these items already existed in tfs.c
    for f in file_tfs:
        for c in common_tfs:
            found = False

            #
            # Do not do this check for plugins; plugins cannot import
            # data values from libwireshark (functions, yes; data
            # values, no).
            #
            # Test whether there's a common prefix for the file name
            # and "plugin/epan/"; if so, this is a plugin, and there
            # is no common path and os.path.commonprefix returns an
            # empty string, otherwise it returns the common path, so
            # we check whether the common path is an empty string.
            #
            if os.path.commonprefix([filename, 'plugin/epan/']) == '':
                exact_case = False
                if file_tfs[f].val1 == common_tfs[c].val1 and file_tfs[f].val2 == common_tfs[c].val2:
                    found = True
                    exact_case = True
                elif file_tfs[f].val1.upper() == common_tfs[c].val1.upper() and file_tfs[f].val2.upper() == common_tfs[c].val2.upper():
                    found = True

                if found:
                    print("Error:" if exact_case else "Warn: ", filename, f, "- could have used", c, 'from tfs.c instead: ', common_tfs[c],
                          '' if exact_case else ' (capitalisation differs)')
                    if exact_case:
                        errors_found += 1
                    else:
                        warnings_found += 1
                    break
        if not found:
            if look_for_common:
                AddCustomEntry(file_tfs[f].val1, file_tfs[f].val2, filename)

    if check_value_strings:
        # Get macros
        macros = find_macros(filename)

        # Get value_string entries.
        vs = findValueStrings(filename)

        # Also get hf items
        items = find_items(filename, macros, check_mask=True)

        for v in vs:
            if vs[v].looks_like_tfs:
                found = False
                exact_case = False

                #print('Candidate', v, vs[v])
                for c in common_tfs:
                    found = False

                    #
                    # Do not do this check for plugins; plugins cannot import
                    # data values from libwireshark (functions, yes; data
                    # values, no).
                    #
                    # Test whether there's a common prefix for the file name
                    # and "plugin/epan/"; if so, this is a plugin, and there
                    # is no common path and os.path.commonprefix returns an
                    # empty string, otherwise it returns the common path, so
                    # we check whether the common path is an empty string.
                    #
                    if os.path.commonprefix([filename, 'plugin/epan/']) == '':
                        exact_case = False
                        if common_tfs[c].val1 == vs[v].parsed_vals[True] and common_tfs[c].val2 == vs[v].parsed_vals[False]:
                            found = True
                            exact_case = True
                        elif common_tfs[c].val1.upper() == vs[v].parsed_vals[True].upper() and common_tfs[c].val2.upper() == vs[v].parsed_vals[False].upper():
                            found = True

                        # Do values match?
                        if found:
                            # OK, now look for items that:
                            # - have VALS(v) AND
                            # - have a mask width of 1 bit (no good if field can have values > 1...)
                            for i in items:
                                if re.match(r'VALS\(\s*'+v+r'\s*\)', items[i].strings):
                                    if items[i].bits_set == 1:
                                        print("Warn:" if exact_case else "Note:", filename, 'value_string', "'"+v+"'",
                                              "- could have used", c, 'from tfs.c instead: ', common_tfs[c], 'for', i,
                                              '' if exact_case else ' (capitalisation differs)')
                                        if exact_case:
                                            warnings_found += 1


#################################################################
# Main logic.

# command-line args.  Controls which dissector files should be checked.
# If no args given, will just scan epan/dissectors folder.
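# Illustrative invocations (a sketch only; assumes this script lives at
# tools/check_tfs.py and is run from the top of a Wireshark source tree;
# 'packet-foo.c' is a hypothetical dissector name):
#   python3 tools/check_tfs.py                                         # scan all of epan/dissectors
#   python3 tools/check_tfs.py --file packet-foo.c --check-value-strings
#   python3 tools/check_tfs.py --commits 5 --common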
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')
parser.add_argument('--check-value-strings', action='store_true',
                    help='check whether value_strings could have been a common tfs instead')
parser.add_argument('--common', action='store_true',
                    help='check for potential new entries for tfs.c')

args = parser.parse_args()


# Get files from wherever command-line args indicate.
files = []
if args.file:
    # Add specified file(s)
    for f in args.file:
        if not f.startswith('epan'):
            f = os.path.join('epan', 'dissectors', f)
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            files.append(f)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
    files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Will examine dissector files only
    files = list(filter(lambda f: is_dissector_file(f), files))
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only']
    files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files = list(filter(lambda f: is_dissector_file(f), files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only']
    files_staged = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files_staged = list(filter(lambda f: is_dissector_file(f), files_staged))
    for f in files_staged:
        if f not in files:
            files.append(f)
else:
    # Find all dissector files from folder.
    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))


# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
    if files:
        print(' '.join(files), '\n')
    else:
        print('No files to check.\n')
else:
    print('All dissector modules\n')


# Get standard/shared entries from epan/tfs.c.
tfs_entries = findTFS(os.path.join('epan', 'tfs.c'))

# Now check the files to see if they could have used shared ones instead.
for f in files:
    if should_exit:
        exit(1)
    if not isGeneratedFile(f):
        checkFile(f, tfs_entries, look_for_common=args.common, check_value_strings=args.check_value_strings)

# Report on commonly-defined values.
if args.common:
    # Looking for items that could potentially be moved to tfs.c
    for c in custom_tfs_entries:
        # Only want to see items that have 3 or more occurrences.
        # Even then, probably only want to consider ones that sound generic.
        if len(custom_tfs_entries[c]) > 2:
            print(c, 'appears', len(custom_tfs_entries[c]), 'times, in: ', custom_tfs_entries[c])


# Show summary.
print(warnings_found, 'warnings found')
if errors_found:
    print(errors_found, 'errors found')
    exit(1)
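# Worked example of the main check above (illustrative only; 'packet-foo.c' is
# a hypothetical dissector).  A local definition such as
#     static const true_false_string foo_enabled = { "Enabled", "Disabled" };
# duplicates tfs_enabled_disabled from epan/tfs.c, so it would be reported as
# an error; { "enabled", "disabled" } would only be a warning, since just the
# capitalisation differs.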
Python
wireshark/tools/check_typed_item_calls.py
#!/usr/bin/env python3 # Wireshark - Network traffic analyzer # By Gerald Combs <[email protected]> # Copyright 1998 Gerald Combs # # SPDX-License-Identifier: GPL-2.0-or-later import os import re import argparse import signal import subprocess # This utility scans the dissector code for proto_tree_add_...() calls that constrain the type # or length of the item added, and checks that the used item is acceptable. # # - Note that this can only work where the hf_item variable or length is passed in directly - simple # macro substitution is now done in a limited way # TODO: # - Attempt to check for allowed encoding types (most likely will be literal values |'d)? # - Create maps from type -> display types for hf items (see display (FIELDDISPLAY)) in docs/README.dissector # Try to exit soon after Ctrl-C is pressed. should_exit = False def signal_handler(sig, frame): global should_exit should_exit = True print('You pressed Ctrl+C - exiting') signal.signal(signal.SIGINT, signal_handler) warnings_found = 0 errors_found = 0 def name_has_one_of(name, substring_list): for word in substring_list: if name.lower().find(word) != -1: return True return False # A call is an individual call to an API we are interested in. # Internal to APICheck below. class Call: def __init__(self, hf_name, macros, line_number=None, length=None, fields=None): self.hf_name = hf_name self.line_number = line_number self.fields = fields self.length = None if length: try: self.length = int(length) except: if length.isupper(): if length in macros: try: self.length = int(macros[length]) except: pass pass # These are variable names that have been seen to be used in calls.. common_hf_var_names = { 'hf_index', 'hf_item', 'hf_idx', 'hf_x', 'hf_id', 'hf_cookie', 'hf_flag', 'hf_dos_time', 'hf_dos_date', 'hf_value', 'hf_num', 'hf_cause_value', 'hf_uuid', 'hf_endian', 'hf_ip', 'hf_port', 'hf_suff', 'hf_string', 'hf_uint', 'hf_tag', 'hf_type', 'hf_hdr', 'hf_field', 'hf_opcode', 'hf_size', 'hf_entry', 'field' } item_lengths = {} item_lengths['FT_CHAR'] = 1 item_lengths['FT_UINT8'] = 1 item_lengths['FT_INT8'] = 1 item_lengths['FT_UINT16'] = 2 item_lengths['FT_INT16'] = 2 item_lengths['FT_UINT24'] = 3 item_lengths['FT_INT24'] = 3 item_lengths['FT_UINT32'] = 4 item_lengths['FT_INT32'] = 4 item_lengths['FT_UINT40'] = 5 item_lengths['FT_INT40'] = 5 item_lengths['FT_UINT48'] = 6 item_lengths['FT_INT48'] = 6 item_lengths['FT_UINT56'] = 7 item_lengths['FT_INT56'] = 7 item_lengths['FT_UINT64'] = 8 item_lengths['FT_INT64'] = 8 item_lengths['FT_ETHER'] = 6 # TODO: other types... # A check for a particular API function. class APICheck: def __init__(self, fun_name, allowed_types, positive_length=False): self.fun_name = fun_name self.allowed_types = allowed_types self.positive_length = positive_length self.calls = [] if fun_name.startswith('ptvcursor'): # RE captures function name + 1st 2 args (always ptvc + hfindex) self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+)') elif fun_name.find('add_bitmask') == -1: # Normal case. # RE captures function name + 1st 2 args (always tree + hfindex + length) self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)') else: # _add_bitmask functions. 
            # RE captures function name + 1st + 4th args (always tree + hfindex)
            # 6th arg is 'fields'
            self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(([a-zA-Z0-9_]+),\s*[a-zA-Z0-9_]+,\s*[a-zA-Z0-9_]+,\s*([a-zA-Z0-9_]+)\s*,\s*[a-zA-Z0-9_]+\s*,\s*([a-zA-Z0-9_]+)\s*,')

        self.file = None
        self.mask_allowed = True
        if fun_name.find('proto_tree_add_bits_') != -1:
            self.mask_allowed = False

    def find_calls(self, file, macros):
        self.file = file
        self.calls = []

        with open(file, 'r', encoding="utf8") as f:
            contents = f.read()
            lines = contents.splitlines()
            total_lines = len(lines)
            # N.B. 1-based line numbers, so that reported positions match editors.
            for line_number, line in enumerate(lines, start=1):
                # Want to check this, and next few lines
                to_check = lines[line_number-1] + '\n'
                # Nothing to check if function name isn't in it
                if to_check.find(self.fun_name) != -1:
                    # Ok, add the next file lines before trying RE
                    for i in range(1, 4):
                        if to_check.find(';') != -1:
                            break
                        elif line_number+i < total_lines:
                            to_check += (lines[line_number-1+i] + '\n')
                    m = self.p.search(to_check)
                    if m:
                        fields = None
                        length = None

                        if self.fun_name.find('add_bitmask') != -1:
                            fields = m.group(3)
                        else:
                            if self.p.groups == 3:
                                length = m.group(3)

                        # Add call. We have length if re had 3 groups.
                        self.calls.append(Call(m.group(2), macros, line_number=line_number, length=length, fields=fields))

    # Return true if bit position n is set in value.
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    def does_mask_cover_value(self, mask, value):
        # Walk past any l.s. 0 bits in value
        n = 0
        while not self.check_bit(value, n) and n <= 63:
            n += 1

        # Walk through any bits that are set and check they are in mask
        while self.check_bit(value, n) and n <= 63:
            if not self.check_bit(mask, n):
                return False
            n += 1

        return True

    def check_against_items(self, items_defined, items_declared, items_declared_extern,
                            check_missing_items=False, field_arrays=None):
        global errors_found
        global warnings_found

        for call in self.calls:
            # Check lengths, but for now only for APIs that have length in bytes.
            if self.fun_name.find('add_bits') == -1 and call.hf_name in items_defined:
                if call.length and items_defined[call.hf_name].item_type in item_lengths:
                    if item_lengths[items_defined[call.hf_name].item_type] < call.length:
                        print('Warning:', self.file + ':' + str(call.line_number),
                              self.fun_name + ' called for', call.hf_name, ' - ',
                              'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length)
                        warnings_found += 1

            # Needs a +ve length
            if self.positive_length and call.length is not None:
                if call.length != -1 and call.length <= 0:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with length ' + str(call.length) + ' - must be > 0 or -1')
                    errors_found += 1

            if call.hf_name in items_defined:
                # Is type allowed?
                if not items_defined[call.hf_name].item_type in self.allowed_types:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with type ' + items_defined[call.hf_name].item_type)
                    print('    (allowed types are', self.allowed_types, ')\n')
                    errors_found += 1
                # No mask allowed
                if not self.mask_allowed and items_defined[call.hf_name].mask_value != 0:
                    print('Error: ' + self.fun_name + '(.., ' + call.hf_name + ', ...) called at ' +
                          self.file + ':' + str(call.line_number) +
                          ' with mask ' + items_defined[call.hf_name].mask + ' (must be zero!)\n')
                    errors_found += 1

            if self.fun_name.find('add_bitmask') != -1 and call.hf_name in items_defined and field_arrays:
                if call.fields in field_arrays:
                    if (items_defined[call.hf_name].mask_value and
                        field_arrays[call.fields][1] != 0 and items_defined[call.hf_name].mask_value != field_arrays[call.fields][1]):
                        # TODO: only really a problem if bit is set in array but not in top-level item?
                        if not self.does_mask_cover_value(items_defined[call.hf_name].mask_value,
                                                          field_arrays[call.fields][1]):
                            print('Warning:', self.file, call.hf_name, call.fields,
                                  "masks don't match. root=", items_defined[call.hf_name].mask,
                                  "array has", hex(field_arrays[call.fields][1]))
                            warnings_found += 1

            if check_missing_items:
                if call.hf_name in items_declared and not call.hf_name in items_declared_extern:
                #not in common_hf_var_names:
                    print('Warning:', self.file + ':' + str(call.line_number),
                          self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found')
                    warnings_found += 1


class ProtoTreeAddItemCheck(APICheck):
    def __init__(self, ptv=None):

        # RE will capture whole call.

        if not ptv:
            # proto_item *
            # proto_tree_add_item(proto_tree *tree, int hfindex, tvbuff_t *tvb,
            #                     const gint start, gint length, const guint encoding)
            self.fun_name = 'proto_tree_add_item'
            self.p = re.compile('[^\n]*' + self.fun_name + '\s*\(\s*[a-zA-Z0-9_]+?,\s*([a-zA-Z0-9_]+?),\s*[a-zA-Z0-9_\+\s]+?,\s*[^,.]+?,\s*(.+),\s*([^,.]+?)\);')
        else:
            # proto_item *
            # ptvcursor_add(ptvcursor_t *ptvc, int hfindex, gint length,
            #               const guint encoding)
            self.fun_name = 'ptvcursor_add'
            self.p = re.compile('[^\n]*' + self.fun_name + '\s*\([^,.]+?,\s*([^,.]+?),\s*([^,.]+?),\s*([a-zA-Z0-9_\-\>]+)')

    def find_calls(self, file, macros):
        self.file = file
        self.calls = []
        with open(file, 'r', encoding="utf8") as f:
            contents = f.read()
            lines = contents.splitlines()
            total_lines = len(lines)
            # N.B. 1-based line numbers, so that reported positions match editors.
            for line_number, line in enumerate(lines, start=1):
                # Want to check this, and next few lines
                to_check = lines[line_number-1] + '\n'
                # Nothing to check if function name isn't in it
                fun_idx = to_check.find(self.fun_name)
                if fun_idx != -1:
                    # Ok, add the next file lines before trying RE
                    for i in range(1, 5):
                        if to_check.find(';') != -1:
                            break
                        elif line_number+i < total_lines:
                            to_check += (lines[line_number-1+i] + '\n')
                    # Lose anything before function call itself.
                    to_check = to_check[fun_idx:]
                    m = self.p.search(to_check)
                    if m:
                        # Throw out if parens not matched
                        if m.group(0).count('(') != m.group(0).count(')'):
                            continue

                        enc = m.group(3)
                        hf_name = m.group(1)
                        if not enc.startswith('ENC_'):
                            if not enc in { 'encoding', 'enc', 'client_is_le', 'cigi_byte_order', 'endian', 'endianess', 'machine_encoding', 'byte_order',
                                            'bLittleEndian', 'p_mq_parm->mq_str_enc', 'p_mq_parm->mq_int_enc',
                                            'iEnc', 'strid_enc', 'iCod', 'nl_data->encoding',
                                            'argp->info->encoding', 'gquic_info->encoding', 'writer_encoding',
                                            'tds_get_int2_encoding(tds_info)', 'tds_get_int4_encoding(tds_info)', 'tds_get_char_encoding(tds_info)',
                                            'info->encoding', 'item->encoding', 'DREP_ENC_INTEGER(drep)', 'string_encoding', 'item', 'type',
                                            'dvb_enc_to_item_enc(encoding)', 'packet->enc',
                                            'IS_EBCDIC(uCCS) ? ENC_EBCDIC : ENC_ASCII',
                                            'DREP_ENC_INTEGER(hdr->drep)', 'dhcp_uuid_endian',
                                            'payload_le', 'local_encoding', 'big_endian', 'hf_data_encoding',
                                            'IS_EBCDIC(eStr) ? ENC_EBCDIC : ENC_ASCII',
                                            'big_endian ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
                                            '(skip == 1) ? ENC_BIG_ENDIAN : ENC_LITTLE_ENDIAN',
                                            'pdu_info->sbc', 'pdu_info->mbc',
                                            'seq_info->txt_enc | ENC_NA',
                                            'BASE_SHOW_UTF_8_PRINTABLE' }:
                                global warnings_found
                                print('Warning:', self.file + ':' + str(line_number),
                                      self.fun_name + ' called for "' + hf_name + '"', 'check last/enc param:', enc, '?')
                                warnings_found += 1
                        self.calls.append(Call(hf_name, macros, line_number=line_number, length=m.group(2)))

    def check_against_items(self, items_defined, items_declared, items_declared_extern,
                            check_missing_items=False, field_arrays=None):
        # For now, only complaining if the length of the call is longer than the item type implies.
        #
        # Could also be bugs where the length is always less than the type allows.
        # Would involve keeping track (in the item) of whether any call had used the full length.

        global warnings_found

        for call in self.calls:
            if call.hf_name in items_defined:
                if call.length and items_defined[call.hf_name].item_type in item_lengths:
                    if item_lengths[items_defined[call.hf_name].item_type] < call.length:
                        print('Warning:', self.file + ':' + str(call.line_number),
                              self.fun_name + ' called for', call.hf_name, ' - ',
                              'item type is', items_defined[call.hf_name].item_type, 'but call has len', call.length)
                        warnings_found += 1
            elif check_missing_items:
                if call.hf_name in items_declared and not call.hf_name in items_declared_extern:
                #not in common_hf_var_names:
                    print('Warning:', self.file + ':' + str(call.line_number),
                          self.fun_name + ' called for "' + call.hf_name + '"', ' - but no item found')
                    warnings_found += 1


##################################################################################################
# This is a set of items (by filter name) where we know that the bitmask is non-contiguous,
# but is still believed to be correct.
known_non_contiguous_fields = { 'wlan.fixed.capabilities.cfpoll.sta',
                                'wlan.wfa.ie.wme.qos_info.sta.reserved',
                                'btrfcomm.frame_type',   # https://os.itec.kit.edu/downloads/sa_2006_roehricht-martin_flow-control-in-bluez.pdf
                                'capwap.control.message_element.ac_descriptor.dtls_policy.r',  # RFC 5415
                                'couchbase.extras.subdoc.flags.reserved',
                                'wlan.fixed.capabilities.cfpoll.ap',   # These are 3 separate bits...
'wlan.wfa.ie.wme.tspec.ts_info.reserved', # matches other fields in same sequence 'zbee_zcl_se.pp.attr.payment_control_configuration.reserved', # matches other fields in same sequence 'zbee_zcl_se.pp.snapshot_payload_cause.reserved', # matches other fields in same sequence 'ebhscr.eth.rsv', # matches other fields in same sequence 'v120.lli', # non-contiguous field (http://www.acacia-net.com/wwwcla/protocol/v120_l2.htm) 'stun.type.class', 'bssgp.csg_id', 'tiff.t6.unused', 'artnet.ip_prog_reply.unused', 'telnet.auth.mod.enc', 'osc.message.midi.bender', 'btle.data_header.rfu', 'stun.type.method', # figure 3 in rfc 5389 'tds.done.status', # covers all bits in bitset 'hf_iax2_video_csub', # RFC 5456, table 8.7 'iax2.video.subclass', 'dnp3.al.ana.int', 'pwcesopsn.cw.lm', 'gsm_a.rr.format_id', # EN 301 503 'siii.mst.phase', # comment in code seems convinced 'xmcp.type.class', 'xmcp.type.method', 'hf_hiqnet_flags', 'hf_hiqnet_flagmask' } ################################################################################################## field_widths = { 'FT_BOOLEAN' : 64, # TODO: Width depends upon 'display' field 'FT_CHAR' : 8, 'FT_UINT8' : 8, 'FT_INT8' : 8, 'FT_UINT16' : 16, 'FT_INT16' : 16, 'FT_UINT24' : 24, 'FT_INT24' : 24, 'FT_UINT32' : 32, 'FT_INT32' : 32, 'FT_UINT40' : 40, 'FT_INT40' : 40, 'FT_UINT48' : 48, 'FT_INT48' : 48, 'FT_UINT56' : 56, 'FT_INT56' : 56, 'FT_UINT64' : 64, 'FT_INT64' : 64 } def is_ignored_consecutive_filter(filter): ignore_patterns = [ re.compile(r'^elf.sh_type'), re.compile(r'^elf.p_type'), re.compile(r'^btavrcp.pdu_id'), re.compile(r'^nstrace.trcdbg.val(\d+)'), re.compile(r'^netlogon.dummy_string'), re.compile(r'^opa.reserved'), re.compile(r'^mpls_pm.timestamp\d\..*'), re.compile(r'^wassp.data.mu_mac'), re.compile(r'^thrift.type'), re.compile(r'^quake2.game.client.command.move.angles'), re.compile(r'^ipp.enum_value'), re.compile(r'^idrp.error.subcode'), re.compile(r'^ftdi-ft.lValue'), re.compile(r'^6lowpan.src'), re.compile(r'^couchbase.flex_frame.frame.id'), re.compile(r'^rtps.param.id'), re.compile(r'^rtps.locator.port'), re.compile(r'^sigcomp.udvm.value'), re.compile(r'^opa.mad.attributemodifier.n'), re.compile(r'^smb.cmd'), re.compile(r'^sctp.checksum'), re.compile(r'^dhcp.option.end'), re.compile(r'^nfapi.num.bf.vector.bf.value'), re.compile(r'^dnp3.al.range.abs'), re.compile(r'^dnp3.al.range.quantity'), re.compile(r'^dnp3.al.index'), re.compile(r'^dnp3.al.size'), re.compile(r'^ftdi-ft.hValue'), re.compile(r'^homeplug_av.op_attr_cnf.data.sw_sub'), re.compile(r'^radiotap.he_mu.preamble_puncturing'), re.compile(r'^ndmp.file'), re.compile(r'^ocfs2.dlm.lvb'), re.compile(r'^oran_fh_cus.reserved'), re.compile(r'^qnet6.kif.msgsend.msg.read.xtypes0-7'), re.compile(r'^qnet6.kif.msgsend.msg.write.xtypes0-7'), re.compile(r'^mih.sig_strength'), re.compile(r'^couchbase.flex_frame.frame.len'), re.compile(r'^nvme-rdma.read_to_host_req'), re.compile(r'^rpcap.dummy'), re.compile(r'^sflow.flow_sample.output_interface'), re.compile(r'^socks.results'), re.compile(r'^opa.mad.attributemodifier.p'), re.compile(r'^v5ua.efa'), re.compile(r'^zbncp.data.tx_power'), re.compile(r'^zbncp.data.nwk_addr'), re.compile(r'^zbee_zcl_hvac.pump_config_control.attr.ctrl_mode'), re.compile(r'^nat-pmp.external_port'), re.compile(r'^zbee_zcl.attr.float'), re.compile(r'^wpan-tap.phr.fsk_ms.mode'), re.compile(r'^mysql.exec_flags'), re.compile(r'^pim.metric_pref'), re.compile(r'^modbus.regval_float'), re.compile(r'^alcap.cau.value'), re.compile(r'^bpv7.crc_field'), re.compile(r'^at.chld.mode'), 
re.compile(r'^btl2cap.psm'), re.compile(r'^srvloc.srvtypereq.nameauthlistlen'), re.compile(r'^a11.ext.code'), re.compile(r'^adwin_config.port'), re.compile(r'^afp.unknown'), re.compile(r'^ansi_a_bsmap.mid.digit_1'), re.compile(r'^ber.unknown.OCTETSTRING'), re.compile(r'^btatt.handle'), re.compile(r'^btl2cap.option_flushto'), re.compile(r'^cip.network_segment.prod_inhibit'), re.compile(r'^cql.result.rows.table_name'), re.compile(r'^dcom.sa.vartype'), re.compile(r'^f5ethtrailer.slot'), re.compile(r'^ipdr.cm_ipv6_addr'), re.compile(r'^mojito.kuid'), re.compile(r'^mtp3.priority'), re.compile(r'^pw.cw.length'), re.compile(r'^rlc.ciphered_data'), re.compile(r'^vp8.pld.pictureid'), re.compile(r'^gryphon.sched.channel'), re.compile(r'^pn_io.ioxs'), re.compile(r'^pn_dcp.block_qualifier_reset'), re.compile(r'^pn_dcp.suboption_device_instance'), re.compile(r'^nfs.attr'), re.compile(r'^nfs.create_session_flags'), re.compile(r'^rmt-lct.toi64'), re.compile(r'^gryphon.data.header_length'), re.compile(r'^quake2.game.client.command.move.movement'), re.compile(r'^isup.parameter_type'), re.compile(r'^cip.port'), re.compile(r'^adwin.fifo_no'), re.compile(r'^bthci_evt.hci_vers_nr'), re.compile(r'^gryphon.usdt.stmin_active'), re.compile(r'^dnp3.al.anaout.int'), re.compile(r'^dnp3.al.ana.int'), re.compile(r'^dnp3.al.cnt'), re.compile(r'^bthfp.chld.mode'), re.compile(r'^nat-pmp.pml'), re.compile(r'^isystemactivator.actproperties.ts.hdr'), re.compile(r'^rtpdump.txt_addr'), re.compile(r'^unistim.vocoder.id'), re.compile(r'^mac.ueid') ] for patt in ignore_patterns: if patt.match(filter): return True return False class ValueString: def __init__(self, file, name, vals, macros): self.file = file self.name = name self.raw_vals = vals self.parsed_vals = {} self.valid = True # Now parse out each entry in the value_string matches = re.finditer(r'\{\s*([0-9_A-Za-z]*)\s*,\s*(".*?")\s*}\s*,', self.raw_vals) for m in matches: value,label = m.group(1), m.group(2) if value in macros: value = macros[value] elif any(not c in '0123456789abcdefABCDEFxX' for c in value): self.valid = False return else: # Read according to the appropriate base. if value.lower().startswith('0x'): value = int(value, 16) elif value.startswith('0'): value = int(value, 8) else: value = int(value, 10) # Check for conflict before inserting if value in self.parsed_vals and not label == self.parsed_vals[value]: print('Error:', self.file, ': value_string', self.name, '- value ', value, 'repeated with different values - was', self.parsed_vals[value], 'now', label) global errors_found errors_found += 1 else: # Add into table self.parsed_vals[value] = label def __str__(self): return self.name + '= { ' + self.raw_vals + ' }' # Look for value_string entries in a dissector file. Return a dict name -> ValueString def findValueStrings(filename, macros): vals_found = {} #static const value_string radio_type_vals[] = #{ # { 0, "FDD"}, # { 1, "TDD"}, # { 0, NULL } #}; with open(filename, 'r', encoding="utf8") as f: contents = f.read() # Remove comments so as not to trip up RE. contents = removeComments(contents) matches = re.finditer(r'.*const value_string\s*([a-zA-Z0-9_]*)\s*\[\s*\]\s*\=\s*\{([\{\}\d\,a-zA-Z0-9_\-\s\"]*)\};', contents) for m in matches: name = m.group(1) vals = m.group(2) vals_found[name] = ValueString(filename, name, vals, macros) return vals_found # The relevant parts of an hf item. Used as value in dict where hf variable name is key. 
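# For reference, a sketch of the kind of registration that find_items() below
# parses (the names are illustrative, not from any real dissector):
#   { &hf_foo_flag,
#     { "Flag", "foo.flag", FT_BOOLEAN, 8, TFS(&tfs_set_notset), 0x80, NULL, HFILL }},
# which yields label="Flag", filter="foo.flag", item_type=FT_BOOLEAN,
# type_modifier=8 and mask=0x80.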
class Item:

    previousItem = None

    def __init__(self, filename, hf, filter, label, item_type, type_modifier, macros, mask=None,
                 check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
        self.filename = filename
        self.hf = hf
        self.filter = filter
        self.label = label

        self.mask = mask
        self.mask_exact_width = mask_exact_width

        global warnings_found

        self.set_mask_value(macros)

        if check_consecutive:
            if Item.previousItem and Item.previousItem.filter == filter:
                if label != Item.previousItem.label:
                    if not is_ignored_consecutive_filter(self.filter):
                        print('Warning:', filename, hf, ': - filter "' + filter +
                              '" appears consecutively - labels are "' + Item.previousItem.label + '" and "' + label + '"')
                        warnings_found += 1

            Item.previousItem = self

        # Optionally check label.
        if check_label:
            if label.startswith(' ') or label.endswith(' '):
                print('Warning: ' + filename, hf, 'filter "' + filter + '" label "' + label + '" begins or ends with a space')
                warnings_found += 1

            if (label.count('(') != label.count(')') or
                label.count('[') != label.count(']') or
                label.count('{') != label.count('}')):
                # Ignore if includes quotes, as may be unbalanced.
                if label.find("'") == -1:
                    print('Warning: ' + filename, hf, 'filter "' + filter + '" label', '"' + label + '"', 'has unbalanced parens/braces/brackets')
                    warnings_found += 1
            if item_type != 'FT_NONE' and label.endswith(':'):
                print('Warning: ' + filename, hf, 'filter "' + filter + '" label', '"' + label + '"', 'ends with an unnecessary colon')
                warnings_found += 1

        self.item_type = item_type
        self.type_modifier = type_modifier

        # Optionally check that mask bits are contiguous
        if check_mask:
            if self.mask_read and not mask in { 'NULL', '0x0', '0', '0x00' }:
                self.check_contiguous_bits(mask)
                self.check_num_digits(self.mask)
                self.check_digits_all_zeros(self.mask)

    def __str__(self):
        return 'Item ({0} "{1}" {2} type={3}:{4} mask={5})'.format(self.filename, self.label, self.filter,
                                                                   self.item_type, self.type_modifier, self.mask)

    def set_mask_value(self, macros):
        try:
            self.mask_read = True

            # Substitute mask if found as a macro..
            if self.mask in macros:
                self.mask = macros[self.mask]
            elif any(not c in '0123456789abcdefABCDEFxX' for c in self.mask):
                self.mask_read = False
                self.mask_value = 0
                return

            # Read according to the appropriate base.
            if self.mask.startswith('0x'):
                self.mask_value = int(self.mask, 16)
            elif self.mask.startswith('0'):
                self.mask_value = int(self.mask, 8)
            else:
                self.mask_value = int(self.mask, 10)
        except Exception:
            self.mask_read = False
            self.mask_value = 0

    # Return true if bit position n is set in value.
    def check_bit(self, value, n):
        return (value & (0x1 << n)) != 0

    # Output a warning if non-contiguous bits are found in the mask (guint64).
    # Note that this legitimately happens in several dissectors where multiple reserved/unassigned
    # bits are conflated into one field.
    # TODO: there is probably a cool/efficient way to check this?
    def check_contiguous_bits(self, mask):
        if not self.mask_value:
            return

        # Do see legitimate non-contiguous bits often for these..
        if name_has_one_of(self.hf, ['reserved', 'unknown', 'unused', 'spare']):
            return
        if name_has_one_of(self.label, ['reserved', 'unknown', 'unused', 'spare']):
            return

        # Walk past any l.s. 0 bits
        n = 0
        while not self.check_bit(self.mask_value, n) and n <= 63:
            n += 1
        if n == 63:
            return

        mask_start = n
        # Walk through any bits that are set
        while self.check_bit(self.mask_value, n) and n <= 63:
            n += 1
        n += 1

        if n >= 63:
            return

        # Look up the field width
        field_width = 0
        if not self.item_type in field_widths:
            print('unexpected item_type is ', self.item_type)
            field_width = 64
        else:
            field_width = self.get_field_width_in_bits()

        # It's a problem if the mask_width is > field_width - some of the bits won't get looked at!?
        mask_width = n-1-mask_start
        if field_width is not None and (mask_width > field_width):
            # N.B. No call, so no line number.
            print(self.filename + ':', self.hf, 'filter=', self.filter, self.item_type, 'so field_width=', field_width,
                  'but mask is', mask, 'which is', mask_width, 'bits wide!')
            global warnings_found
            warnings_found += 1

        # Now, any more zero set bits are an error!
        if self.filter in known_non_contiguous_fields or self.filter.startswith('rtpmidi'):
            # Don't report if we know this one is Ok.
            # TODO: also exclude items that are used as root in add_bitmask() calls?
            return
        while n <= 63:
            if self.check_bit(self.mask_value, n):
                print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                      ' - mask with non-contiguous bits', mask, '(', hex(self.mask_value), ')')
                warnings_found += 1
                return
            n += 1

    def get_field_width_in_bits(self):
        if self.item_type == 'FT_BOOLEAN':
            if self.type_modifier == 'NULL':
                return 8  # i.e. 1 byte
            elif self.type_modifier == 'BASE_NONE':
                return 8
            elif self.type_modifier == 'SEP_DOT':   # from proto.h, only meant for FT_BYTES
                return 64
            else:
                try:
                    # For FT_BOOLEAN, modifier is just numerical number of bits. Round up to next nibble.
                    return int((int(self.type_modifier) + 3)/4)*4
                except Exception:
                    return None
        else:
            if self.item_type in field_widths:
                # Lookup fixed width for this type
                return field_widths[self.item_type]
            else:
                return None

    def check_num_digits(self, mask):
        if mask.startswith('0x') and len(mask) > 3:
            global warnings_found
            global errors_found

            # Warn if odd number of digits.  TODO: only if >= 5?
            if len(mask) % 2 and self.item_type != 'FT_BOOLEAN':
                print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                      ' - mask has odd number of digits', mask,
                      'expected max for', self.item_type, 'is', int((self.get_field_width_in_bits())/4))
                warnings_found += 1

            if self.item_type in field_widths:
                # Longer than it should be?
                width_in_bits = self.get_field_width_in_bits()
                if width_in_bits is None:
                    return
                if len(mask)-2 > width_in_bits/4:
                    extra_digits = mask[2:2+(len(mask)-2 - int(self.get_field_width_in_bits()/4))]
                    # It's definitely an error if any of these are non-zero, as they won't have any effect!
                    if extra_digits != '0'*len(extra_digits):
                        print('Error:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask,
                              "with len is", len(mask)-2, "but type", self.item_type, " indicates max of",
                              int(self.get_field_width_in_bits()/4),
                              "and extra digits are non-zero (" + extra_digits + ")")
                        errors_found += 1
                    else:
                        # Has extra leading zeros, still confusing, so warn.
                        print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask,
                              "with len", len(mask)-2, "but type", self.item_type, " indicates max of",
                              int(self.get_field_width_in_bits()/4))
                        warnings_found += 1

                # Strict/fussy check - expecting mask length to match field width exactly!
                # Currently only doing for FT_BOOLEAN, and don't expect masks to be written in full for 64-bit fields!
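                # e.g. (hypothetical field) an FT_BOOLEAN held in 8 bits should use a
                # 2-digit mask such as 0x80; writing it as 0x0080 would be reported here.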
                if self.mask_exact_width:
                    ideal_mask_width = int(self.get_field_width_in_bits()/4)
                    if self.item_type == 'FT_BOOLEAN' and ideal_mask_width < 16 and len(mask)-2 != ideal_mask_width:
                        print('Warning:', self.filename, self.hf, 'filter=', self.filter, 'mask', self.mask,
                              "with len", len(mask)-2, "but type", self.item_type, "|", self.type_modifier,
                              " indicates should be", int(self.get_field_width_in_bits()/4))
                        warnings_found += 1

            else:
                # This type shouldn't have a mask set at all.
                print('Warning:', self.filename, self.hf, 'filter=', self.filter, ' - item has type', self.item_type, 'but mask set:', mask)
                warnings_found += 1

    def check_digits_all_zeros(self, mask):
        if mask.startswith('0x') and len(mask) > 3:
            if mask[2:] == '0'*(len(mask)-2):
                print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                      ' - item mask has all zeros - this is confusing! :', '"' + mask + '"')
                global warnings_found
                warnings_found += 1

    # A mask where all bits are set should instead be 0.
    # Exceptions might be where:
    # - it is the only entry in an add_bitmask() set
    # - it represents flags, but the dissector is not yet decoding them
    def check_full_mask(self, mask, field_arrays):
        if self.item_type == "FT_BOOLEAN":
            return
        if self.label.lower().find('mask') != -1 or self.label.lower().find('flag') != -1 or self.label.lower().find('bitmap') != -1:
            return
        if mask.startswith('0x') and len(mask) > 3:
            width_in_bits = self.get_field_width_in_bits()
            if not width_in_bits:
                return
            num_digits = int(width_in_bits / 4)
            if mask[2:] == 'f'*num_digits or mask[2:] == 'F'*num_digits:
                # Don't report though if the only item in a field_array
                for arr in field_arrays:
                    fields_list = field_arrays[arr][0]
                    if len(fields_list) == 1 and fields_list[0] == self.hf:
                        # Was first and only!
                        return
                print('Warning:', self.filename, self.hf, 'filter=', self.filter,
                      ' - mask is all set - this is confusing - set 0 instead! :', '"' + mask + '"')
                global warnings_found
                warnings_found += 1

    # Return True if appears to be a match
    def check_label_vs_filter(self, reportError=True, reportNumericalMismatch=True):
        global warnings_found

        last_filter = self.filter.split('.')[-1]
        last_filter_orig = last_filter
        last_filter = last_filter.replace('-', '')
        last_filter = last_filter.replace('_', '')
        last_filter = last_filter.replace(' ', '')
        label = self.label
        label_orig = label
        label = label.replace(' ', '')
        label = label.replace('-', '')
        label = label.replace('_', '')
        label = label.replace('(', '')
        label = label.replace(')', '')
        label = label.replace('/', '')

        # OK if filter is abbrev of label.
        label_words = self.label.split(' ')
        label_words = [w for w in label_words if len(w)]
        if len(label_words) == len(last_filter):
            #print(label_words)
            abbrev_letters = [w[0] for w in label_words]
            abbrev = ''.join(abbrev_letters)
            if abbrev.lower() == last_filter.lower():
                return True

        # If both have numbers, they should probably match!
        label_numbers = [int(n) for n in re.findall(r'\d+', label_orig)]
        filter_numbers = [int(n) for n in re.findall(r'\d+', last_filter_orig)]
        if len(label_numbers) == len(filter_numbers) and label_numbers != filter_numbers:
            if reportNumericalMismatch:
                print('Warning:', self.filename, self.hf, 'label="' + self.label + '" has different **numbers** from filter="' + self.filter + '"')
                print(label_numbers, filter_numbers)
                warnings_found += 1
            return False

        # If they match after trimming number from filter, they should match.
        if label.lower() == last_filter.lower().rstrip("0123456789"):
            return True

        # Are they just different?
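        # e.g. (hypothetical item) a label of "Magic" against a filter ending in
        # "version" shares no text at all, so it would be reported below.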
        if label.lower().find(last_filter.lower()) == -1:
            if reportError:
                print('Warning:', self.filename, self.hf, 'label="' + self.label + '" does not seem to match filter="' + self.filter + '"')
                warnings_found += 1
            return False

        return True


class CombinedCallsCheck:
    def __init__(self, file, apiChecks):
        self.file = file
        self.apiChecks = apiChecks
        self.get_all_calls()

    def get_all_calls(self):
        self.all_calls = []
        # Combine calls into one list.
        for check in self.apiChecks:
            self.all_calls += check.calls

        # Sort by line number.
        self.all_calls.sort(key=lambda x: x.line_number)

    def check_consecutive_item_calls(self):
        with open(self.file, 'r', encoding="utf8") as f:
            lines = f.read().splitlines()

        prev = None
        for call in self.all_calls:

            # These names commonly do appear together..
            if name_has_one_of(call.hf_name, ['unused', 'unknown', 'spare', 'reserved', 'default']):
                return

            if prev and call.hf_name == prev.hf_name:
                # More compelling if close together..
                if call.line_number > prev.line_number and call.line_number - prev.line_number <= 4:
                    scope_different = False
                    for l in range(prev.line_number, call.line_number-1):
                        if lines[l].find('{') != -1 or lines[l].find('}') != -1 or lines[l].find('else') != -1 or lines[l].find('break;') != -1 or lines[l].find('if ') != -1:
                            scope_different = True
                            break
                    # Also more compelling if check for and scope changes { } in lines in-between?
                    if not scope_different:
                        print('Warning:', self.file + ':' + str(call.line_number),
                              call.hf_name + ' called consecutively at line', call.line_number, '- previous at', prev.line_number)
                        global warnings_found
                        warnings_found += 1
            prev = call


# These are APIs in proto.c that check a set of types at runtime and can print '.. is not of type ..' to the console
# if the type is not suitable.
apiChecks = []
apiChecks.append(APICheck('proto_tree_add_item_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True))
apiChecks.append(APICheck('proto_tree_add_item_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}, positive_length=True))
apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}, positive_length=True))
apiChecks.append(APICheck('ptvcursor_add_ret_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True))
apiChecks.append(APICheck('proto_tree_add_item_ret_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64'}, positive_length=True))
apiChecks.append(APICheck('proto_tree_add_item_ret_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'}, positive_length=True))
apiChecks.append(APICheck('proto_tree_add_item_ret_boolean', { 'FT_BOOLEAN'}, positive_length=True))
apiChecks.append(APICheck('proto_tree_add_item_ret_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'}))
apiChecks.append(APICheck('proto_tree_add_item_ret_display_string_and_length', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC', 'FT_BYTES', 'FT_UINT_BYTES'}))
apiChecks.append(APICheck('proto_tree_add_item_ret_time_string', { 'FT_ABSOLUTE_TIME', 'FT_RELATIVE_TIME'}))
apiChecks.append(APICheck('proto_tree_add_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'}))
apiChecks.append(APICheck('proto_tree_add_uint_format_value', { 'FT_CHAR', 'FT_UINT8',
'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'})) apiChecks.append(APICheck('proto_tree_add_uint_format', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM'})) apiChecks.append(APICheck('proto_tree_add_uint64', { 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64', 'FT_FRAMENUM'})) apiChecks.append(APICheck('proto_tree_add_int64', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) apiChecks.append(APICheck('proto_tree_add_int64_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) apiChecks.append(APICheck('proto_tree_add_int64_format', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) apiChecks.append(APICheck('proto_tree_add_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) apiChecks.append(APICheck('proto_tree_add_int_format_value', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) apiChecks.append(APICheck('proto_tree_add_int_format', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'})) apiChecks.append(APICheck('proto_tree_add_boolean', { 'FT_BOOLEAN'})) apiChecks.append(APICheck('proto_tree_add_boolean64', { 'FT_BOOLEAN'})) apiChecks.append(APICheck('proto_tree_add_float', { 'FT_FLOAT'})) apiChecks.append(APICheck('proto_tree_add_float_format', { 'FT_FLOAT'})) apiChecks.append(APICheck('proto_tree_add_float_format_value', { 'FT_FLOAT'})) apiChecks.append(APICheck('proto_tree_add_double', { 'FT_DOUBLE'})) apiChecks.append(APICheck('proto_tree_add_double_format', { 'FT_DOUBLE'})) apiChecks.append(APICheck('proto_tree_add_double_format_value', { 'FT_DOUBLE'})) apiChecks.append(APICheck('proto_tree_add_string', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) apiChecks.append(APICheck('proto_tree_add_string_format', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) apiChecks.append(APICheck('proto_tree_add_string_format_value', { 'FT_STRING', 'FT_STRINGZ', 'FT_UINT_STRING', 'FT_STRINGZPAD', 'FT_STRINGZTRUNC'})) apiChecks.append(APICheck('proto_tree_add_guid', { 'FT_GUID'})) apiChecks.append(APICheck('proto_tree_add_oid', { 'FT_OID'})) apiChecks.append(APICheck('proto_tree_add_none_format', { 'FT_NONE'})) apiChecks.append(APICheck('proto_tree_add_item_ret_varint', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64', 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_FRAMENUM', 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',})) apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value', { 'FT_BOOLEAN'})) apiChecks.append(APICheck('proto_tree_add_boolean_bits_format_value64', { 'FT_BOOLEAN'})) apiChecks.append(APICheck('proto_tree_add_ascii_7bits_item', { 'FT_STRING'})) # TODO: positions are different, and takes 2 hf_fields.. #apiChecks.append(APICheck('proto_tree_add_checksum', { 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'})) apiChecks.append(APICheck('proto_tree_add_int64_bits_format_value', { 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64'})) # TODO: add proto_tree_add_bytes_item, proto_tree_add_time_item ? 
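# Taken together, the checks above mirror proto.c's runtime type guards.  For
# example (hypothetical field): if hf_foo_count was registered as FT_INT32,
# then a call such as
#     proto_tree_add_uint(tree, hf_foo_count, tvb, 0, 4, count);
# is reported as an error here, because FT_INT32 is not in the allowed set
# given above for proto_tree_add_uint.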
bitmask_types = { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32',
                  'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32',
                  'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
                  'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
                  'FT_BOOLEAN'}
apiChecks.append(APICheck('proto_tree_add_bitmask', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_tree', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_ret_uint64', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_with_flags_ret_uint64', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_value', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_value_with_flags', bitmask_types))
apiChecks.append(APICheck('proto_tree_add_bitmask_len', bitmask_types))
# N.B., proto_tree_add_bitmask_list does not have a root item, just a subtree...

add_bits_types = { 'FT_CHAR', 'FT_BOOLEAN',
                   'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32', 'FT_UINT40', 'FT_UINT48', 'FT_UINT56', 'FT_UINT64',
                   'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32', 'FT_INT40', 'FT_INT48', 'FT_INT56', 'FT_INT64',
                   'FT_BYTES'}
apiChecks.append(APICheck('proto_tree_add_bits_item', add_bits_types))
apiChecks.append(APICheck('proto_tree_add_bits_ret_val', add_bits_types))

# TODO: doesn't even have an hf_item!
#apiChecks.append(APICheck('proto_tree_add_bitmask_text', bitmask_types))

# Check some ptvcursor calls too.
apiChecks.append(APICheck('ptvcursor_add_ret_uint', { 'FT_CHAR', 'FT_UINT8', 'FT_UINT16', 'FT_UINT24', 'FT_UINT32'}))
apiChecks.append(APICheck('ptvcursor_add_ret_int', { 'FT_INT8', 'FT_INT16', 'FT_INT24', 'FT_INT32'}))
apiChecks.append(APICheck('ptvcursor_add_ret_boolean', { 'FT_BOOLEAN'}))

# Also try to check proto_tree_add_item() calls (for length)
apiChecks.append(ProtoTreeAddItemCheck())
apiChecks.append(ProtoTreeAddItemCheck(True))    # for ptvcursor_add()


def removeComments(code_string):
    code_string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", code_string)        # C-style comment
    code_string = re.sub(re.compile(r"//.*?\n"), "", code_string)                     # C++-style comment
    code_string = re.sub(re.compile(r"#if 0.*?#endif", re.DOTALL), "", code_string)   # Ignored region
    return code_string


# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        return False

    # Open file
    f_read = open(filename, 'r', encoding="utf8")
    lines_tested = 0
    for line in f_read:
        # The comment to say that it's generated is near the top, so give up once
        # we get a few lines down.
        if lines_tested > 10:
            f_read.close()
            return False
        if (line.find('Generated automatically') != -1 or
            line.find('Generated Automatically') != -1 or
            line.find('Autogenerated from') != -1 or
            line.find('is autogenerated') != -1 or
            line.find('automatically generated by Pidl') != -1 or
            line.find('Created by: The Qt Meta Object Compiler') != -1 or
            line.find('This file was generated') != -1 or
            line.find('This filter was automatically generated') != -1 or
            line.find('This file is auto generated, do not edit!') != -1):
            f_read.close()
            return True
        lines_tested = lines_tested + 1

    # OK, looks like a hand-written file!
    f_read.close()
    return False


def find_macros(filename):
    macros = {}
    with open(filename, 'r', encoding="utf8") as f:
        contents = f.read()

        # Remove comments so as not to trip up RE.
contents = removeComments(contents) matches = re.finditer( r'#define\s*([A-Z0-9_]*)\s*([0-9xa-fA-F]*)\n', contents) for m in matches: # Store this mapping. macros[m.group(1)] = m.group(2) return macros # Look for hf items (i.e. full item to be registered) in a dissector file. def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False): is_generated = isGeneratedFile(filename) items = {} with open(filename, 'r', encoding="utf8") as f: contents = f.read() # Remove comments so as not to trip up RE. contents = removeComments(contents) # N.B. re extends all the way to HFILL to avoid greedy matching matches = re.finditer( r'.*\{\s*\&(hf_[a-z_A-Z0-9]*)\s*,\s*{\s*\"(.*?)\"\s*,\s*\"(.*?)\"\s*,\s*(.*?)\s*,\s*([0-9A-Z_\|\s]*?)\s*,\s*(.*?)\s*,\s*(.*?)\s*,\s*([a-zA-Z0-9\W\s_\u00f6\u00e4]*?)\s*,\s*HFILL', contents) for m in matches: # Store this item. hf = m.group(1) items[hf] = Item(filename, hf, filter=m.group(3), label=m.group(2), item_type=m.group(4), type_modifier=m.group(5), macros=macros, mask=m.group(7), check_mask=check_mask, mask_exact_width=mask_exact_width, check_label=check_label, check_consecutive=(not is_generated and check_consecutive)) return items # Looking for args to ..add_bitmask_..() calls that are not NULL-terminated or have repeated items. # TODO: some dissectors have similar-looking hf arrays for other reasons, so need to cross-reference with # the 6th arg of ..add_bitmask_..() calls... # TODO: return items (rather than local checks) from here so can be checked against list of calls for given filename def find_field_arrays(filename, all_fields, all_hf): field_entries = {} global warnings_found with open(filename, 'r', encoding="utf8") as f: contents = f.read() # Remove comments so as not to trip up RE. contents = removeComments(contents) # Find definition of hf array matches = re.finditer(r'static\s*g?int\s*\*\s*const\s+([a-zA-Z0-9_]*)\s*\[\]\s*\=\s*\{([a-zA-Z0-9,_\&\s]*)\}', contents) for m in matches: name = m.group(1) # Ignore if not used in a call to an _add_bitmask_ API if not name in all_fields: continue fields_text = m.group(2) fields_text = fields_text.replace('&', '') fields_text = fields_text.replace(',', '') # Get list of each hf field in the array fields = fields_text.split() if fields[0].startswith('ett_'): continue if fields[-1].find('NULL') == -1 and fields[-1] != '0': print('Warning:', filename, name, 'is not NULL-terminated - {', ', '.join(fields), '}') warnings_found += 1 continue # Do any hf items reappear? seen_fields = set() for f in fields: if f in seen_fields: print(filename, name, f, 'already added!') warnings_found += 1 seen_fields.add(f) # Check for duplicated flags among entries.. combined_mask = 0x0 for f in fields[0:-1]: if f in all_hf: new_mask = all_hf[f].mask_value if new_mask & combined_mask: print('Warning:', filename, name, 'has overlapping mask - {', ', '.join(fields), '} combined currently', hex(combined_mask), f, 'adds', hex(new_mask)) warnings_found += 1 combined_mask |= new_mask # Make sure all entries have the same width set_field_width = None for f in fields[0:-1]: if f in all_hf: new_field_width = all_hf[f].get_field_width_in_bits() if set_field_width is not None and new_field_width != set_field_width: # Its not uncommon for fields to be used in multiple sets, some of which can be different widths.. 
                        print('Note:', filename, name, 'set items not all same width - {', ', '.join(fields), '}  seen', set_field_width, 'now', new_field_width)
                    set_field_width = new_field_width

        # Add entry to table
        field_entries[name] = (fields[0:-1], combined_mask)

    return field_entries


def find_item_declarations(filename):
    items = set()

    with open(filename, 'r', encoding="utf8") as f:
        lines = f.read().splitlines()
        p = re.compile(r'^static int (hf_[a-zA-Z0-9_]*)\s*\=\s*-1;')
        for line in lines:
            m = p.search(line)
            if m:
                items.add(m.group(1))
    return items


def find_item_extern_declarations(filename):
    items = set()
    with open(filename, 'r', encoding="utf8") as f:
        lines = f.read().splitlines()
        p = re.compile(r'^\s*(hf_[a-zA-Z0-9_]*)\s*\=\s*proto_registrar_get_id_byname\s*\(')
        for line in lines:
            m = p.search(line)
            if m:
                items.add(m.group(1))
    return items


def is_dissector_file(filename):
    p = re.compile(r'.*(packet|file)-.*\.c$')
    return p.match(filename)


def findDissectorFilesInFolder(folder, recursive=False):
    dissector_files = []

    if recursive:
        for root, subfolders, files in os.walk(folder):
            if should_exit:
                # Stop scanning, but still return a list so callers can iterate it.
                break
            for f in files:
                f = os.path.join(root, f)
                dissector_files.append(f)
    else:
        for f in sorted(os.listdir(folder)):
            if should_exit:
                break
            filename = os.path.join(folder, f)
            dissector_files.append(filename)

    return [x for x in filter(is_dissector_file, dissector_files)]


# Run checks on the given dissector file.
def checkFile(filename, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False,
              check_missing_items=False, check_bitmask_fields=False, label_vs_filter=False):
    # Check file exists - e.g. may have been deleted in a recent commit.
    if not os.path.exists(filename):
        print(filename, 'does not exist!')
        return

    # Find simple macros so can substitute into items and calls.
    macros = find_macros(filename)

    # Find important parts of items.
    items_defined = find_items(filename, macros, check_mask, mask_exact_width, check_label, check_consecutive)
    items_extern_declared = {}

    items_declared = {}
    if check_missing_items:
        items_declared = find_item_declarations(filename)
        items_extern_declared = find_item_extern_declarations(filename)

    fields = set()

    # Get 'fields' out of calls
    for c in apiChecks:
        c.find_calls(filename, macros)
        for call in c.calls:
            # From _add_bitmask() calls
            if call.fields:
                fields.add(call.fields)

    # Checking for lists of fields for add_bitmask calls
    field_arrays = {}
    if check_bitmask_fields:
        field_arrays = find_field_arrays(filename, fields, items_defined)

    # Find (and sanity-check) value_strings; the checks happen in the constructor.
    value_strings = findValueStrings(filename, macros)

    if check_mask and check_bitmask_fields:
        for i in items_defined:
            item = items_defined[i]
            item.check_full_mask(item.mask, field_arrays)

    # Now actually check the calls
    for c in apiChecks:
        c.check_against_items(items_defined, items_declared, items_extern_declared, check_missing_items, field_arrays)

    if label_vs_filter:
        matches = 0
        for hf in items_defined:
            if items_defined[hf].check_label_vs_filter(reportError=False, reportNumericalMismatch=True):
                matches += 1

        # Only checking if almost every field does match.
        checking = len(items_defined) and matches < len(items_defined) and ((matches / len(items_defined)) > 0.9)
        if checking:
            print(filename, ':', matches, 'label-vs-filter matches out of', len(items_defined), 'so reporting mismatches')
            for hf in items_defined:
                items_defined[hf].check_label_vs_filter(reportError=True, reportNumericalMismatch=False)


#################################################################
# Main logic.
# command-line args.  Controls which dissector files should be checked.
# If no args given, will just scan epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--file', action='append',
                    help='specify individual dissector file to test')
parser.add_argument('--folder', action='store', default='',
                    help='specify folder to test')
parser.add_argument('--commits', action='store',
                    help='last N commits to check')
parser.add_argument('--open', action='store_true',
                    help='check open files')
parser.add_argument('--mask', action='store_true',
                    help='when set, check mask field too')
parser.add_argument('--mask-exact-width', action='store_true',
                    help='when set, check width of mask against field width')
parser.add_argument('--label', action='store_true',
                    help='when set, check label field too')
parser.add_argument('--consecutive', action='store_true',
                    help='when set, look for copy/paste errors between consecutive items')
parser.add_argument('--missing-items', action='store_true',
                    help='when set, look for used items that were never registered')
parser.add_argument('--check-bitmask-fields', action='store_true',
                    help='when set, attempt to check arrays of hf items passed to add_bitmask() calls')
parser.add_argument('--label-vs-filter', action='store_true',
                    help='when set, check whether label matches last part of filter')
parser.add_argument('--all-checks', action='store_true',
                    help='when set, apply all checks to selected files')

args = parser.parse_args()

# Turn all checks on.
if args.all_checks:
    args.mask = True
    args.mask_exact_width = True
    args.consecutive = True
    args.check_bitmask_fields = True
    args.label_vs_filter = True

if args.check_bitmask_fields:
    args.mask = True


# Get files from wherever command-line args indicate.
files = []
if args.file:
    # Add specified file(s)
    for f in args.file:
        if not os.path.isfile(f):
            print('Chosen file', f, 'does not exist.')
            exit(1)
        else:
            files.append(f)
elif args.folder:
    # Add all files from a given folder.
    folder = args.folder
    if not os.path.isdir(folder):
        print('Folder', folder, 'not found!')
        exit(1)
    # Find files from folder.
    print('Looking for files in', folder)
    files = findDissectorFilesInFolder(folder, recursive=True)
elif args.commits:
    # Get files affected by specified number of commits.
    command = ['git', 'diff', '--name-only', '--diff-filter=d', 'HEAD~' + args.commits]
    files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Will examine dissector files only
    files = list(filter(lambda f: is_dissector_file(f), files))
elif args.open:
    # Unstaged changes.
    command = ['git', 'diff', '--name-only', '--diff-filter=d']
    files = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files = list(filter(lambda f: is_dissector_file(f), files))
    # Staged changes.
    command = ['git', 'diff', '--staged', '--name-only', '--diff-filter=d']
    files_staged = [f.decode('utf-8') for f in subprocess.check_output(command).splitlines()]
    # Only interested in dissector files.
    files_staged = list(filter(lambda f: is_dissector_file(f), files_staged))
    for f in files_staged:
        if f not in files:
            files.append(f)
else:
    # Find all dissector files.
    files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))
    files += findDissectorFilesInFolder(os.path.join('plugins', 'epan'), recursive=True)


# If scanning a subset of files, list them here.
print('Examining:') if args.file or args.commits or args.open: if files: print(' '.join(files), '\n') else: print('No files to check.\n') else: print('All dissector modules\n') # Now check the files. for f in files: if should_exit: exit(1) checkFile(f, check_mask=args.mask, mask_exact_width=args.mask_exact_width, check_label=args.label, check_consecutive=args.consecutive, check_missing_items=args.missing_items, check_bitmask_fields=args.check_bitmask_fields, label_vs_filter=args.label_vs_filter) # Do checks against all calls. if args.consecutive: combined_calls = CombinedCallsCheck(f, apiChecks) # This hasn't really found any issues, but shows lots of false positives (and are difficult to investigate) #combined_calls.check_consecutive_item_calls() # Show summary. print(warnings_found, 'warnings') if errors_found: print(errors_found, 'errors') exit(1)
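# Illustrative invocations (a sketch only; assumes the script is run from the
# top of a Wireshark source tree, and the dissector file name is hypothetical):
#   python3 tools/check_typed_item_calls.py --all-checks
#   python3 tools/check_typed_item_calls.py --file epan/dissectors/packet-foo.c --mask --label
#   python3 tools/check_typed_item_calls.py --commits 3 --missing-items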